diff --git a/bin/swauth-cleanup-tokens b/bin/swauth-cleanup-tokens
index 3ca86cd990..bf7f1e610c 100755
--- a/bin/swauth-cleanup-tokens
+++ b/bin/swauth-cleanup-tokens
@@ -25,7 +25,7 @@ from optparse import OptionParser
 from sys import argv, exit
 from time import sleep, time
 
-from swift.common.client import Connection
+from swift.common.client import Connection, ClientException
 
 
 if __name__ == '__main__':
@@ -65,7 +65,17 @@ if __name__ == '__main__':
         while True:
             if options.verbose:
                 print 'GET %s?marker=%s' % (container, marker)
-            objs = conn.get_container(container, marker=marker)[1]
+            try:
+                objs = conn.get_container(container, marker=marker)[1]
+            except ClientException, e:
+                if e.http_status == 404:
+                    print 'Container %s not found' % (container)
+                    print 'swauth-prep needs to be rerun'
+                    exit()
+                else:
+                    print 'Object listing on container %s failed with ' \
+                          'status code %d' % (container, e.http_status)
+                    break
             if objs:
                 marker = objs[-1]['name']
             else:
@@ -90,7 +100,13 @@
                             (container, obj['name'],
                              time() - detail['expires'])
                     print 'DELETE %s/%s' % (container, obj['name'])
-                    conn.delete_object(container, obj['name'])
+                    try:
+                        conn.delete_object(container, obj['name'])
+                    except ClientException, e:
+                        if e.http_status != 404:
+                            print 'DELETE of %s/%s failed with status ' \
+                                  'code %d' % (container, obj['name'],
+                                               e.http_status)
                 elif options.verbose:
                     print "%s/%s won't expire for %ds; skipping" % \
                         (container, obj['name'],
diff --git a/doc/source/development_saio.rst b/doc/source/development_saio.rst
index 1d23226be3..47cc1f2aa6 100644
--- a/doc/source/development_saio.rst
+++ b/doc/source/development_saio.rst
@@ -11,8 +11,8 @@ virtual machine will emulate running a four node Swift cluster.
 
 * Get the *Ubuntu 10.04 LTS (Lucid Lynx)* server image:
 
-  - Ubuntu Server ISO: http://releases.ubuntu.com/10.04/ubuntu-10.04.1-server-amd64.iso (682 MB)
-  - Ubuntu Live/Install: http://cdimage.ubuntu.com/releases/10.04/release/ubuntu-10.04-dvd-amd64.iso (4.1 GB)
+  - Ubuntu Server ISO: http://releases.ubuntu.com/lucid/ubuntu-10.04.2-server-amd64.iso (717 MB)
+  - Ubuntu Live/Install: http://cdimage.ubuntu.com/releases/lucid/release/ubuntu-10.04.2-dvd-amd64.iso (4.2 GB)
   - Ubuntu Mirrors: https://launchpad.net/ubuntu/+cdmirrors
 
 * Create guest virtual machine from the Ubuntu image.
@@ -70,6 +70,7 @@ Using a loopback device for storage
 
 If you want to use a loopback device instead of another partition, follow these instructions.
 
+  #. `mkdir /srv`
   #. `dd if=/dev/zero of=/srv/swift-disk bs=1024 count=0 seek=1000000`
      (modify seek to make a larger or smaller partition)
   #. `mkfs.xfs -i size=1024 /srv/swift-disk`
@@ -79,7 +80,6 @@ If you want to use a loopback device instead of another partition, follow these
   #. `mount /mnt/sdb1`
   #. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4`
   #. `chown <your-user-name>:<your-group-name> /mnt/sdb1/*`
-  #. `mkdir /srv`
   #. `for x in {1..4}; do ln -s /mnt/sdb1/$x /srv/$x; done`
   #. `mkdir -p /etc/swift/object-server /etc/swift/container-server /etc/swift/account-server /srv/1/node/sdb1 /srv/2/node/sdb2 /srv/3/node/sdb3 /srv/4/node/sdb4 /var/run/swift`
   #. `chown -R <your-user-name>:<your-group-name> /etc/swift /srv/[1-4]/ /var/run/swift` -- **Make sure to include the trailing slash after /srv/[1-4]/**
@@ -563,7 +563,9 @@ Sample configuration files are provided with all defaults in line-by-line comments
 
 Setting up scripts for running Swift
 ------------------------------------
 
-  #. Create `~/bin/resetswift.` If you are using a loopback device substitute `/dev/sdb1` with `/srv/swift-disk`::
+  #. Create `~/bin/resetswift.`
+     If you are using a loopback device substitute `/dev/sdb1` with `/srv/swift-disk`.
+     If you did not set up rsyslog for individual logging, remove the `find /var/log/swift...` line::
 
         #!/bin/bash
diff --git a/swift/common/bench.py b/swift/common/bench.py
index 482c2d77aa..c500493187 100644
--- a/swift/common/bench.py
+++ b/swift/common/bench.py
@@ -147,6 +147,16 @@ class BenchDELETE(Bench):
         self.total = len(names)
         self.msg = 'DEL'
 
+    def run(self):
+        Bench.run(self)
+        for container in self.containers:
+            try:
+                client.delete_container(self.url, self.token, container)
+            except client.ClientException, e:
+                if e.http_status != 409:
+                    self._log_status("Unable to delete container '%s'. " \
+                        "Got http status '%d'." % (container, e.http_status))
+
     def _run(self, thread):
         if time.time() - self.heartbeat >= 15:
             self.heartbeat = time.time()
diff --git a/swift/common/utils.py b/swift/common/utils.py
index 1ba61c2abd..c50188e3e3 100644
--- a/swift/common/utils.py
+++ b/swift/common/utils.py
@@ -776,7 +776,7 @@ def readconf(conf, section_name=None, log_name=None, defaults=None):
     return conf
 
 
-def write_pickle(obj, dest, tmp):
+def write_pickle(obj, dest, tmp, pickle_protocol=0):
     """
     Ensure that a pickle file gets written to disk.  The file
     is first written to a tmp location, ensure it is synced to disk, then
@@ -785,10 +785,11 @@
     :param obj: python object to be pickled
     :param dest: path of final destination file
     :param tmp: path to tmp to use
+    :param pickle_protocol: protocol to pickle the obj with, defaults to 0
     """
-    fd, tmppath = mkstemp(dir=tmp)
+    fd, tmppath = mkstemp(dir=tmp, suffix='.tmp')
     with os.fdopen(fd, 'wb') as fo:
-        pickle.dump(obj, fo)
+        pickle.dump(obj, fo, pickle_protocol)
         fo.flush()
         os.fsync(fd)
         renamer(tmppath, dest)
@@ -990,6 +991,8 @@ def get_remote_client(req):
     if not client and 'x-forwarded-for' in req.headers:
         # remote host for other lbs
         client = req.headers['x-forwarded-for'].split(',')[0].strip()
+    if not client:
+        client = req.remote_addr
     return client
diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py
index 5f4494b736..275a94cf63 100644
--- a/swift/common/wsgi.py
+++ b/swift/common/wsgi.py
@@ -25,12 +25,6 @@ import mimetools
 import eventlet
 from eventlet import greenio, GreenPool, sleep, wsgi, listen
 from paste.deploy import loadapp, appconfig
-
-# Hook to ensure connection resets don't blow up our servers.
-# Remove with next release of Eventlet that has it in the set already.
-from errno import ECONNRESET
-wsgi.ACCEPT_ERRNO.add(ECONNRESET)
-
 from eventlet.green import socket, ssl
 
 from swift.common.utils import get_logger, drop_privileges, \
@@ -124,8 +118,8 @@ def run_wsgi(conf_file, app_section, *args, **kwargs):
     # remaining tasks should not require elevated privileges
     drop_privileges(conf.get('user', 'swift'))
 
-    # finally after binding to ports and privilege drop, run app __init__ code
-    app = loadapp('config:%s' % conf_file, global_conf={'log_name': log_name})
+    # Ensure the application can be loaded before proceeding.
+    loadapp('config:%s' % conf_file, global_conf={'log_name': log_name})
 
     # redirect errors to logger and close stdio
     capture_stdio(logger)
@@ -135,6 +129,8 @@
             eventlet.hubs.use_hub('poll')
             eventlet.patcher.monkey_patch(all=False, socket=True)
             monkey_patch_mimetools()
+            app = loadapp('config:%s' % conf_file,
+                          global_conf={'log_name': log_name})
             pool = GreenPool(size=1024)
             try:
                 wsgi.server(sock, app, NullLogger(), custom_pool=pool)
diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py
index 01c0da056f..f58bace4b8 100644
--- a/swift/obj/replicator.py
+++ b/swift/obj/replicator.py
@@ -30,7 +30,7 @@ from eventlet.support.greenlets import GreenletExit
 
 from swift.common.ring import Ring
 from swift.common.utils import whataremyips, unlink_older_than, lock_path, \
-        renamer, compute_eta, get_logger
+        compute_eta, get_logger, write_pickle
 from swift.common.bufferedhttp import http_connect
 from swift.common.daemon import Daemon
 
@@ -105,9 +105,7 @@ def invalidate_hash(suffix_dir):
         except Exception:
             return
     hashes[suffix] = None
-    with open(hashes_file + '.tmp', 'wb') as fp:
-        pickle.dump(hashes, fp, PICKLE_PROTOCOL)
-    renamer(hashes_file + '.tmp', hashes_file)
+    write_pickle(hashes, hashes_file, partition_dir, PICKLE_PROTOCOL)
 
 
 def get_hashes(partition_dir, recalculate=[], do_listdir=False,
@@ -157,9 +155,7 @@ def get_hashes(partition_dir, recalculate=[], do_listdir=False,
                 modified = True
             sleep()
     if modified:
-        with open(hashes_file + '.tmp', 'wb') as fp:
-            pickle.dump(hashes, fp, PICKLE_PROTOCOL)
-        renamer(hashes_file + '.tmp', hashes_file)
+        write_pickle(hashes, hashes_file, partition_dir, PICKLE_PROTOCOL)
     return hashed, hashes
diff --git a/test/probe/test_container_failures.py b/test/probe/test_container_failures.py
index 585835d2a8..a493bffc27 100755
--- a/test/probe/test_container_failures.py
+++ b/test/probe/test_container_failures.py
@@ -15,13 +15,17 @@
 #    limitations under the License.
 
 import unittest
+import os
 from os import kill
 from signal import SIGTERM
 from subprocess import Popen
 from time import sleep
 from uuid import uuid4
+import eventlet
+import sqlite3
 
 from swift.common import client
+from swift.common.utils import hash_path, readconf
 
 from test.probe.common import get_to_final_state, kill_pids, reset_environment
 
@@ -316,6 +320,61 @@ class TestContainerFailures(unittest.TestCase):
         self.assert_(object2 in [o['name'] for o in
                      client.get_container(self.url, self.token, container)[1]])
 
+    def _get_db_file_path(self, obj_dir):
+        files = sorted(os.listdir(obj_dir), reverse=True)
+        for file in files:
+            if file.endswith('db'):
+                return os.path.join(obj_dir, file)
+
+    def _get_container_db_files(self, container):
+        opart, onodes = self.container_ring.get_nodes(self.account, container)
+        onode = onodes[0]
+        db_files = []
+        for onode in onodes:
+            node_id = (onode['port'] - 6000) / 10
+            device = onode['device']
+            hash_str = hash_path(self.account, container)
+            server_conf = readconf('/etc/swift/container-server/%s.conf' %
+                                   node_id)
+            devices = server_conf['app:container-server']['devices']
+            obj_dir = '%s/%s/containers/%s/%s/%s/' % (devices,
+                                                      device, opart,
+                                                      hash_str[-3:], hash_str)
+            db_files.append(self._get_db_file_path(obj_dir))
+
+        return db_files
+
+    def test_locked_container_dbs(self):
+
+        def run_test(num_locks, catch_503):
+            container = 'container-%s' % uuid4()
+            client.put_container(self.url, self.token, container)
+            db_files = self._get_container_db_files(container)
+            db_conns = []
+            for i in range(num_locks):
+                db_conn = sqlite3.connect(db_files[i])
+                db_conn.execute('begin exclusive transaction')
+                db_conns.append(db_conn)
+            if catch_503:
+                try:
+                    client.delete_container(self.url, self.token, container)
+                except client.ClientException, e:
+                    self.assertEquals(e.http_status, 503)
+            else:
+                client.delete_container(self.url, self.token, container)
+
+        pool = eventlet.GreenPool()
+        try:
+            with eventlet.Timeout(15):
+                p = pool.spawn(run_test, 1, False)
+                r = pool.spawn(run_test, 2, True)
+                q = pool.spawn(run_test, 3, True)
+                pool.waitall()
+        except eventlet.Timeout, e:
+            raise Exception(
+                "The server did not return a 503 on container db locks, "
+                "it just hangs: %s" % e)
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py
index 782a31d2c5..efd1ee857d 100644
--- a/test/unit/proxy/test_server.py
+++ b/test/unit/proxy/test_server.py
@@ -161,8 +161,10 @@ def fake_http_connect(*code_iter, **kwargs):
             self.body = body
 
         def getresponse(self):
-            if 'raise_exc' in kwargs:
+            if kwargs.get('raise_exc'):
                 raise Exception('test')
+            if kwargs.get('raise_timeout_exc'):
+                raise TimeoutError()
             return self
 
         def getexpect(self):
@@ -341,6 +343,14 @@ class TestController(unittest.TestCase):
             self.assertEqual(p, partition)
             self.assertEqual(n, nodes)
 
+    def test_make_requests(self):
+        with save_globals():
+            proxy_server.http_connect = fake_http_connect(200)
+            partition, nodes = self.controller.account_info(self.account)
+            proxy_server.http_connect = fake_http_connect(201,
+                                            raise_timeout_exc=True)
+            self.controller._make_request(nodes, partition, 'POST','/','','')
+
     # tests if 200 is cached and used
     def test_account_info_200(self):
         with save_globals():
@@ -1893,8 +1903,8 @@
             _test_sockets
         orig_update_request = prosrv.update_request
 
-        def broken_update_request(env, req):
-            raise Exception('fake')
+        def broken_update_request(*args, **kwargs):
+            raise Exception('fake: this should be printed')
 
         prosrv.update_request = broken_update_request
         sock = connect_tcp(('localhost', prolis.getsockname()[1]))
@@ -1925,6 +1935,35 @@
         self.assertEquals(headers[:len(exp)], exp)
         self.assert_('\r\nContent-Length: 0\r\n' in headers)
 
+    def test_client_ip_logging(self):
+        # test that the client ip field in the log gets populated with the
+        # ip instead of being blank
+        (prosrv, acc1srv, acc2srv, con2srv, con2srv, obj1srv, obj2srv) = \
+            _test_servers
+        (prolis, acc1lis, acc2lis, con2lis, con2lis, obj1lis, obj2lis) = \
+            _test_sockets
+
+        class Logger(object):
+
+            def info(self, msg):
+                self.msg = msg
+
+        orig_logger, orig_access_logger = prosrv.logger, prosrv.access_logger
+        prosrv.logger = prosrv.access_logger = Logger()
+        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+        fd = sock.makefile()
+        fd.write(
+            'GET /v1/a?format=json HTTP/1.1\r\nHost: localhost\r\n'
+            'Connection: close\r\nX-Auth-Token: t\r\n'
+            'Content-Length: 0\r\n'
+            '\r\n')
+        fd.flush()
+        headers = readuntil2crlfs(fd)
+        exp = 'HTTP/1.1 200'
+        self.assertEquals(headers[:len(exp)], exp)
+        exp = '127.0.0.1 127.0.0.1'
+        self.assert_(exp in prosrv.logger.msg)
+
     def test_chunked_put_logging(self):
         # GET account with a query string to test that
         # Application.log_request logs the query string. Also, throws
