From 7e4ca4b809e42a67763a388b23e90a107d6f717c Mon Sep 17 00:00:00 2001
From: "Paul J. Davis"
Date: Fri, 27 Aug 2010 21:24:59 -0400
Subject: [PATCH] Implementation of the max-requests feature.

Works on sync and eventlet workers. Doesn't work on gevent_pywsgi or
gevent_wsgi workers as we don't control their main loops. Tornado workers
appear to be broken.

Worst of all, this causes vanilla gevent workers to segfault. I'm waiting
to see if there's a known issue before considering what to do next. Worst
case we could refuse to run with the bad combination of settings.
---
 gunicorn/config.py        |   19 +++++++++++++++++++
 gunicorn/http/wsgi.py     |    6 +++++-
 gunicorn/workers/async.py |   12 +++++++-----
 gunicorn/workers/base.py  |    1 +
 gunicorn/workers/sync.py  |   21 +++++++++++----------
 5 files changed, 43 insertions(+), 16 deletions(-)

diff --git a/gunicorn/config.py b/gunicorn/config.py
index c8120ff1..c0b413de 100644
--- a/gunicorn/config.py
+++ b/gunicorn/config.py
@@ -321,6 +321,25 @@ class WorkerConnections(Setting):
         This setting only affects the Eventlet and Gevent worker types.
         """
 
+class MaxRequests(Setting):
+    name = "max_requests"
+    section = "Worker Processes"
+    cli = ["--max-requests"]
+    meta = "INT"
+    validator = validate_pos_int
+    type = "int"
+    default = 0
+    desc = """\
+        The maximum number of requests a worker will process before restarting.
+
+        Any value greater than zero will limit the number of requests a worker
+        will process before automatically restarting. This is a simple method
+        to help limit the damage of memory leaks.
+
+        If this is set to zero (the default) then the automatic worker
+        restarts are disabled.
+        """
+
 class Timeout(Setting):
     name = "timeout"
     section = "Worker Processes"
diff --git a/gunicorn/http/wsgi.py b/gunicorn/http/wsgi.py
index 0df5d2ca..6e8a4bfc 100644
--- a/gunicorn/http/wsgi.py
+++ b/gunicorn/http/wsgi.py
@@ -115,9 +115,13 @@ class Response(object):
         self.version = SERVER_VERSION
         self.status = None
         self.chunked = False
+        self.should_close = req.should_close()
         self.headers = []
         self.headers_sent = False
 
+    def force_close(self):
+        self.should_close = True
+
     def start_response(self, status, headers, exc_info=None):
         if exc_info:
             try:
@@ -151,7 +155,7 @@
 
     def default_headers(self):
         connection = "keep-alive"
-        if self.req.should_close():
+        if self.should_close:
             connection = "close"
 
         return [
diff --git a/gunicorn/workers/async.py b/gunicorn/workers/async.py
index 2bba1635..0757b021 100644
--- a/gunicorn/workers/async.py
+++ b/gunicorn/workers/async.py
@@ -29,15 +29,13 @@ class AsyncWorker(base.Worker):
         try:
             parser = http.RequestParser(client)
             try:
-                while True:
+                while self.alive:
                     req = None
                     with self.timeout_ctx():
                         req = parser.next()
-
                     if not req:
                         break
                     self.handle_request(req, client, addr)
-
             except StopIteration:
                 pass
         except socket.error, e:
@@ -64,8 +62,12 @@
         try:
             debug = self.cfg.debug or False
             self.cfg.pre_request(self, req)
-            resp, environ = wsgi.create(req, sock, addr, self.address,
-                                self.cfg)
+            resp, environ = wsgi.create(req, sock, addr, self.address, self.cfg)
+            self.nr += 1
+            if self.nr >= self.max_requests:
+                self.log.info("Autorestarting worker after current request.")
+                resp.force_close()
+                self.alive = False
             respiter = self.wsgi(environ, resp.start_response)
             if respiter == ALREADY_HANDLED:
                 return False
diff --git a/gunicorn/workers/base.py b/gunicorn/workers/base.py
index e87f2939..27c51617 100644
--- a/gunicorn/workers/base.py
+++ b/gunicorn/workers/base.py
@@ -37,6 +37,7 @@ class Worker(object):
         self.cfg = cfg
         self.booted = False
         self.nr = 0
+        self.max_requests = cfg.max_requests or sys.maxint
         self.alive = True
         self.spinner = 0
         self.log = logging.getLogger(__name__)
diff --git a/gunicorn/workers/sync.py b/gunicorn/workers/sync.py
index bdc6f8a0..c2d7eea5 100644
--- a/gunicorn/workers/sync.py
+++ b/gunicorn/workers/sync.py
@@ -18,14 +18,11 @@ import gunicorn.workers.base as base
 class SyncWorker(base.Worker):
 
     def run(self):
-        self.nr = 0
-
         # self.socket appears to lose its blocking status after
         # we fork in the arbiter. Reset it here.
         self.socket.setblocking(0)
 
         while self.alive:
-            self.nr = 0
             self.notify()
 
             # Accept a connection. If we get an error telling us
@@ -37,17 +34,16 @@
                 client.setblocking(1)
                 util.close_on_exec(client)
                 self.handle(client, addr)
-                self.nr += 1
+
+                # Keep processing clients until no one is waiting. This
+                # prevents the need to select() for every client that we
+                # process.
+                continue
+
             except socket.error, e:
                 if e[0] not in (errno.EAGAIN, errno.ECONNABORTED):
                     raise
 
-            # Keep processing clients until no one is waiting. This
-            # prevents the need to select() for every client that we
-            # process.
-            if self.nr > 0:
-                continue
-
             # If our parent changed then we shut down.
             if self.ppid != os.getppid():
                 self.log.info("Parent changed, shutting down: %s" % self)
@@ -97,6 +93,11 @@
             self.cfg.pre_request(self, req)
             resp, environ = wsgi.create(req, client, addr, self.address,
                                 self.cfg)
+            self.nr += 1
+            if self.nr >= self.max_requests:
+                self.log.info("Autorestarting worker after current request.")
+                resp.force_close()
+                self.alive = False
             respiter = self.wsgi(environ, resp.start_response)
             for item in respiter:
                 resp.write(item)
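
Usage note, not part of the patch itself: the new setting is registered like
any other gunicorn setting, so the cap can come from a Python config file or
the command line. A minimal sketch, where the worker count, the limit of 1000
and the app module "myapp:app" are placeholder values:

    # gunicorn.conf.py -- loaded with: gunicorn -c gunicorn.conf.py myapp:app
    # Restart a worker once it has handled 1000 requests; leaving the value
    # at 0 (the default) keeps the automatic restarts disabled.
    workers = 4
    max_requests = 1000

The same limit can be passed as --max-requests 1000 on the command line. When
the cap is hit the in-flight request is still served: force_close() marks the
response "Connection: close" and the worker stops accepting new requests
because self.alive is set to False.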