optimize the sync worker

This change optimizes the sync worker for the case where we only have to
listen on one interface. While I'm here, I fixed a long-standing, unnoticed
issue when accepting on multiple interfaces (I wonder if anyone really uses
it): at some point some interfaces were skipped.
This commit is contained in:
benoitc 2014-10-25 11:58:28 +02:00
parent c152ce0dd0
commit 4c601ce447

View File

@ -21,13 +21,38 @@ from gunicorn import six
class SyncWorker(base.Worker): class SyncWorker(base.Worker):
def accept(self, listener):
    """Accept one pending connection from *listener* and handle it.

    The accepted client socket is switched to blocking mode (the
    listening sockets are set non-blocking in run(), and the accepted
    socket may inherit that flag) and marked close-on-exec before
    being passed to self.handle().
    """
    client, addr = listener.accept()
    # Request handling expects a blocking client socket.
    client.setblocking(1)
    util.close_on_exec(client)
    self.handle(listener, client, addr)
def wait(self, timeout):
    """Wait up to *timeout* seconds for a listener to become readable.

    Returns:
        The list of ready listener sockets; or all listeners when the
        select() was interrupted (EINTR) or hit EBADF with self.nr < 0
        (a listener was closed under us, e.g. during a reload); or a
        falsy value (implicit None on a plain timeout, False on the
        other EBADF case) to tell the caller to stop waiting.

    Raises:
        select.error: for any error other than EINTR/EBADF.
    """
    try:
        self.notify()
        ret = select.select(self.sockets, [], self.PIPE, timeout)
        if ret[0]:
            return ret[0]
        # NOTE(review): falls through to an implicit None on a plain
        # timeout; callers treat any falsy result as "stop" — confirm
        # this is the intended idle behavior.
    except select.error as e:
        if e.args[0] == errno.EINTR:
            return self.sockets
        if e.args[0] == errno.EBADF:
            # presumably self.nr < 0 means no request handled yet —
            # verify against the base Worker; retry in that case.
            if self.nr < 0:
                return self.sockets
            else:
                return False
        raise
def is_parent_alive(self):
    """Return True while our original parent process is still alive.

    If the parent pid changed, the worker was orphaned and re-parented,
    so it should shut itself down.
    """
    # If our parent changed then we shut down.
    if self.ppid != os.getppid():
        self.log.info("Parent changed, shutting down: %s", self)
        return False
    return True
def run_for_one(self, timeout):
    """Accept loop for the common single-listener case.

    Avoids building and iterating ready-socket lists when only one
    listening socket exists.
    """
    listener = self.sockets[0]
    while self.alive:
        self.notify()

        # Accept a connection. If we get an error telling us
        # that no connection is waiting we fall down to the
        # select which is where we'll wait for a bit for new
        # workers to come give us some love.
        try:
            self.accept(listener)
            # Keep processing clients until no one is waiting. This
            # prevents the need to select() for every client that we
            # process.
            continue
        except socket.error as e:
            if e.args[0] not in (errno.EAGAIN, errno.ECONNABORTED,
                    errno.EWOULDBLOCK):
                raise

        if not self.is_parent_alive():
            return

        # NOTE(review): wait() returns a falsy value on a plain
        # timeout, which makes an idle worker return here (and be
        # respawned by the arbiter) — confirm this is intended.
        if not self.wait(timeout):
            return
def run_for_multiple(self, timeout):
    """Accept loop for the multi-listener case.

    Waits for readiness across all listeners and accepts from every
    ready one, so no interface is skipped.
    """
    while self.alive:
        self.notify()

        ready = self.wait(timeout)
        if not ready:
            return

        for listener in ready:
            try:
                self.accept(listener)
            except socket.error as e:
                if e.args[0] not in (errno.EAGAIN, errno.ECONNABORTED,
                        errno.EWOULDBLOCK):
                    raise

        if not self.is_parent_alive():
            return
def run(self):
    """Worker entry point: dispatch to the single- or multi-listener loop."""
    # if no timeout is given the worker will never wait and will
    # use the CPU for nothing. This minimal timeout prevent it.
    timeout = self.timeout or 0.5

    # self.socket appears to lose its blocking status after
    # we fork in the arbiter. Reset it here.
    for s in self.sockets:
        s.setblocking(0)

    if len(self.sockets) > 1:
        self.run_for_multiple(timeout)
    else:
        self.run_for_one(timeout)
def handle(self, listener, client, addr): def handle(self, listener, client, addr):
req = None req = None