This article analyzes the code flow of the two remaining statements in the start method. The code is as follows:
# usr/lib/python2.7/site-packages/oslo_messaging/server.py
    def start(self):
        """Start handling incoming messages.

        This method causes the server to begin polling the transport for
        incoming messages and passing them to the dispatcher. Message
        processing will continue until the stop() method is called.

        The executor controls how the server integrates with the applications
        I/O handling strategy - it may choose to poll for messages in a new
        process, thread or co-operatively scheduled coroutine or simply by
        registering a callback with an event loop. Similarly, the executor may
        choose to dispatch messages in a new thread, coroutine or simply the
        current thread.
        """
        if self._executor is not None:
            return
        try:
            listener = self.dispatcher._listen(self.transport)
        except driver_base.TransportDriverError as ex:
            raise ServerListenError(self.target, ex)
        self._executor = self._executor_cls(self.conf, listener,
                                            self.dispatcher)
        self._executor.start()
Here self._executor_cls is the class oslo_messaging._executors.impl_eventlet:EventletExecutor, so self._executor is an EventletExecutor object.
# usr/lib/python2.7/site-packages/oslo_messaging/_executors/impl_eventlet.py
class EventletExecutor(base.PooledExecutorBase):
    """A message executor which integrates with eventlet.

    This is an executor which polls for incoming messages from a greenthread
    and dispatches each message in its own greenthread.

    The stop() method kills the message polling greenthread and the wait()
    method waits for all message dispatch greenthreads to complete.
    """

    def __init__(self, conf, listener, dispatcher):
        super(EventletExecutor, self).__init__(conf, listener, dispatcher)
        self._thread = None
        self._greenpool = greenpool.GreenPool(self.conf.rpc_thread_pool_size)
        self._running = False
        if not isinstance(localcontext._STORE, greenthreading.local):
            LOG.debug('eventlet executor in use but the threading module '
                      'has not been monkeypatched or has been '
                      'monkeypatched after the oslo.messaging library '
                      'have been loaded. This will results in unpredictable '
                      'behavior. In the future, we will raise a '
                      'RuntimeException in this case.')

    def start(self):
        if self._thread is not None:
            return

        @excutils.forever_retry_uncaught_exceptions
        def _executor_thread():
            try:
                while self._running:
                    incoming = self.listener.poll()
                    if incoming is not None:
                        self._dispatch(incoming)
            except greenlet.GreenletExit:
                return

        self._running = True
        self._thread = eventlet.spawn(_executor_thread)
When start is executed, it spawns a greenthread that keeps polling for incoming messages; whenever a message arrives, a separate greenthread is spawned to dispatch that message. Let's first analyze the poll() method that listens for incoming messages.
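To make the division of labor concrete, here is a minimal, self-contained sketch of the same poll-and-dispatch pattern using eventlet. The FakeListener and handle function are hypothetical stand-ins for illustration, not oslo.messaging code: one greenthread loops on poll(), and each received message is handed to its own greenthread from the pool.

import eventlet
eventlet.monkey_patch()

from eventlet import greenpool


class FakeListener(object):
    """Hypothetical listener: returns a few messages, then None forever."""
    def __init__(self, messages):
        self._messages = list(messages)

    def poll(self, timeout=None):
        if self._messages:
            return self._messages.pop(0)
        eventlet.sleep(0.1)   # simulate waiting on the transport
        return None


def handle(message):
    print('dispatched in its own greenthread: %r' % (message,))


def run_executor(listener, pool, rounds=10):
    # One polling greenthread: each non-empty poll result is dispatched
    # in its own greenthread taken from the pool.
    for _ in range(rounds):
        incoming = listener.poll()
        if incoming is not None:
            pool.spawn(handle, incoming)
    pool.waitall()


pool = greenpool.GreenPool(64)
listener = FakeListener(['msg-1', 'msg-2', 'msg-3'])
eventlet.spawn(run_executor, listener, pool).wait()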
#/usr/lib/python2.7/site-packages/oslo_messaging/_drivers/amqpdriver.py:AMQPListener
    def poll(self, timeout=None):
        while not self._stopped.is_set():
            if self.incoming:
                return self.incoming.pop(0)
            try:
                self.conn.consume(limit=1, timeout=timeout)
            except rpc_common.Timeout:
                return None

#/usr/lib/python2.7/site-packages/oslo_messaging/_drivers/impl_rabbit.py:Connection
    def consume(self, limit=None, timeout=None):
        """Consume from all queues/consumers."""
        with self._connection_lock:
            it = self.iterconsume(limit=limit, timeout=timeout)
            while True:
                try:
                    six.next(it)
                except StopIteration:
                    return

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers.

        NOTE(sileht): Must be called within the connection lock
        """

        timer = rpc_common.DecayingTimer(duration=timeout)
        timer.start()

        def _raise_timeout(exc):
            LOG.debug('Timed out waiting for RPC response: %s', exc)
            raise rpc_common.Timeout()

        def _recoverable_error_callback(exc):
            self.do_consume = True
            timer.check_return(_raise_timeout, exc)

        def _error_callback(exc):
            _recoverable_error_callback(exc)
            LOG.error(_('Failed to consume message from queue: %s'),
                      exc)

        def _consume():
            if self.do_consume:
                queues_head = self.consumers[:-1]  # not fanout.
                queues_tail = self.consumers[-1]  # fanout
                for queue in queues_head:
                    queue.consume(nowait=True)
                queues_tail.consume(nowait=False)
                self.do_consume = False

            poll_timeout = (self._poll_timeout if timeout is None
                            else min(timeout, self._poll_timeout))
            while True:
                if self._consume_loop_stopped:
                    self._consume_loop_stopped = False
                    raise StopIteration

                if self._heartbeat_supported_and_enabled():
                    self.connection.heartbeat_check(
                        rate=self.driver_conf.heartbeat_rate)
                try:
                    return self.connection.drain_events(timeout=poll_timeout)
                except socket.timeout as exc:
                    poll_timeout = timer.check_return(
                        _raise_timeout, exc, maximum=self._poll_timeout)

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                raise StopIteration
            yield self.ensure(
                _consume,
                recoverable_error_callback=_recoverable_error_callback,
                error_callback=_error_callback)
Continuing down the call chain eventually reaches kombu's autoretry method, whose code is not shown here. Through the _consume callable that was passed down, autoretry ultimately calls back into the _consume function at the oslo_messaging layer. That function is defined inside iterconsume, so we now look at the _consume function within iterconsume in detail.
Here self.consumers is the list holding the consumers created earlier. For the Nova-scheduler component, two topic consumers are created first and appended to self.consumers, followed by one fanout consumer. _consume then takes the consumers out of this list and calls the consume method of the ConsumerBase class on each of them.
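As a rough illustration of how _consume walks that list (the queue names below are assumptions for a typical Nova-scheduler setup, not taken from the code above): the slice consumers[:-1] covers the topic consumers, and consumers[-1] is the fanout consumer, the only one consumed with nowait=False.

# A minimal sketch, assuming the consumers list is laid out the way the text
# describes for Nova-scheduler: two topic consumers followed by one fanout
# consumer. The Consumer class here is a hypothetical stand-in.
class Consumer(object):
    def __init__(self, name):
        self.name = name

    def consume(self, nowait):
        print('basic.consume on %s (nowait=%s)' % (self.name, nowait))


consumers = [
    Consumer('scheduler'),                # topic queue (assumed name)
    Consumer('scheduler.host-1'),         # per-host topic queue (assumed name)
    Consumer('scheduler_fanout_<uuid>'),  # fanout queue (assumed name)
]

queues_head = consumers[:-1]   # all but the last: the topic consumers
queues_tail = consumers[-1]    # the last one: the fanout consumer

for queue in queues_head:
    queue.consume(nowait=True)       # don't wait for basic.consume-ok
queues_tail.consume(nowait=False)    # wait for the broker's reply on the last one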
#/usr/lib/python2.7/site-packages/oslo_messaging/_drivers/impl_rabbit.py:ConsumerBase
    def consume(self, *args, **kwargs):
        """Actually declare the consumer on the amqp channel.  This will
        start the flow of messages from the queue.  Using the
        Connection.iterconsume() iterator will process the messages,
        calling the appropriate callback.

        If a callback is specified in kwargs, use that.  Otherwise,
        use the callback passed during __init__()

        If kwargs['nowait'] is True, then this call will block until
        a message is read.
        """

        options = {'consumer_tag': self.tag}
        options['nowait'] = kwargs.get('nowait', False)
        callback = kwargs.get('callback', self.callback)
        if not callback:
            raise ValueError("No callback defined")

        def _callback(message):
            m2p = getattr(self.channel, 'message_to_python', None)
            if m2p:
                message = m2p(message)
            self._callback_handler(message, callback)

        self.queue.consume(*args, callback=_callback, **options)

    def _callback_handler(self, message, callback):
        """Call callback with deserialized message.

        Messages that are processed and ack'ed.
        """

        try:
            callback(RabbitMessage(message))
        except Exception:
            LOG.exception(_("Failed to process message"
                            " ... skipping it."))
            message.ack()
#/usr/lib/python2.7/site-packages/kombu/entity.py:Queue
    def consume(self, consumer_tag='', callback=None,
                no_ack=None, nowait=False):
        """Start a queue consumer.

        Consumers last as long as the channel they were created on, or
        until the client cancels them.

        :keyword consumer_tag: Unique identifier for the consumer. The
          consumer tag is local to a connection, so two clients
          can use the same consumer tags. If this field is empty
          the server will generate a unique tag.

        :keyword no_ack: If enabled the broker will automatically
            ack messages.

        :keyword nowait: Do not wait for a reply.

        :keyword callback: callback called for each delivered message

        """
        if no_ack is None:
            no_ack = self.no_ack
        return self.channel.basic_consume(queue=self.name,
                                          no_ack=no_ack,
                                          consumer_tag=consumer_tag or '',
                                          callback=callback,
                                          nowait=nowait)
#/usr/lib/python2.7/site-packages/amqp/channel.py
    def basic_consume(self, queue='', consumer_tag='', no_local=False,
                      no_ack=False, exclusive=False, nowait=False,
                      callback=None, arguments=None, on_cancel=None):
        """Start a queue consumer

        This method asks the server to start a "consumer", which is
        a transient request for messages from a specific queue.
        Consumers last as long as the channel they were created on,
        or until the client cancels them.

        RULE:

            The server SHOULD support at least 16 consumers per
            queue, unless the queue was declared as private, and
            ideally, impose no limit except as defined by available
            resources.

        PARAMETERS:
            queue: shortstr

                Specifies the name of the queue to consume from.  If
                the queue name is null, refers to the current queue
                for the channel, which is the last declared queue.

                RULE:

                    If the client did not previously declare a queue,
                    and the queue name in this method is empty, the
                    server MUST raise a connection exception with
                    reply code 530 (not allowed).

            consumer_tag: shortstr

                Specifies the identifier for the consumer. The
                consumer tag is local to a connection, so two clients
                can use the same consumer tags. If this field is
                empty the server will generate a unique tag.

                RULE:

                    The tag MUST NOT refer to an existing consumer.
                    If the client attempts to create two consumers
                    with the same non-empty tag the server MUST raise
                    a connection exception with reply code 530 (not
                    allowed).

            no_local: boolean

                do not deliver own messages

                If the no-local field is set the server will not send
                messages to the client that published them.

            no_ack: boolean

                no acknowledgement needed

                If this field is set the server does not expect
                acknowledgments for messages.  That is, when a
                message is delivered to the client the server
                automatically and silently acknowledges it on behalf
                of the client.  This functionality increases
                performance but at the cost of reliability.  Messages
                can get lost if a client dies before it can deliver
                them to the application.

            exclusive: boolean

                request exclusive access

                Request exclusive consumer access, meaning only this
                consumer can access the queue.

                RULE:

                    If the server cannot grant exclusive access to
                    the queue when asked, - because there are other
                    consumers active - it MUST raise a channel
                    exception with return code 403 (access refused).

            nowait: boolean

                do not send a reply method

                If set, the server will not respond to the method.
                The client should not wait for a reply method.  If
                the server could not complete the method it will
                raise a channel or connection exception.

            callback: Python callable

                function/method called with each delivered message

                For each message delivered by the broker, the
                callable will be called with a Message object
                as the single argument.  If no callable is specified,
                messages are quietly discarded, no_ack should probably
                be set to True in that case.

        """
        args = AMQPWriter()
        args.write_short(0)
        args.write_shortstr(queue)
        args.write_shortstr(consumer_tag)
        args.write_bit(no_local)
        args.write_bit(no_ack)
        args.write_bit(exclusive)
        args.write_bit(nowait)
        args.write_table(arguments or {})
        self._send_method((60, 20), args)

        if not nowait:
            consumer_tag = self.wait(allowed_methods=[
                (60, 21),  # Channel.basic_consume_ok
            ])

        self.callbacks[consumer_tag] = callback

        if on_cancel:
            self.cancel_callbacks[consumer_tag] = on_cancel
        if no_ack:
            self.no_ack_consumers.add(consumer_tag)

        return consumer_tag
Here the consume method of kombu's Queue calls basic_consume on the amqp-level channel. That method saves the callback via self.callbacks[consumer_tag] = callback and then waits for messages to arrive, at which point the callback will be invoked. The actual waiting for messages happens in self.connection.drain_events(timeout=poll_timeout).
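The register-then-dispatch pattern is easy to see in isolation. Below is a stripped-down imitation of the callbacks registry shown above; ToyChannel is a toy class written for this article, not the amqp library:

# basic_consume stores the callback under the consumer_tag; delivery later
# looks it up by the same tag (rough analogue of _basic_deliver).
class ToyChannel(object):
    def __init__(self):
        self.callbacks = {}

    def basic_consume(self, queue, consumer_tag, callback):
        # The real library may let the broker generate the tag;
        # here we just use whatever was passed in.
        self.callbacks[consumer_tag] = callback
        return consumer_tag

    def deliver(self, consumer_tag, message):
        fun = self.callbacks.get(consumer_tag)
        if fun is not None:
            fun(message)


def on_message(msg):
    print('got: %r' % (msg,))


channel = ToyChannel()
channel.basic_consume(queue='scheduler', consumer_tag='1', callback=on_message)
channel.deliver('1', {'method': 'select_destinations'})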
#/usr/lib/python2.7/site-packages/kombu/connection.py:Connection
    def drain_events(self, **kwargs):
        """Wait for a single event from the server.

        :keyword timeout: Timeout in seconds before we give up.
            Raises :exc:`socket.timeout` if the timeout is exceeded.

        Usually used from an event loop.

        """
        return self.transport.drain_events(self.connection, **kwargs)

#/usr/lib/python2.7/site-packages/kombu/transport/pyamqp.py:Transport
    def drain_events(self, connection, **kwargs):
        return connection.drain_events(**kwargs)
In this way, the kombu layer calls into the amqp layer's wait-for-message code. We won't go further into the amqp layer here; roughly, the flow is: when a message arrives in the corresponding queue, the RabbitMQ server sends the message, together with the method_sig of the operation the consumer should perform, over the socket. The consumer maps the method_sig to the corresponding amqp method name and then calls that method to process the message.
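Conceptually the amqp layer keeps a table from (class_id, method_id) pairs to handler methods, roughly like the sketch below. The numbers follow the AMQP 0-9-1 method numbering (class 60 = basic, method 60 = deliver, 21 = consume-ok), and the handler names mirror those used in this article, but the table itself is only an illustration, not the library's actual data structure:

METHOD_MAP = {
    (60, 20): 'basic_consume',       # sent by the client
    (60, 21): '_basic_consume_ok',   # reply from the broker
    (60, 60): '_basic_deliver',      # broker pushes a message to the consumer
}


def handle_incoming(channel, method_sig, args, msg):
    # Look up the handler for this (class_id, method_id) pair and call it.
    # Content-carrying methods such as basic.deliver receive (args, msg).
    handler_name = METHOD_MAP[method_sig]
    handler = getattr(channel, handler_name)
    return handler(args, msg)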
Below we walk through the message handling flow with an example:
1. When a message arrives in the queue, the RabbitMQ server transmits the message and a method_sig over the socket. For a delivery the method_sig is (60, 60), and the amqp method corresponding to (60, 60) is _basic_deliver, so the consumer side executes the _basic_deliver method. Its code flow is as follows:
#/usr/lib/python2.7/site-packages/amqp/channel.py
    def _basic_deliver(self, args, msg):
        """Notify the client of a consumer message

        This method delivers a message to the client, via a consumer.
        In the asynchronous message delivery model, the client starts
        a consumer using the Consume method, then the server responds
        with Deliver methods as and when messages arrive for that
        consumer.

        RULE:

            The server SHOULD track the number of times a message has
            been delivered to clients and when a message is
            redelivered a certain number of times - e.g. 5 times -
            without being acknowledged, the server SHOULD consider the
            message to be unprocessable (possibly causing client
            applications to abort), and move the message to a dead
            letter queue.

        PARAMETERS:
            consumer_tag: shortstr

                consumer tag

                Identifier for the consumer, valid within the current
                connection.

                RULE:

                    The consumer tag is valid only within the channel
                    from which the consumer was created. I.e. a client
                    MUST NOT create a consumer in one channel and then
                    use it in another.

            delivery_tag: longlong

                server-assigned delivery tag

                The server-assigned and channel-specific delivery tag

                RULE:

                    The delivery tag is valid only within the channel
                    from which the message was received.  I.e. a client
                    MUST NOT receive a message on one channel and then
                    acknowledge it on another.

                RULE:

                    The server MUST NOT use a zero value for delivery
                    tags.  Zero is reserved for client use, meaning
                    "all messages so far received".

            redelivered: boolean

                message is being redelivered

                This indicates that the message has been previously
                delivered to this or another client.

            exchange: shortstr

                Specifies the name of the exchange that the message
                was originally published to.

            routing_key: shortstr

                Message routing key

                Specifies the routing key name specified when the
                message was published.

        """
        consumer_tag = args.read_shortstr()
        delivery_tag = args.read_longlong()
        redelivered = args.read_bit()
        exchange = args.read_shortstr()
        routing_key = args.read_shortstr()

        msg.channel = self
        msg.delivery_info = {
            'consumer_tag': consumer_tag,
            'delivery_tag': delivery_tag,
            'redelivered': redelivered,
            'exchange': exchange,
            'routing_key': routing_key,
        }

        try:
            fun = self.callbacks[consumer_tag]
        except KeyError:
            pass
        else:
            fun(msg)
Take the Nova-scheduler component: as the consume code flow above shows, the callbacks of all three of its consumers are the _callback function defined inside the consume method of /usr/lib/python2.7/site-packages/oslo_messaging/_drivers/impl_rabbit.py:ConsumerBase. Using the consumer whose topic is scheduler as the example, its consumer_tag is 1, so when fun(msg) is executed, that _callback function is called.
2. Invoking the _callback method
#/usr/lib/python2.7/site-packages/oslo_messaging/_drivers/impl_rabbit.py:ConsumerBase
        def _callback(message):
            m2p = getattr(self.channel, 'message_to_python', None)
            if m2p:
                message = m2p(message)
            self._callback_handler(message, callback)

        self.queue.consume(*args, callback=_callback, **options)

    def _callback_handler(self, message, callback):
        """Call callback with deserialized message.

        Messages that are processed and ack'ed.
        """

        try:
            callback(RabbitMessage(message))
        except Exception:
            LOG.exception(_("Failed to process message"
                            " ... skipping it."))
            message.ack()
Here, the encoded message body is first converted into a message object that Python can work with, and then _callback_handler is called, which in turn invokes callback. From the consumer creation flow analyzed earlier we know that this callback is an AMQPListener object, so executing callback(RabbitMessage(message)) runs the __call__ method of the AMQPListener class.
3. Executing AMQPListener's __call__ method
#/usr/lib/python2.7/site-packages/oslo_messaging/_drivers/amqpdriver.py:AMQPListener
    def __call__(self, message):
        ctxt = rpc_amqp.unpack_context(self.conf, message)

        # FIXME(sileht): Don't log the message until strutils is more
        # efficient, (rpc_amqp.unpack_context already log the context)
        # LOG.debug(u'received: %s',
        #           strutils.mask_password(six.text_type(dict(message))))

        unique_id = self.msg_id_cache.check_duplicate_message(message)

        self.incoming.append(AMQPIncomingMessage(self,
                                                 ctxt.to_dict(),
                                                 message,
                                                 unique_id,
                                                 ctxt.msg_id,
                                                 ctxt.reply_q))
At this point the consumer has appended the arriving message to the incoming list. The amqp layer's _basic_deliver therefore finishes, and the consumer side returns to the oslo_messaging layer for further processing of the messages in incoming.
4. Processing the messages in the incoming list
#/usr/lib/python2.7/site-packages/oslo_messaging/_drivers/amqpdriver.py:AMQPListener
    def poll(self, timeout=None):
        while not self._stopped.is_set():
            if self.incoming:
                return self.incoming.pop(0)
            try:
                self.conn.consume(limit=1, timeout=timeout)
            except rpc_common.Timeout:
                return None
Thus, by the time self.conn.consume(limit=1, timeout=timeout) returns, the consumer has already stored the message from the queue in the self.incoming list. Executing return self.incoming.pop(0) then returns the message to the start function in usr/lib/python2.7/site-packages/oslo_messaging/_executors/impl_eventlet.py.
#usr/lib/python2.7/site-packages/oslo_messaging/_executors/impl_eventlet.py
    def start(self):
        if self._thread is not None:
            return

        @excutils.forever_retry_uncaught_exceptions
        def _executor_thread():
            try:
                while self._running:
                    incoming = self.listener.poll()
                    if incoming is not None:
                        self._dispatch(incoming)
            except greenlet.GreenletExit:
                return

    def _dispatch(self, incoming):
        spawn_with(ctxt=self.dispatcher(incoming), pool=self._greenpool)
Here, self.listener.poll() assigns the received message to incoming, which is then dispatched. self.dispatcher is an RPCDispatcher object, so calling it executes RPCDispatcher's __call__ method, shown below:
#usr/lib/python2.7/site-packages/oslo_messaging/rpc/dispatcher.py
    @contextlib.contextmanager
    def __call__(self, incoming, executor_callback=None):
        incoming.acknowledge()
        yield lambda: self._dispatch_and_reply(incoming, executor_callback)
#usr/lib/python2.7/site-packages/oslo_messaging/_executors/impl_eventlet.py
def spawn_with(ctxt, pool):
    """This is the equivalent of a with statement
    but with the content of the BLOCK statement executed
    into a greenthread

    exception path grab from:
    http://www.python.org/dev/peps/pep-0343/
    """

    def complete(thread, exit):
        exc = True
        try:
            try:
                thread.wait()
            except Exception:
                exc = False
                if not exit(*sys.exc_info()):
                    raise
        finally:
            if exc:
                exit(None, None, None)

    callback = ctxt.__enter__()
    thread = pool.spawn(callback)
    thread.link(complete, ctxt.__exit__)

    return thread
Reading spawn_with together with __call__: __call__ is decorated with contextlib.contextmanager, which turns the code before the yield into the context manager's __enter__() method and the code after the yield into its __exit__() method. So callback = ctxt.__enter__() in spawn_with runs incoming.acknowledge() and returns the lambda that __call__ yields; thread = pool.spawn(callback) then spawns a greenthread that executes that lambda, i.e. self._dispatch_and_reply(incoming, executor_callback). Since nothing follows the yield here, ctxt.__exit__ executes no further code from __call__.
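A small self-contained demonstration of this @contextlib.contextmanager behaviour (the names below are made up for the example) shows how the pre-yield code maps to __enter__, the yielded value becomes the callback, and the post-yield code (empty in the real dispatcher) maps to __exit__:

import contextlib


@contextlib.contextmanager
def fake_dispatcher_call(incoming):
    # Everything before the yield runs inside __enter__().
    print('acknowledge %s' % incoming)
    # The yielded value is what __enter__() returns.
    yield lambda: 'dispatched %s' % incoming
    # Anything after the yield would run inside __exit__();
    # the real RPCDispatcher.__call__ has nothing here.


ctxt = fake_dispatcher_call('msg-1')
callback = ctxt.__enter__()       # prints "acknowledge msg-1", returns the lambda
print(callback())                 # "dispatched msg-1" - in oslo this runs in a greenthread
ctxt.__exit__(None, None, None)   # no post-yield code, so this does nothing here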
Here we focus on the code flow of self._dispatch_and_reply(incoming, executor_callback).
#usr/lib/python2.7/site-packages/oslo_messaging/rpc/dispatcher.py
    def _do_dispatch(self, endpoint, method, ctxt, args, executor_callback):
        ctxt = self.serializer.deserialize_context(ctxt)
        new_args = dict()
        for argname, arg in six.iteritems(args):
            new_args[argname] = self.serializer.deserialize_entity(ctxt, arg)
        func = getattr(endpoint, method)
        if executor_callback:
            result = executor_callback(func, ctxt, **new_args)
        else:
            result = func(ctxt, **new_args)
        return self.serializer.serialize_entity(ctxt, result)

    def _dispatch_and_reply(self, incoming, executor_callback):
        try:
            incoming.reply(self._dispatch(incoming.ctxt,
                                          incoming.message,
                                          executor_callback))
        except ExpectedException as e:
            LOG.debug(u'Expected exception during message handling (%s)',
                      e.exc_info[1])
            incoming.reply(failure=e.exc_info, log_failure=False)
        except Exception as e:
            # sys.exc_info() is deleted by LOG.exception().
            exc_info = sys.exc_info()
            LOG.error(_('Exception during message handling: %s'), e,
                      exc_info=exc_info)
            incoming.reply(failure=exc_info)
            # NOTE(dhellmann): Remove circular object reference
            # between the current stack frame and the traceback in
            # exc_info.
            del exc_info

    def _dispatch(self, ctxt, message, executor_callback=None):
        """Dispatch an RPC message to the appropriate endpoint method.

        :param ctxt: the request context
        :type ctxt: dict
        :param message: the message payload
        :type message: dict
        :raises: NoSuchMethod, UnsupportedVersion
        """
        method = message.get('method')
        args = message.get('args', {})
        namespace = message.get('namespace')
        version = message.get('version', '1.0')

        found_compatible = False
        for endpoint in self.endpoints:
            target = getattr(endpoint, 'target', None)
            if not target:
                target = self._default_target

            if not (self._is_namespace(target, namespace) and
                    self._is_compatible(target, version)):
                continue

            if hasattr(endpoint, method):
                localcontext.set_local_context(ctxt)
                try:
                    return self._do_dispatch(endpoint, method, ctxt, args,
                                             executor_callback)
                finally:
                    localcontext.clear_local_context()

            found_compatible = True

        if found_compatible:
            raise NoSuchMethod(method)
        else:
            raise UnsupportedVersion(version, method=method)
The main job of _dispatch is to find, among the endpoints, the method to execute. It iterates over all endpoints; for each one it fetches its target, falling back to the default target if the endpoint has none (an endpoint object may define its own target attribute, and when the client needs to call a method on that endpoint it can use prepare() to adjust the previously built target so the call is routed to the corresponding endpoint method). _dispatch then checks whether the namespace and version in the message match the namespace and version of the endpoint's target; if they match, hasattr(endpoint, method) checks whether the endpoint actually has the requested method, and if so _do_dispatch is executed. (endpoints is a list of objects, each of which may expose many methods.)
_do_dispatch then executes the method that was found: func = getattr(endpoint, method) resolves the function to call, and since executor_callback is None here, the function is called directly via result = func(ctxt, **new_args). Finally the result is returned to _dispatch_and_reply.
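The endpoint lookup and invocation amount to a getattr-based dispatch. Here is a minimal sketch of the idea; the SchedulerEndpoint class and the message contents are invented for illustration (real endpoints are manager objects such as the scheduler manager):

# A hedged sketch of endpoint dispatch: find an endpoint that has the
# requested method, then call it with the context and deserialized args.
class SchedulerEndpoint(object):
    # A real endpoint could also define a `target` attribute to pin a
    # namespace/version; omitted here for brevity.

    def select_destinations(self, ctxt, request_spec):
        return ['host-1']


def dispatch(endpoints, ctxt, message):
    method = message.get('method')
    args = message.get('args', {})
    for endpoint in endpoints:
        if hasattr(endpoint, method):
            func = getattr(endpoint, method)
            return func(ctxt, **args)     # executor_callback is None here
    raise AttributeError('no endpoint implements %s' % method)


message = {'method': 'select_destinations',
           'args': {'request_spec': {'num_instances': 1}}}
print(dispatch([SchedulerEndpoint()], {'user': 'demo'}, message))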
5. Replying with the result to the peer
At this point the execution result needs to be returned to the OpenStack-RPC-client side: a direct publisher is created and the result is sent through RabbitMQ to the direct consumer created on the client side. (This reply path only exists when the client invoked the server-side function with call; only then is the direct reply plumbing set up. If the client used cast, the OpenStack-RPC-server side does not return a result.)
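For reference, this is roughly how the two invocation styles look from the client side. A minimal sketch using the public oslo.messaging API; the transport URL, topic, version and method names are assumptions for illustration:

# A hedged sketch of call vs cast on the client side. Only call() waits for
# (and therefore exercises) the reply path analyzed in this section.
import oslo_messaging
from oslo_config import cfg

transport = oslo_messaging.get_transport(
    cfg.CONF, url='rabbit://guest:guest@localhost:5672/')          # assumed URL
target = oslo_messaging.Target(topic='scheduler', version='4.0')   # assumed topic/version
client = oslo_messaging.RPCClient(transport, target)

ctxt = {}
# call(): blocks until the server replies on the direct reply queue.
hosts = client.call(ctxt, 'select_destinations',
                    request_spec={'num_instances': 1})

# cast(): fire-and-forget; the server executes the method but sends no reply.
client.cast(ctxt, 'update_aggregates', aggregates=[])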
Let's now analyze the reply flow, i.e. the code path of incoming.reply(self._dispatch(incoming.ctxt, incoming.message, executor_callback)).

#/oslo_messaging/_drivers/amqpdriver.py:AMQPIncomingMessage
    def reply(self, reply=None, failure=None, log_failure=True):
        if not self.msg_id:
            # NOTE(Alexei_987) not sending reply, if msg_id is empty
            #    because reply should not be expected by caller side
            return
        with self.listener.driver._get_connection(
                rpc_amqp.PURPOSE_SEND) as conn:
            self._send_reply(conn, reply, failure, log_failure=log_failure)
            self._send_reply(conn, ending=True)

    def _send_reply(self, conn, reply=None, failure=None,
                    ending=False, log_failure=True):
        if failure:
            failure = rpc_common.serialize_remote_exception(failure,
                                                            log_failure)

        msg = {'result': reply, 'failure': failure}
        if ending:
            msg['ending'] = True
        rpc_amqp._add_unique_id(msg)

        # If a reply_q exists, add the msg_id to the reply and pass the
        # reply_q to direct_send() to use it as the response queue.
        # Otherwise use the msg_id for backward compatibility.
        if self.reply_q:
            msg['_msg_id'] = self.msg_id
            conn.direct_send(self.reply_q, rpc_common.serialize_msg(msg))
        else:
            conn.direct_send(self.msg_id, rpc_common.serialize_msg(msg))
As the reply method above shows, it first obtains a connection to the RabbitMQ server and then calls _send_reply twice: the first call sends the result to the peer, and the second notifies the peer that the reply is finished (ending=True).
Inside _send_reply we focus on the reply path of the call method, where self.reply_q is set (reply_q is the name of the peer's reply exchange and queue), so conn.direct_send(self.reply_q, rpc_common.serialize_msg(msg)) is executed. The other branch, conn.direct_send(self.msg_id, rpc_common.serialize_msg(msg)), is kept for backward compatibility with older callers that expect the reply on a queue named after msg_id, as the comment in the code notes. Below we analyze the code flow of the first direct_send.
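To make the two _send_reply calls concrete, here is roughly what the two messages published to the reply queue look like for a successful call. The keys come from the _send_reply code above; the field values are made up for illustration:

# Illustrative payloads for the two direct_send calls made by reply().

# First _send_reply(conn, reply, failure): carries the actual result.
first_msg = {
    'result': ['host-1'],        # return value of the dispatched method
    'failure': None,
    '_unique_id': 'b03a...e1',   # added by rpc_amqp._add_unique_id()
    '_msg_id': '5f2c...9d',      # only set when reply_q is used
}

# Second _send_reply(conn, ending=True): tells the caller the reply is complete.
second_msg = {
    'result': None,
    'failure': None,
    'ending': True,
    '_unique_id': 'c17f...a2',
    '_msg_id': '5f2c...9d',
}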
#/oslo_messaging/_drivers/impl_rabbit.py:Connection
    def direct_send(self, msg_id, msg):
        """Send a 'direct' message."""
        timer = rpc_common.DecayingTimer(duration=60)
        timer.start()
        # NOTE(sileht): retry at least 60sec, after we have a good change
        # that the caller is really dead too...

        while True:
            try:
                self.publisher_send(DirectPublisher, msg_id, msg,
                                    error_callback=None)
                return
            except self.connection.channel_errors as exc:
                # NOTE(noelbk/sileht):
                # If rabbit dies, the consumer can be disconnected before the
                # publisher sends, and if the consumer hasn't declared the
                # queue, the publisher's will send a message to an exchange
                # that's not bound to a queue, and the message wll be lost.
                # So we set passive=True to the publisher exchange and catch
                # the 404 kombu ChannelError and retry until the exchange
                # appears
                if exc.code == 404 and timer.check_return() > 0:
                    LOG.info(_LI("The exchange to reply to %s doesn't "
                                 "exist yet, retrying...") % msg_id)
                    time.sleep(1)
                    continue
                self._log_publisher_send_error(msg_id, exc)
                raise
            except Exception as exc:
                self._log_publisher_send_error(msg_id, exc)
                raise

    def publisher_send(self, cls, topic, msg, timeout=None, retry=None,
                       error_callback=default_marker, **kwargs):
        """Send to a publisher based on the publisher class."""

        def _default_error_callback(exc):
            self._log_publisher_send_error(topic, exc)

        if error_callback is self.default_marker:
            error_callback = _default_error_callback

        def _publish():
            publisher = cls(self.driver_conf, self.channel,
                            topic=topic, **kwargs)
            publisher.send(msg, timeout)

        with self._connection_lock:
            self.ensure(_publish, retry=retry, error_callback=error_callback)
Here direct_send calls publisher_send, which goes through the ensure method and ultimately calls back into the _publish function defined inside publisher_send. _publish first creates a DirectPublisher object and then uses it to send the reply result to the peer.
a. Creating the DirectPublisher object
#/oslo_messaging/_drivers/impl_rabbit.py:DirectPublisher
class DirectPublisher(Publisher):
    """Publisher class for 'direct'."""
    def __init__(self, conf, channel, topic, **kwargs):
        """Init a 'direct' publisher.

        Kombu options may be passed as keyword args to override defaults
        """

        options = {'durable': False,
                   'auto_delete': True,
                   'exclusive': False,
                   'passive': True}
        options.update(kwargs)
        super(DirectPublisher, self).__init__(channel, topic, topic,
                                              type='direct', **options)

#/oslo_messaging/_drivers/impl_rabbit.py:Publisher
class Publisher(object):
    """Base Publisher class."""

    def __init__(self, channel, exchange_name, routing_key, **kwargs):
        """Init the Publisher class with the exchange_name, routing_key,
        and other options
        """
        self.exchange_name = exchange_name
        self.routing_key = routing_key
        self.kwargs = kwargs
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-establish the Producer after a rabbit reconnection."""
        self.exchange = kombu.entity.Exchange(name=self.exchange_name,
                                              **self.kwargs)
        self.producer = kombu.messaging.Producer(exchange=self.exchange,
                                                 channel=channel,
                                                 routing_key=self.routing_key)
In DirectPublisher the passive key in the options dict is set to True; this is a bug fix. The background is as follows: the exchange and queue are declared on the consumer side, and only once the consumer has declared them can the publisher safely send a message to that exchange (a publisher may declare an exchange too); otherwise the message is lost. The bug being fixed is this: after the RabbitMQ server loses its connections to the consumer and the publisher, the publisher may publish to the exchange before the consumer has re-declared the exchange and queue, so the message is lost. Setting passive to True prevents the publisher from creating the exchange itself, leaving the declaration to the consumer; as a side effect, the publisher's declaration attempt makes the RabbitMQ server raise a 404 error when the exchange does not exist yet. That is exactly why direct_send above sets up a 60-second timer: it retries at 1-second intervals, checking whether the consumer side's exchange and queue have been created, and sends the message to the exchange once they appear within the 60 seconds.
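A stripped-down sketch of that retry loop may help; the exception class and helpers below are stand-ins written for this article, not the kombu/amqp types used in the real code:

import time


class FakeChannelError(Exception):
    """Stand-in for a kombu channel error carrying an AMQP reply code."""
    def __init__(self, code):
        super(FakeChannelError, self).__init__('reply code %d' % code)
        self.code = code


def publish_once(exchange_exists):
    # Stand-in for DirectPublisher: with passive=True, declaring a missing
    # exchange makes the broker answer with 404 (not found).
    if not exchange_exists():
        raise FakeChannelError(404)
    print('reply published')


def direct_send_with_retry(exchange_exists, deadline=60.0):
    give_up_at = time.time() + deadline
    while True:
        try:
            publish_once(exchange_exists)
            return
        except FakeChannelError as exc:
            if exc.code == 404 and time.time() < give_up_at:
                time.sleep(1)      # wait for the consumer to re-declare
                continue
            raise

# Usage: direct_send_with_retry(lambda: True) publishes immediately;
# a callable that stays False makes it retry once per second for 60s.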
Back to the code flow: after updating the options dict, DirectPublisher calls the constructor of its parent class Publisher, whose reconnect method creates the kombu-level Exchange and Producer objects. The Exchange creation flow was covered in an earlier article, so below we only analyze the creation of the Producer object.
#/kombu/messaging.py:Producer
class Producer(object):
    """Message Producer.

    :param channel: Connection or channel.
    :keyword exchange: Optional default exchange.
    :keyword routing_key: Optional default routing key.
    :keyword serializer: Default serializer. Default is `"json"`.
    :keyword compression: Default compression method. Default is no
        compression.
    :keyword auto_declare: Automatically declare the default exchange
      at instantiation. Default is :const:`True`.
    :keyword on_return: Callback to call for undeliverable messages,
        when the `mandatory` or `immediate` arguments to
        :meth:`publish` is used. This callback needs the following
        signature: `(exception, exchange, routing_key, message)`.
        Note that the producer needs to drain events to use this feature.

    """

    #: Default exchange
    exchange = None

    #: Default routing key.
    routing_key = ''

    #: Default serializer to use. Default is JSON.
    serializer = None

    #: Default compression method. Disabled by default.
    compression = None

    #: By default the exchange is declared at instantiation.
    #: If you want to declare manually then you can set this
    #: to :const:`False`.
    auto_declare = True

    #: Basic return callback.
    on_return = None

    #: Set if channel argument was a Connection instance (using
    #: default_channel).
    __connection__ = None

    def __init__(self, channel, exchange=None, routing_key=None,
                 serializer=None, auto_declare=None, compression=None,
                 on_return=None):
        self._channel = channel
        self.exchange = exchange
        self.routing_key = routing_key or self.routing_key
        self.serializer = serializer or self.serializer
        self.compression = compression or self.compression
        self.on_return = on_return or self.on_return
        self._channel_promise = None
        if self.exchange is None:
            self.exchange = Exchange('')
        if auto_declare is not None:
            self.auto_declare = auto_declare

        if self._channel:
            self.revive(self._channel)

    def revive(self, channel):
        """Revive the producer after connection loss."""
        if is_connection(channel):
            connection = channel
            self.__connection__ = connection
            channel = ChannelPromise(lambda: connection.default_channel)
        if isinstance(channel, ChannelPromise):
            self._channel = channel
            self.exchange = self.exchange(channel)
        else:
            # Channel already concrete
            self._channel = channel
            if self.on_return:
                self._channel.events['basic_return'].add(self.on_return)
            self.exchange = self.exchange(channel)
        if self.auto_declare:
            # auto_decare is not recommended as this will force
            # evaluation of the channel.
            self.declare()
Since the channel passed in here is already a concrete channel, revive is executed. revive checks auto_declare, which keeps its default value True in this scenario, so self.declare() is called, which declares the amqp-level exchange on the producer side. Let's follow the code and check whether it really declares the exchange at the amqp layer.
#/kombu/messaging.py:Producer
    def declare(self):
        """Declare the exchange.

        This happens automatically at instantiation if
        :attr:`auto_declare` is enabled.

        """
        if self.exchange.name:
            self.exchange.declare()

#/kombu/entity.py:Exchange
    def declare(self, nowait=False, passive=None):
        """Declare the exchange.

        Creates the exchange on the broker.

        :keyword nowait: If set the server will not respond, and a
            response will not be waited for. Default is :const:`False`.

        """
        passive = self.passive if passive is None else passive
        if self.name:
            return self.channel.exchange_declare(
                exchange=self.name, type=self.type, durable=self.durable,
                auto_delete=self.auto_delete, arguments=self.arguments,
                nowait=nowait, passive=passive,
            )
Because self.passive was set to True when the producer-side Exchange object was created above, the amqp layer does not actually create the exchange; the RabbitMQ server only checks whether it exists and returns a 404 error if it does not. We won't paste the amqp-layer exchange_declare code here, only the comment on the passive parameter:
#/amqp/channel.py:Channel.exchange_declare
            passive: boolean

                do not create exchange

                If set, the server will not create the exchange.  The
                client can use this to check whether an exchange
                exists without modifying the server state.

                RULE:

                    If set, and the exchange does not already exist,
                    the server MUST raise a channel exception with
                    reply code 404 (not found).
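For intuition, here is a small hedged usage example with plain kombu before we move on (the broker URL and exchange name are assumptions): because auto_declare defaults to True, constructing the Producer already triggers exchange.declare(), and with passive=True that declaration fails with a 404 channel error if the reply exchange has not been declared by the consumer yet.

# A minimal sketch using plain kombu; URL and exchange name are assumptions.
from kombu import Connection, Exchange, Producer

with Connection('amqp://guest:guest@localhost:5672//') as conn:
    channel = conn.channel()
    # passive=True mirrors DirectPublisher: don't create the exchange,
    # only check that the consumer side has already declared it.
    exchange = Exchange('reply_5f2c9d', type='direct',
                        durable=False, auto_delete=True, passive=True)
    try:
        # auto_declare defaults to True, so this constructor already calls
        # exchange.declare(); a missing exchange surfaces as a 404 channel error.
        producer = Producer(channel, exchange=exchange,
                            routing_key='reply_5f2c9d')
        producer.publish({'result': ['host-1'], 'failure': None})
    except conn.channel_errors as exc:
        print('reply exchange not declared yet: %s' % exc)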
At this point the DirectPublisher object has been created. Next we analyze how it is used to send the reply result to the peer.
b. Sending the message to the peer
#/oslo_messaging/_drivers/impl_rabbit.py:Publisher
    def send(self, msg, timeout=None):
        """Send a message."""
        if timeout:
            #
            # AMQP TTL is in milliseconds when set in the header.
            #
            self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
        else:
            self.producer.publish(msg)
Publishing the message here is straightforward, so we won't trace it further; interested readers can continue down into kombu.
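As a quick usage note, the only subtlety in send is the TTL unit conversion: the timeout handed down is in seconds, while the 'ttl' header set by the code above is in milliseconds. A hedged one-liner mirroring that call, reusing the hypothetical producer from the kombu sketch earlier:

# timeout is in seconds; the 'ttl' header used by the code above is in milliseconds.
timeout = 60
producer.publish({'result': None, 'failure': None, 'ending': True},
                 headers={'ttl': timeout * 1000})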
This completes the analysis of the message handling flow.
Summary: this article analyzed the code flow of the two remaining statements in the start method. Their main job is to pass the callback function down to the amqp layer, where it is saved, and then to wait for messages to arrive; when a message arrives, it is processed by calling back through that callback. Along the way we also briefly analyzed the message handling flow.
With that, the analysis of the OpenStack-RPC-server creation flow is complete. The flow is:
1. Create the connection and channel at the amqp layer.
2. Create the consumers, including the exchanges, queues and their bindings.
3. Wait for messages to arrive, then process them.
4. After processing, reply the result to the peer.
Next, we will analyze the creation flow of the OpenStack-RPC-Client.