//%2006////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2000, 2001, 2002 BMC Software; Hewlett-Packard Development
// Company, L.P.; IBM Corp.; The Open Group; Tivoli Systems.
// Copyright (c) 2004 BMC Software; Hewlett-Packard Development Company, L.P.;
// IBM Corp.; EMC Corporation; VERITAS Software Corporation; The Open Group.
// Copyright (c) 2005 Hewlett-Packard Development Company, L.P.; IBM Corp.;
// EMC Corporation; VERITAS Software Corporation; The Open Group.
// Copyright (c) 2006 Hewlett-Packard Development Company, L.P.; IBM Corp.;
// EMC Corporation; Symantec Corporation; The Open Group.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// Author: Mike Day (mdday@us.ibm.com)
//
// Modified By: Amit K Arora, IBM (amita@in.ibm.com) for Bug#1090,#2657
//              Josephine Eskaline Joyce, IBM (jojustin@in.ibm.com) for Bug#3259
//              Jim Wunderlich (Jim_Wunderlich@prodigy.net)
//
//%/////////////////////////////////////////////////////////////////////////////

#include "MessageQueueService.h" | #include "MessageQueueService.h" |
#include <Pegasus/Common/Tracer.h> | #include <Pegasus/Common/Tracer.h> |
#include <Pegasus/Common/MessageLoader.h> //l10n | #include <Pegasus/Common/MessageLoader.h> //l10n |
| |
PEGASUS_NAMESPACE_BEGIN | PEGASUS_NAMESPACE_BEGIN |
| |
|
|
cimom *MessageQueueService::_meta_dispatcher = 0;
AtomicInt MessageQueueService::_service_count(0);
Mutex MessageQueueService::_xidMutex;
Uint32 MessageQueueService::_xid = 1;
Mutex MessageQueueService::_meta_dispatcher_mutex;

static struct timeval deallocateWait = {300, 0};

ThreadPool *MessageQueueService::_thread_pool = 0;

List<MessageQueueService, RMutex> MessageQueueService::_polling_list;

Thread* MessageQueueService::_polling_thread = 0;

ThreadPool *MessageQueueService::get_thread_pool()
{
    return _thread_pool;
}

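//
// kill_idle_threads: thread-pool maintenance hook. It is scheduled from
// polling_routine (via _check_idle_flag) and asks the shared ThreadPool
// to reap worker threads that have sat idle longer than deallocateWait.
//
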
//
// MAX_THREADS_PER_SVC_QUEUE
//
// JR Wunderlich Jun 6, 2005
//

#define MAX_THREADS_PER_SVC_QUEUE_LIMIT 5000
#define MAX_THREADS_PER_SVC_QUEUE_DEFAULT 5

#ifndef MAX_THREADS_PER_SVC_QUEUE
# define MAX_THREADS_PER_SVC_QUEUE MAX_THREADS_PER_SVC_QUEUE_DEFAULT
#endif

Uint32 max_threads_per_svc_queue;

PEGASUS_THREAD_RETURN PEGASUS_THREAD_CDECL
MessageQueueService::kill_idle_threads(void *parm)
{
    static struct timeval now, last = {0, 0};
    Uint32 dead_threads = 0;

    gettimeofday(&now, NULL);
    if (now.tv_sec - last.tv_sec > 120)
    {
        gettimeofday(&last, NULL);
        try
        {
            dead_threads =
                MessageQueueService::_thread_pool->cleanupIdleThreads();
        }
        catch (...)
        {
        }
    }

#ifdef PEGASUS_POINTER_64BIT
    return (PEGASUS_THREAD_RETURN)(Uint64)dead_threads;
#else
    return (PEGASUS_THREAD_RETURN)(Uint32)dead_threads;
#endif
}

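//
// polling_routine: the single dispatcher thread shared by every service.
// It sleeps on _polling_sem, and on each wakeup walks _polling_list,
// handing any service with queued messages to a _req_proc worker from the
// thread pool, capped at max_threads_per_svc_queue active workers per
// service.
//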
PEGASUS_THREAD_RETURN PEGASUS_THREAD_CDECL MessageQueueService::polling_routine(void *parm)
{
    Thread *myself = reinterpret_cast<Thread *>(parm);
    List<MessageQueueService, RMutex> *list =
        reinterpret_cast<List<MessageQueueService, RMutex>*>(myself->get_parm());

    while (_stop_polling.get() == 0)
    {
        _polling_sem.wait();

        if (_stop_polling.get() != 0)
        {
            break;
        }

        // The polling_routine thread must hold the lock on the
        // _polling_thread list while processing incoming messages.
        // This lock is used to give this thread ownership of
        // services on the _polling_routine list.

        // This is necessary to avoid confict with other threads
        // processing the _polling_list
        // (e.g., MessageQueueServer::~MessageQueueService).

        list->lock();
        MessageQueueService *service = list->front();
        ThreadStatus rtn = PEGASUS_THREAD_OK;
        while (service != NULL)
        {
            if ((service->_incoming.count() > 0) &&
                (service->_die.get() == 0) &&
                (service->_threads.get() < max_threads_per_svc_queue))
            {
                // The _threads count is used to track the
                // number of active threads that have been allocated
                // to process messages for this service.

                // The _threads count MUST be incremented while
                // the polling_routine owns the _polling_thread
                // lock and has ownership of the service object.

                service->_threads++;
                try
                {
                    rtn = _thread_pool->allocate_and_awaken(
                        service, _req_proc, &_polling_sem);
                }
                catch (...)
                {
                    service->_threads--;

                    // allocate_and_awaken should never generate an exception.
                    PEGASUS_ASSERT(0);
                }
                // if no more threads available, break from processing loop
                if (rtn != PEGASUS_THREAD_OK)
                {
                    service->_threads--;
                    Logger::put(Logger::STANDARD_LOG, System::CIMSERVER, Logger::TRACE,
                        "Not enough threads to process this request. Skipping.");

                    Tracer::trace(TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL2,
                        "Could not allocate thread for %s. " \
                        "Queue has %d messages waiting and %d threads servicing." \
                        "Skipping the service for right now. ",
                        service->getQueueName(),
                        service->_incoming.count(),
                        service->_threads.get());

                    pegasus_yield();
                    service = NULL;
                }
            }
            if (service != NULL)
            {
                service = list->next_of(service);
            }
        }
        list->unlock();

        if (_check_idle_flag.get() != 0)
        {
            _check_idle_flag = 0;

            // try to do idle thread clean up processing when system is not busy
            // if system is busy there may not be a thread available to allocate
            // so nothing will be done and that is OK.

            if (_thread_pool->allocate_and_awaken(
                    service, kill_idle_threads, &_polling_sem) != PEGASUS_THREAD_OK)
            {
                Logger::put(Logger::STANDARD_LOG, System::CIMSERVER, Logger::TRACE,
                    "Not enough threads to kill idle threads. What an irony.");

                Tracer::trace(TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL2,
                    "Could not allocate thread to kill idle threads." \
                    "Skipping. ");
            }
        }
    }
    myself->exit_self( (PEGASUS_THREAD_RETURN) 1 );
    return(0);
}

AtomicInt MessageQueueService::_check_idle_flag(0);

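//
// Construction registers the service with the meta dispatcher and appends
// it to _polling_list. As an illustrative sketch only (hypothetical
// subclass name), a concrete service is typically declared as:
//
//     class MyService : public MessageQueueService
//     {
//     public:
//         MyService() : MessageQueueService(
//             "MyService", MessageQueue::getNextQueueId(), 0, 0) { }
//         virtual void handleEnqueue(Message *msg);  // legacy messages
//     };
//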
MessageQueueService::MessageQueueService(
    const char *name,
    Uint32 queueID,
    Uint32 capabilities,
    Uint32 mask)
    : Base(name, true, queueID),
      _mask(mask),
      _die(0),
      _threads(0),
      _incoming(0),
      _incoming_queue_shutdown(0)
{
    _capabilities = (capabilities | module_capabilities::async);

    _default_op_timeout.tv_sec = 30;
    _default_op_timeout.tv_usec = 100;

    max_threads_per_svc_queue = MAX_THREADS_PER_SVC_QUEUE;

    // if requested thread max is out of range, then set to
    // MAX_THREADS_PER_SVC_QUEUE_LIMIT

    if ((max_threads_per_svc_queue < 1) ||
        (max_threads_per_svc_queue > MAX_THREADS_PER_SVC_QUEUE_LIMIT))
    {
        max_threads_per_svc_queue = MAX_THREADS_PER_SVC_QUEUE_LIMIT;
    }

    Tracer::trace(TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL2,
        "max_threads_per_svc_queue set to %u.", max_threads_per_svc_queue);

    AutoMutex autoMut(_meta_dispatcher_mutex);

    if (_meta_dispatcher == 0)
    {
        _stop_polling = 0;
        PEGASUS_ASSERT(_service_count.get() == 0);
        _meta_dispatcher = new cimom();
        if (_meta_dispatcher == NULL)
        {
            throw NullPointer();
        }
        //  _thread_pool = new ThreadPool(initial_cnt, "MessageQueueService",
        //   minimum_cnt, maximum_cnt, deallocateWait);
        //
        _thread_pool =
            new ThreadPool(0, "MessageQueueService", 0, 0, deallocateWait);
    }
    _service_count++;

    if (false == register_service(name, _capabilities, _mask))
    {
        MessageLoaderParms parms("Common.MessageQueueService.UNABLE_TO_REGISTER",
            "MessageQueueService Base Unable to register with  Meta Dispatcher");
        throw BindFailedException(parms);
    }

    _polling_list.insert_back(this);
}

MessageQueueService::~MessageQueueService()
{
    _die = 1;

    // The polling_routine locks the _polling_list while
    // processing the incoming messages for services on the
    // list. Deleting the service from the _polling_list
    // prior to processing, avoids synchronization issues
    // with the _polling_routine.

    // ATTN: added to prevent assertion in List in which the list does not
    // contain this element.

    _polling_list.remove(this);

    // ATTN: The code for closing the _incoming queue
    // is not working correctly. In OpenPegasus 2.5,
    // execution of the following code is very timing
    // dependent. This needs to be fix.
    // See Bug 4079 for details.

    if (_incoming_queue_shutdown.get() == 0)
    {
        _shutdown_incoming_queue();
    }

    // Wait until all threads processing the messages
    // for this service have completed.

    while (_threads.get() > 0)
    {
        pegasus_yield();
    }

    {
        AutoMutex autoMut(_meta_dispatcher_mutex);
        _service_count--;
        if (_service_count.get() == 0)
        {
            _stop_polling++;
            _polling_sem.signal();
            if (_polling_thread)
            {
                _polling_thread->join();
                delete _polling_thread;
                _polling_thread = 0;
            }
            _meta_dispatcher->_shutdown_routed_queue();
            delete _meta_dispatcher;
            _meta_dispatcher = 0;

            delete _thread_pool;
            _thread_pool = 0;
        }
    } // mutex unlocks here

    // Clean up in case there are extra stuff on the queue.
    while (_incoming.count())
    {
        try
        {
            delete _incoming.dequeue();
        }
        catch (const ListClosed&)
        {
            // If the list is closed, there is nothing we can do.
            break;
        }
    }
}

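//
// Shutdown handshake: _shutdown_incoming_queue() posts an
// AsyncIoctl::IO_CLOSE message to this service's own _incoming queue.
// The worker that dequeues it (see handle_AsyncIoctl below) drains any
// remaining operations and then closes the queue for good.
//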
void MessageQueueService::_shutdown_incoming_queue()
{
    if (_incoming_queue_shutdown.get() > 0)
        return;

    AsyncIoctl *msg = new AsyncIoctl(
        get_next_xid(),
        0,
        _queueId,
        _queueId,
        true,
        AsyncIoctl::IO_CLOSE,
        0,
        0);

    msg->op = get_op();
    msg->op->_state &= ~ASYNC_OPSTATE_COMPLETE;

    msg->op->_op_dest = this;
    msg->op->_request.insert_front(msg);
    try
    {
        _incoming.enqueue_wait(msg->op);
        _polling_sem.signal();
    }
    catch (const ListClosed&)
    {
        // This means the queue has already been shut-down (happens when there
        // are two AsyncIoctrl::IO_CLOSE messages generated and one got first
        // processed.
        delete msg;
    }
    catch (const Permission&)
    {
        delete msg;
    }
}

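//
// _req_proc: worker routine executed from the thread pool on behalf of a
// single service. It drains that service's _incoming queue, dispatching
// each AsyncOpNode through _handle_incoming_operation(), and decrements
// the service's _threads count on every exit path.
//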
PEGASUS_THREAD_RETURN PEGASUS_THREAD_CDECL MessageQueueService::_req_proc(
    void * parm)
{
    MessageQueueService* service =
        reinterpret_cast<MessageQueueService*>(parm);
    PEGASUS_ASSERT(service != 0);
    try
    {
        if (service->_die.get() != 0)
        {
            service->_threads--;
            return (0);
        }
        // pull messages off the incoming queue and dispatch them. then
        // check pending messages that are non-blocking
        AsyncOpNode *operation = 0;

        // many operations may have been queued.
        do
        {
            try
            {
                operation = service->_incoming.dequeue();
            }
            catch (ListClosed&)
            {
                // ATTN: This appears to be a common loop exit path.
                //PEG_TRACE_STRING(TRC_DISCARDED_DATA, Tracer::LEVEL2,
                //    "Caught ListClosed exception. Exiting _req_proc.");
                break;
            }

            if (operation)
            {
                operation->_service_ptr = service;
                service->_handle_incoming_operation(operation);
            }
        } while (operation);
    }
    catch (const Exception& e)
    {
        PEG_TRACE_STRING(TRC_DISCARDED_DATA, Tracer::LEVEL2,
            String("Caught exception: \"") + e.getMessage() +
            "\". Exiting _req_proc.");
    }
    catch (...)
    {
        PEG_TRACE_STRING(TRC_DISCARDED_DATA, Tracer::LEVEL2,
            "Caught unrecognized exception. Exiting _req_proc.");
    }
    service->_threads--;
    return(0);
}

void MessageQueueService::_sendwait_callback(
    AsyncOpNode *op,
    MessageQueue *q,
    void *parm)
{
    op->_client_sem.signal();
}

void MessageQueueService::_handle_incoming_operation(AsyncOpNode *operation)
{
    if (operation != 0)
    {
        // ATTN: optimization
        // << Tue Feb 19 14:10:38 2002 mdd >>
        operation->lock();

        Message *rq = operation->_request.front();

        // optimization <<< Thu Mar 7 21:04:05 2002 mdd >>>
        // move this to the bottom of the loop when the majority of
        // messages become async messages.

        // divert legacy messages to handleEnqueue
        if ((rq != 0) && (!(rq->getMask() & message_mask::ha_async)))
        {
            rq = operation->_request.remove_front();
            operation->unlock();
            // delete the op node
            operation->release();
            return_op(operation);

            handleEnqueue(rq);
            return;
        }

        if ((operation->_flags & ASYNC_OPFLAGS_CALLBACK ||
             operation->_flags & ASYNC_OPFLAGS_SAFE_CALLBACK) &&
            (operation->_state & ASYNC_OPSTATE_COMPLETE))
        {
            operation->unlock();
            _handle_async_callback(operation);
        }
        else
        {
            PEGASUS_ASSERT(rq != 0);
            operation->unlock();
            _handle_async_request(static_cast<AsyncRequest *>(rq));
        }
    }
    return;
}

Boolean MessageQueueService::_enqueueResponse(
    Message* request,
    Message* response)
{
    STAT_COPYDISPATCHER

    PEG_METHOD_ENTER(TRC_MESSAGEQUEUESERVICE,
        "MessageQueueService::_enqueueResponse");

    if (request->getMask() & message_mask::ha_async)
    {
        if (response->getMask() & message_mask::ha_async)
        {
            _completeAsyncResponse(
                static_cast<AsyncRequest *>(request),
                static_cast<AsyncReply *>(response),
                ASYNC_OPSTATE_COMPLETE, 0);
            PEG_METHOD_EXIT();
            return true;
        }
    }

    if (request->_async != 0)
    {
        AsyncRequest *async = static_cast<AsyncRequest *>(request->_async);
        AsyncOpNode *op = async->op;
        request->_async = 0;
        // the legacy request is going to be deleted by its handler
        // remove it from the op node

        static_cast<AsyncLegacyOperationStart *>(async)->get_action();

        AsyncLegacyOperationResult *async_result =
            new AsyncLegacyOperationResult(
                async->getKey(),
                async->getRouting(),
                op,
                response);
        _completeAsyncResponse(
            async,
            async_result,
            ASYNC_OPSTATE_COMPLETE,
            0);
        PEG_METHOD_EXIT();
        return true;
    }

    // ensure that the destination queue is in response->dest
    PEG_METHOD_EXIT();
    return SendForget(response);
}

void MessageQueueService::_completeAsyncResponse(
    AsyncRequest *request,
    AsyncReply *reply,
    Uint32 state,
    Uint32 flag)


}

void MessageQueueService::_complete_op_node(
    AsyncOpNode *op,
    Uint32 state,
    Uint32 flag,
    Uint32 code)

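//
// accept_async is the sole entry point through which the cimom hands an
// operation to this service. Note that the polling thread is created
// lazily here, on first use, rather than in the constructor.
//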
Boolean MessageQueueService::accept_async(AsyncOpNode *op)
{
    if (_incoming_queue_shutdown.get() > 0)
        return false;

    if (_polling_thread == NULL)
    {
        _polling_thread = new Thread(
            polling_routine,
            reinterpret_cast<void *>(&_polling_list),
            false);
        ThreadStatus tr = PEGASUS_THREAD_OK;
        while ( (tr = _polling_thread->run()) != PEGASUS_THREAD_OK)
        {
            if (tr == PEGASUS_THREAD_INSUFFICIENT_RESOURCES)
                pegasus_yield();
            else
                throw Exception(MessageLoaderParms(
                    "Common.MessageQueueService.NOT_ENOUGH_THREAD",
                    "Could not allocate thread for the polling thread."));
        }
    }
    // ATTN optimization remove the message checking altogether in the base
    // << Mon Feb 18 14:02:20 2002 mdd >>
    op->lock();
    Message *rq = op->_request.front();
    Message *rp = op->_response.front();
    op->unlock();

    if ((rq != 0 && (true == messageOK(rq))) ||
        (rp != 0 && (true == messageOK(rp))) && _die.get() == 0)
    {
        _incoming.enqueue_wait(op);
        _polling_sem.signal();
        return true;
    }
    return false;
}

Boolean MessageQueueService::messageOK(const Message *msg)
{
    if (_incoming_queue_shutdown.get() > 0)
        return false;
    return true;
}

void MessageQueueService::handle_heartbeat_request(AsyncRequest *req)
{
    // default action is to echo a heartbeat response

    AsyncReply *reply = new AsyncReply(
        async_messages::HEARTBEAT,
        req->getKey(),
        req->getRouting(),
        0,
        req->op,
        async_results::OK,
        req->resp,
        false);
    _completeAsyncResponse(req, reply, ASYNC_OPSTATE_COMPLETE, 0);
}

void MessageQueueService::handle_heartbeat_reply(AsyncReply *rep)
{
    ;
}

void MessageQueueService::handle_AsyncIoctl(AsyncIoctl *req)
{
    switch (req->ctl)
    {
        case AsyncIoctl::IO_CLOSE:
        {
            MessageQueueService *service =
                static_cast<MessageQueueService *>(req->op->_service_ptr);

#ifdef MESSAGEQUEUESERVICE_DEBUG
            PEGASUS_STD(cout) << service->getQueueName()
                << " Received AsyncIoctl::IO_CLOSE " << PEGASUS_STD(endl);
#endif

            // ensure we do not accept any further messages

            // ensure we don't recurse on IO_CLOSE
            if (_incoming_queue_shutdown.get() > 0)
                break;

            // set the closing flag
            service->_incoming_queue_shutdown = 1;

            // empty out the queue
            while (1)
            {
                AsyncOpNode *operation;
                try
                {
                    operation = service->_incoming.dequeue();
                }
                catch (IPCException&)
                {
                    break;
                }
                if (operation)
                {
                    operation->_service_ptr = service;
                    service->_handle_incoming_operation(operation);
                }
                else
                    break;
            } // message processing loop

            // shutdown the AsyncQueue
            service->_incoming.shutdown_queue();
            return;
        }
        default:
            _make_response(req, async_results::CIM_NAK);
    }
}


void MessageQueueService::handle_CimServiceStop(CimServiceStop *req)
{
    // set the stopped bit and update
    _capabilities |= module_capabilities::stopped;
    _make_response(req, async_results::CIM_STOPPED);
    // now tell the meta dispatcher we are stopped
    update_service(_capabilities, _mask);
}

void MessageQueueService::handle_CimServicePause(CimServicePause *req)
{
    // set the paused bit and update
    _capabilities |= module_capabilities::paused;
    update_service(_capabilities, _mask);
    _make_response(req, async_results::CIM_PAUSED);
    // now tell the meta dispatcher we are stopped
}

void MessageQueueService::handle_CimServiceResume(CimServiceResume *req)
{
    // clear the paused bit and update
    _capabilities &= (~(module_capabilities::paused));
    update_service(_capabilities, _mask);
    ;
}

AsyncOpNode *MessageQueueService::get_op()
{
    AsyncOpNode *op = new AsyncOpNode();

    op->_state = ASYNC_OPSTATE_UNKNOWN;
    op->_flags = ASYNC_OPFLAGS_SINGLE | ASYNC_OPFLAGS_NORMAL;

    return op;
}

Boolean MessageQueueService::ForwardOp(
    AsyncOpNode *op,
    Uint32 destination)
{
    PEGASUS_ASSERT(op != 0);

    op->lock();
    op->_op_dest = MessageQueue::lookup(destination);
    op->_flags |= (ASYNC_OPFLAGS_FIRE_AND_FORGET | ASYNC_OPFLAGS_FORWARD);
    op->_flags &= ~(ASYNC_OPFLAGS_CALLBACK);
    op->unlock();

    if (op->_op_dest == 0)
        return false;

    return _meta_dispatcher->route_async(op);
}

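//
// Two SendAsync overloads follow. The first takes a caller-supplied
// AsyncOpNode plus a raw (op, queue, parm) callback; the second wraps a
// plain Message, allocates the op node itself, and uses the "safe
// callback" (response, handle, parameter) form.
//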
Boolean MessageQueueService::SendAsync(
    AsyncOpNode *op,
    Uint32 destination,
    void (*callback)(AsyncOpNode *, MessageQueue *, void *),
    MessageQueue *callback_response_q,
    void *callback_ptr)
{
    PEGASUS_ASSERT(op != 0 && callback != 0);

    // get the queue handle for the destination

    op->lock();
    op->_op_dest = MessageQueue::lookup(destination); // destination of this message
    op->_flags |= ASYNC_OPFLAGS_CALLBACK;
    op->_flags &= ~(ASYNC_OPFLAGS_FIRE_AND_FORGET);
    op->_state &= ~ASYNC_OPSTATE_COMPLETE;
    // initialize the callback data
    op->_async_callback = callback; // callback function to be executed by recpt. of response
    op->_callback_node = op; // the op node
    op->_callback_response_q = callback_response_q; // the queue that will receive the response
    op->_callback_ptr = callback_ptr; // user data for callback
    op->_callback_request_q = this; // I am the originator of this request

    op->unlock();
    if (op->_op_dest == 0)
        return false;

    return _meta_dispatcher->route_async(op);
}

Boolean MessageQueueService::SendAsync(
    Message *msg,
    Uint32 destination,
    void (*callback)(Message *response, void *handle, void *parameter),
    void *handle,
    void *parameter)
{
    if (msg == NULL)
        return false;
    if (callback == NULL)
        return SendForget(msg);

    AsyncOpNode *op = get_op();
    msg->dest = destination;
    if (NULL == (op->_op_dest = MessageQueue::lookup(msg->dest)))
    {
        op->release();
        return_op(op);
        return false;
    }
    op->_flags |= ASYNC_OPFLAGS_SAFE_CALLBACK;
    op->_flags &= ~(ASYNC_OPFLAGS_FIRE_AND_FORGET);
    op->_state &= ~ASYNC_OPSTATE_COMPLETE;
    op->__async_callback = callback;
    op->_callback_node = op;
    op->_callback_handle = handle;
    op->_callback_parameter = parameter;
    op->_callback_response_q = this;

    if (!(msg->getMask() & message_mask::ha_async))
    {
        AsyncLegacyOperationStart *wrapper = new AsyncLegacyOperationStart(
            get_next_xid(),
            op,
            destination,
            msg,
            destination);
    }
    else
    {
        op->_request.insert_front(msg);
        (static_cast<AsyncMessage *>(msg))->op = op;
    }

    return _meta_dispatcher->route_async(op);
}

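//
// SendForget: fire-and-forget delivery. The flags mark the op node so the
// receiving side releases it; the caller only learns whether routing to
// the destination queue succeeded.
//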
Boolean MessageQueueService::SendForget(Message *msg)
{
    AsyncOpNode *op = 0;
    Uint32 mask = msg->getMask();

    if (mask & message_mask::ha_async)
    {
        op = (static_cast<AsyncMessage *>(msg))->op;
    }

    if (op == 0)
    {
        op = get_op();
        op->_request.insert_front(msg);
        if (mask & message_mask::ha_async)
        {
            (static_cast<AsyncMessage *>(msg))->op = op;
        }
    }
    op->_op_dest = MessageQueue::lookup(msg->dest);
    op->_flags |= ASYNC_OPFLAGS_FIRE_AND_FORGET;
    op->_flags &= ~(ASYNC_OPFLAGS_CALLBACK | ASYNC_OPFLAGS_SAFE_CALLBACK
        | ASYNC_OPFLAGS_SIMPLE_STATUS);
    op->_state &= ~ASYNC_OPSTATE_COMPLETE;
    if (op->_op_dest == 0)
    {
        op->release();
        return_op(op);
        return false;
    }

    return _meta_dispatcher->route_async(op);
}


AsyncReply *MessageQueueService::SendWait(AsyncRequest *request)
{
    if (request == 0)
        return 0;

    Boolean destroy_op = false;

    if (request->op == 0)
    {
        request->op = get_op();
        request->op->_request.insert_front(request);
        destroy_op = true;
    }

    request->block = false;
    request->op->_flags |= ASYNC_OPFLAGS_PSEUDO_CALLBACK;
    SendAsync(
        request->op,
        request->dest,
        _sendwait_callback,
        this,
        (void *)0);

    request->op->_client_sem.wait();

    request->op->lock();
    AsyncReply * rpl = static_cast<AsyncReply *>(request->op->_response.remove_front());
    rpl->op = 0;
    request->op->unlock();

    if (destroy_op == true)
    {
        request->op->lock();
        request->op->_request.remove(request);
        request->op->_state |= ASYNC_OPSTATE_RELEASED;
        request->op->unlock();
        return_op(request->op);
        request->op = 0;
    }
    return rpl;
}

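//
// The service-registration helpers below share one blocking pattern:
// build a control message, round-trip it with SendWait(), test the
// async_results code in the reply, then delete both reply and request.
//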
Boolean MessageQueueService::register_service(
    String name,
    Uint32 capabilities,
    Uint32 mask)
{
    RegisterCimService *msg = new RegisterCimService(
        get_next_xid(),
        0,
        true,
        name,
        capabilities,
        mask,
        _queueId);
    Boolean registered = false;
    AsyncReply *reply = static_cast<AsyncReply *>(SendWait(msg));

    if (reply != 0)
    {
        if (reply->getMask() & message_mask::ha_async)
        {
            if (reply->getMask() & message_mask::ha_reply)
            {
                if (reply->result == async_results::OK ||
                    reply->result == async_results::MODULE_ALREADY_REGISTERED)
                {
                    registered = true;
                }
            }
        }

        delete reply;
    }
    delete msg;
    return registered;
}

Boolean MessageQueueService::update_service(Uint32 capabilities, Uint32 mask)
{
    UpdateCimService *msg = new UpdateCimService(
        get_next_xid(),
        0,
        true,
        _queueId,
        _capabilities,
        _mask);
    Boolean registered = false;

    AsyncMessage *reply = SendWait(msg);
    if (reply)
    {
        if (reply->getMask() & message_mask::ha_async)
        {
            if (reply->getMask() & message_mask::ha_reply)
            {
                if (static_cast<AsyncReply *>(reply)->result == async_results::OK)
                {
                    registered = true;
                }
            }
        }
        delete reply;
    }
    delete msg;
    return registered;
}

Boolean MessageQueueService::deregister_service()
{
    _meta_dispatcher->deregister_module(_queueId);
    return true;
}

void MessageQueueService::find_services(
    String name,
    Uint32 capabilities,
    Uint32 mask,
    Array<Uint32> *results)
{
    if (results == 0)
    {
        throw NullPointer();
    }

    results->clear();

    FindServiceQueue *req = new FindServiceQueue(
        get_next_xid(),
        0,
        _queueId,
        true,
        name,
        capabilities,
        mask);

    AsyncMessage *reply = SendWait(req);
    if (reply)
    {
        if (reply->getMask() & message_mask::ha_async)
        {
            if (reply->getMask() & message_mask::ha_reply)
            {
                if (static_cast<FindServiceQueueResult *>(reply)->result ==
                    async_results::OK)
                    *results = static_cast<FindServiceQueueResult *>(reply)->qids;
            }
        }
        delete reply;
    }
    delete req;
    return;
}

void MessageQueueService::enumerate_service(Uint32 queue, message_module *result)
{
    if (result == 0)
    {
        throw NullPointer();
    }

    EnumerateService *req = new EnumerateService(
        get_next_xid(),
        0,
        _queueId,
        true,
        queue);


    return;
}

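//
// get_next_xid: process-wide transaction-id counter. A plain Uint32
// guarded by _xidMutex replaces the AtomicInt plus function-local Mutex
// used by earlier revisions of this file.
//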
Uint32 MessageQueueService::get_next_xid()
{
    AutoMutex autoMut(_xidMutex);
    return ++_xid;
}

PEGASUS_NAMESPACE_END