version 1.112, 2005/06/14 17:10:37 -> version 1.119.12.3, 2006/06/30 02:52:01
//%2006////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2000, 2001, 2002 BMC Software; Hewlett-Packard Development
// Company, L.P.; IBM Corp.; The Open Group; Tivoli Systems.
// IBM Corp.; EMC Corporation; VERITAS Software Corporation; The Open Group.
// Copyright (c) 2005 Hewlett-Packard Development Company, L.P.; IBM Corp.;
// EMC Corporation; VERITAS Software Corporation; The Open Group.
// Copyright (c) 2006 Hewlett-Packard Development Company, L.P.; IBM Corp.;
// EMC Corporation; Symantec Corporation; The Open Group.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
//
// Modified By:
//     Amit K Arora, IBM (amita@in.ibm.com) for Bug#1090,#2657
//     Josephine Eskaline Joyce, IBM (jojustin@in.ibm.com) for Bug#3259
//     Jim Wunderlich (Jim_Wunderlich@prodigy.net)
//
//%/////////////////////////////////////////////////////////////////////////////

PEGASUS_NAMESPACE_BEGIN

cimom *MessageQueueService::_meta_dispatcher = 0;
AtomicInt MessageQueueService::_service_count(0);
Mutex MessageQueueService::_xidMutex;
Uint32 MessageQueueService::_xid = 1;
Mutex MessageQueueService::_meta_dispatcher_mutex;

static struct timeval deallocateWait = {300, 0};

ThreadPool *MessageQueueService::_thread_pool = 0;

List<MessageQueueService, RecursiveMutex> MessageQueueService::_polling_list;

Thread* MessageQueueService::_polling_thread = 0;

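// _meta_dispatcher is the shared cimom that routes async messages between
// services; it is created by the first MessageQueueService constructed, and
// the polling machinery is stopped when _service_count drops back to zero
// (see the constructor and destructor below).
// _polling_list holds every live service; polling_routine() walks it and
// hands services with queued work to _thread_pool via _req_proc.
// _xid and _xidMutex produce the unique transaction ids returned by
// get_next_xid() at the bottom of this file.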
|
|
ThreadPool *MessageQueueService::get_thread_pool()
{
    return _thread_pool;
}

|
//
// MAX_THREADS_PER_SVC_QUEUE
//
// 5000 is seriously too high a number for the limit, but since previously
// there was no limit at all, this is intended to approximate that behavior.
// In my testing on a uniprocessor system, the system behaved best with a
// low number (2 to 5) for MAX_THREADS_PER_SVC_QUEUE. When set to 1000 the
// system deadlocked, with indications that were not delivered and apparently
// left sitting within the server in a queue.
//
// JR Wunderlich Jun 6, 2005
//

#define MAX_THREADS_PER_SVC_QUEUE_LIMIT 5000
#define MAX_THREADS_PER_SVC_QUEUE_DEFAULT 5

#ifndef MAX_THREADS_PER_SVC_QUEUE
# define MAX_THREADS_PER_SVC_QUEUE MAX_THREADS_PER_SVC_QUEUE_DEFAULT
#endif

Uint32 max_threads_per_svc_queue;

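// max_threads_per_svc_queue is the run-time copy of the compile-time setting
// above; polling_routine() compares each service's _threads count against it
// before allocating another worker from the thread pool, and the
// MessageQueueService constructor clamps out-of-range values to
// MAX_THREADS_PER_SVC_QUEUE_LIMIT. Because of the #ifndef guard, the cap can
// be overridden at build time (for example by defining
// MAX_THREADS_PER_SVC_QUEUE=10 on the compiler command line) without editing
// this file.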
|
|
PEGASUS_THREAD_RETURN PEGASUS_THREAD_CDECL MessageQueueService::polling_routine(void *parm)
{
    Thread *myself = reinterpret_cast<Thread *>(parm);
    List<MessageQueueService, RecursiveMutex> *list =
        reinterpret_cast<List<MessageQueueService, RecursiveMutex>*>(myself->get_parm());

    while (_stop_polling.get() == 0)
    {
        _polling_sem.wait();

        if (_stop_polling.get() != 0)
        {
            break;
        }

|
        // The polling_routine thread must hold the lock on the
        // _polling_list while processing incoming messages.
        // This lock is used to give this thread ownership of
        // services on the _polling_list.
        //
        // This is necessary to avoid conflict with other threads
        // processing the _polling_list
        // (e.g., MessageQueueService::~MessageQueueService).

        list->lock();
        MessageQueueService *service = list->front();
        ThreadStatus rtn = PEGASUS_THREAD_OK;
        while (service != NULL)
        {
            if ((service->_incoming.count() > 0) &&
                (service->_die.get() == 0) &&
                (service->_threads.get() < max_threads_per_svc_queue))
            {
                // The _threads count is used to track the
                // number of active threads that have been allocated
                // to process messages for this service.

                // The _threads count MUST be incremented while
                // the polling_routine owns the _polling_thread
                // lock and has ownership of the service object.

                service->_threads++;
                try
                {
                    rtn = _thread_pool->allocate_and_awaken(
                        service, _req_proc, &_polling_sem);
                }
                catch (...)
                {
                    service->_threads--;

                    // allocate_and_awaken should never generate an exception.
                    PEGASUS_ASSERT(0);
                }
                // if no more threads available, break from processing loop
                if (rtn != PEGASUS_THREAD_OK)
                {
                    service->_threads--;
                    Logger::put(Logger::STANDARD_LOG, System::CIMSERVER, Logger::TRACE,
                        "Not enough threads to process this request. Skipping.");

                    Tracer::trace(TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL2,
                        "Could not allocate thread for %s. " \
                        "Queue has %d messages waiting and %d threads servicing." \
                        "Skipping the service for right now. ",
                        service->getQueueName(),
                        service->_incoming.count(),
                        service->_threads.get());

                    pegasus_yield();
                    service = NULL;
                }
            }
            if (service != NULL)
            {
                service = list->next_of(service);
            }
        }
        list->unlock();

        if (_check_idle_flag.get() != 0)
        {
            _check_idle_flag = 0;
            // try to do idle thread clean up processing when system is not busy
            // if system is busy there may not be a thread available to allocate
            // so nothing will be done and that is OK.

            if (_thread_pool->allocate_and_awaken(service, kill_idle_threads,
                    &_polling_sem) != PEGASUS_THREAD_OK)
            {
                Logger::put(Logger::STANDARD_LOG, System::CIMSERVER, Logger::TRACE,
                    "Not enough threads to kill idle threads. What an irony.");

                Tracer::trace(TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL2,
                    "Could not allocate thread to kill idle threads." \
                    "Skipping. ");
            }
        }
    }
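// Each pass of polling_routine() therefore: blocks on _polling_sem until a
// service signals that work has arrived, locks _polling_list, and for every
// service that has queued messages, is not dying, and is still below
// max_threads_per_svc_queue, increments service->_threads and asks the
// thread pool to run _req_proc against that service. If the pool cannot
// supply a thread, the count is rolled back, the condition is logged and
// traced, and the pass ends early so the next semaphore signal can retry.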
|
|
      _mask(mask),
      _die(0),
      _threads(0),
      _incoming(0),
      _incoming_queue_shutdown(0)
{

    max_threads_per_svc_queue = MAX_THREADS_PER_SVC_QUEUE;

    // if requested thread max is out of range, then set to
    // MAX_THREADS_PER_SVC_QUEUE_LIMIT

    if ((max_threads_per_svc_queue < 1) ||
        (max_threads_per_svc_queue > MAX_THREADS_PER_SVC_QUEUE_LIMIT))
    {
        max_threads_per_svc_queue = MAX_THREADS_PER_SVC_QUEUE_LIMIT;
    }

    Tracer::trace(TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL2,
        "max_threads_per_svc_queue set to %u.", max_threads_per_svc_queue);

    AutoMutex autoMut(_meta_dispatcher_mutex);

    if (_meta_dispatcher == 0)
    {
        _stop_polling = 0;
        PEGASUS_ASSERT(_service_count.get() == 0);
        _meta_dispatcher = new cimom();
        if (_meta_dispatcher == NULL)
        {
            throw BindFailedException(parms);
        }
    }

    _polling_list.insert_back(this);
}
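// Construction is serialized on _meta_dispatcher_mutex: the first service to
// come up creates the shared cimom dispatcher, and every service adds itself
// to _polling_list so polling_routine() starts watching its _incoming queue.
// The destructor below reverses this, removing the service from the list
// before shutting down its queue.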

MessageQueueService::~MessageQueueService()
{
    _die = 1;

    // The polling_routine locks the _polling_list while
    // processing the incoming messages for services on the
    // list. Deleting the service from the _polling_list
    // prior to processing avoids synchronization issues
    // with the _polling_routine.
    //
    // ATTN: added to prevent assertion in List in which the list does not
    // contain this element.

    _polling_list.remove(this);

    // ATTN: The code for closing the _incoming queue
    // is not working correctly. In OpenPegasus 2.5,
    // execution of the following code is very timing
    // dependent. This needs to be fixed.
    // See Bug 4079 for details.

    if (_incoming_queue_shutdown.get() == 0)
    {
        _shutdown_incoming_queue();
    }

    // Wait until all threads processing the messages
    // for this service have completed.

    while (_threads.get() > 0)
    {
        pegasus_yield();
    }

    {
        AutoMutex autoMut(_meta_dispatcher_mutex);
        _service_count--;
        if (_service_count.get() == 0)
        {

            _stop_polling++;

    while (_incoming.count())
    {
        try {
            delete _incoming.dequeue();
        } catch (const ListClosed &e)
        {
            // If the list is closed, there is nothing we can do.

void MessageQueueService::_shutdown_incoming_queue()
{
    if (_incoming_queue_shutdown.get() > 0)
        return;

    AsyncIoctl *msg = new AsyncIoctl(

    msg->op->_state &= ~ASYNC_OPSTATE_COMPLETE;

    msg->op->_op_dest = this;
    msg->op->_request.insert_front(msg);
    try {
        _incoming.enqueue_wait(msg->op);
        _polling_sem.signal();
    } catch (const ListClosed &)
    {

    try
    {

        if (service->_die.get() != 0)
        {
            service->_threads--;
            return (0);
        }
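
        // _req_proc runs on a thread-pool thread and drains one service's
        // _incoming queue. Note that service->_threads was already
        // incremented by polling_routine() before this thread was awakened,
        // which is why it is decremented above (rather than incremented here)
        // when the service is dying.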

        // pull messages off the incoming queue and dispatch them. then
        // check pending messages that are non-blocking
        AsyncOpNode *operation = 0;

        {
            try
            {
                operation = service->_incoming.dequeue();
            }
            catch (ListClosed &)
            {

    // << Tue Feb 19 14:10:38 2002 mdd >>
    operation->lock();

    Message *rq = operation->_request.front();

    // optimization <<< Thu Mar 7 21:04:05 2002 mdd >>>
    // move this to the bottom of the loop when the majority of

    // divert legacy messages to handleEnqueue
    if ((rq != 0) && (!(rq->getMask() & message_mask::ha_async)))
    {
        rq = operation->_request.remove_front();
        operation->unlock();
        // delete the op node
        operation->release();

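// accept_async() is the entry point used to hand an AsyncOpNode to this
// service. It lazily starts the single polling thread on first use: if
// Thread::run() fails for lack of resources it yields and retries, and any
// other failure is reported by throwing an Exception. Accepted operations
// are placed on _incoming and _polling_sem is signalled so polling_routine()
// wakes up.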
Boolean MessageQueueService::accept_async(AsyncOpNode *op)
{
    if (_incoming_queue_shutdown.get() > 0)
        return false;
    if (_polling_thread == NULL)
    {
        _polling_thread = new Thread(
            polling_routine,
            reinterpret_cast<void *>(&_polling_list),
            false);
        ThreadStatus tr = PEGASUS_THREAD_OK;
        while ((tr = _polling_thread->run()) != PEGASUS_THREAD_OK)
        {
            if (tr == PEGASUS_THREAD_INSUFFICIENT_RESOURCES)
                pegasus_yield();
            else
                throw Exception(MessageLoaderParms(
                    "Common.MessageQueueService.NOT_ENOUGH_THREAD",
                    "Could not allocate thread for the polling thread."));
        }
    }
    // ATTN optimization remove the message checking altogether in the base
    // << Mon Feb 18 14:02:20 2002 mdd >>
    op->lock();
    Message *rq = op->_request.front();
    Message *rp = op->_response.front();
    op->unlock();

    if ((rq != 0 && (true == messageOK(rq))) ||
        (rp != 0 && (true == messageOK(rp))) && _die.get() == 0)
    {
        _incoming.enqueue_wait(op);
        _polling_sem.signal();
        return true;
    }

Boolean MessageQueueService::messageOK(const Message *msg)
{
    if (_incoming_queue_shutdown.get() > 0)
        return false;
    return true;
}
|
|
            // ensure we do not accept any further messages

            // ensure we don't recurse on IO_CLOSE
            if (_incoming_queue_shutdown.get() > 0)
                break;

            // set the closing flag

            AsyncOpNode *operation;
            try
            {
                operation = service->_incoming.dequeue();
            }
            catch (IPCException &)
            {

                break;
            } // message processing loop

            // shutdown the AsyncQueue
            service->_incoming.shutdown_queue();
            return;
        }
|
|
    }
    else
    {
        op->_request.insert_front(msg);
        (static_cast<AsyncMessage *>(msg))->op = op;
    }
    return _meta_dispatcher->route_async(op);
|
|
    if (op == 0)
    {
        op = get_op();
        op->_request.insert_front(msg);
        if (mask & message_mask::ha_async)
        {
            (static_cast<AsyncMessage *>(msg))->op = op;
|
|
    if (request->op == 0)
    {
        request->op = get_op();
        request->op->_request.insert_front(request);
        destroy_op = true;
    }

    request->op->_client_sem.wait();

    request->op->lock();
    AsyncReply *rpl =
        static_cast<AsyncReply *>(request->op->_response.remove_front());
    rpl->op = 0;
    request->op->unlock();

Uint32 MessageQueueService::get_next_xid()
{
    AutoMutex autoMut(_xidMutex);
    return ++_xid;
}
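// get_next_xid() supplies the unique transaction ids used by the async
// messaging code. The increment is performed while holding _xidMutex so
// that concurrent callers never observe the same value.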

PEGASUS_NAMESPACE_END