//%2005////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2000, 2001, 2002 BMC Software; Hewlett-Packard Development
// Company, L.P.; IBM Corp.; The Open Group; Tivoli Systems.
// Copyright (c) 2003 BMC Software; Hewlett-Packard Development Company, L.P.;
// IBM Corp.; EMC Corporation, The Open Group.
// Copyright (c) 2004 BMC Software; Hewlett-Packard Development Company, L.P.;
// IBM Corp.; EMC Corporation; VERITAS Software Corporation; The Open Group.
// Copyright (c) 2005 Hewlett-Packard Development Company, L.P.; IBM Corp.;
// EMC Corporation; VERITAS Software Corporation; The Open Group.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// THE ABOVE COPYRIGHT NOTICE AND THIS PERMISSION NOTICE SHALL BE INCLUDED IN
// ALL COPIES OR SUBSTANTIAL PORTIONS OF THE SOFTWARE. THE SOFTWARE IS PROVIDED
// "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
// LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
//==============================================================================
//
// Author: Mike Day (mdday@us.ibm.com)
//
// Modified By:
//              Amit K Arora, IBM (amita@in.ibm.com) for Bug#1090
//
//%/////////////////////////////////////////////////////////////////////////////

#include "MessageQueueService.h"
#include <Pegasus/Common/Tracer.h>
#include <Pegasus/Common/MessageLoader.h> //l10n

PEGASUS_NAMESPACE_BEGIN

cimom *MessageQueueService::_meta_dispatcher = 0;
AtomicInt MessageQueueService::_service_count(0);
AtomicInt MessageQueueService::_xid(1);
Mutex MessageQueueService::_meta_dispatcher_mutex;

static struct timeval create_time = {0, 1};
static struct timeval destroy_time = {300, 0};
static struct timeval deadlock_time = {0, 0};
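
// Timing parameters handed to the ThreadPool created in the constructor
// below. Assumed semantics, following ThreadPool's (create, destroy,
// deadlock) arguments: new pool threads may be created after only a very
// short wait, idle threads become eligible for reaping by
// kill_dead_threads() after roughly 300 seconds, and a deadlock interval
// of {0, 0} leaves deadlock detection disabled.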

ThreadPool *MessageQueueService::_thread_pool = 0;

DQueue<MessageQueueService> MessageQueueService::_polling_list(true);

Thread* MessageQueueService::_polling_thread = 0;

ThreadPool *MessageQueueService::get_thread_pool(void)
{
    return _thread_pool;
}
|
|
|
//
// MAX_THREADS_PER_SVC_QUEUE_LIMIT
//
// JR Wunderlich Jun 6, 2005
//

#define MAX_THREADS_PER_SVC_QUEUE_LIMIT 5000
#define MAX_THREADS_PER_SVC_QUEUE_DEFAULT 5

Uint32 max_threads_per_svc_queue;
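
// MAX_THREADS_PER_SVC_QUEUE itself is expected to come from the build
// configuration; a fallback is assumed here so the assignment in the
// constructor below always compiles. If the build already defines it,
// this is a no-op.
#ifndef MAX_THREADS_PER_SVC_QUEUE
# define MAX_THREADS_PER_SVC_QUEUE MAX_THREADS_PER_SVC_QUEUE_DEFAULT
#endif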
|
|
|
PEGASUS_THREAD_RETURN PEGASUS_THREAD_CDECL MessageQueueService::kill_idle_threads(void *parm)
{
    // Throttle: only attempt a cleanup pass if at least two minutes have
    // elapsed since the last one.
    static struct timeval now, last = {0, 0};
    gettimeofday(&now, NULL);
    int dead_threads = 0;

    if (now.tv_sec - last.tv_sec > 120)
    {
        gettimeofday(&last, NULL);
        try
        {
            dead_threads = MessageQueueService::_thread_pool->kill_dead_threads();
        }
        catch (...)
        {
            // Ignore errors; the next pass will retry the cleanup.
        }
    }

#ifdef PEGASUS_POINTER_64BIT
    return (PEGASUS_THREAD_RETURN)(Uint64)dead_threads;
#elif PEGASUS_PLATFORM_AIX_RS_IBMCXX
    return (PEGASUS_THREAD_RETURN)(unsigned long)dead_threads;
#else
    return (PEGASUS_THREAD_RETURN)(Uint32)dead_threads;
#endif
}
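
// kill_idle_threads() runs on a pool thread of its own: polling_routine()
// dispatches it through _thread_pool->allocate_and_awaken() whenever
// _check_idle_flag has been raised, so idle-thread cleanup only happens
// when a worker thread is actually available for it.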
|
|
|
|
|
void MessageQueueService::force_shutdown(Boolean destroy_flag)
{
    // Note: this early return leaves the forced-shutdown path below disabled.
    return;

#ifdef MESSAGEQUEUESERVICE_DEBUG
    //l10n
    MessageLoaderParms parms("Common.MessageQueueService.FORCING_SHUTDOWN",
        "Forcing shutdown of CIMOM Message Router");
    PEGASUS_STD(cout) << MessageLoader::getMessage(parms) << PEGASUS_STD(endl);
#endif

    MessageQueueService *svc;
    int counter = 0;
    _polling_list.lock();
    svc = _polling_list.next(0);

    while (svc != 0)
    {
#ifdef MESSAGEQUEUESERVICE_DEBUG
        //l10n - reuse same MessageLoaderParms to avoid multiple creates
        parms.msg_id = "Common.MessageQueueService.STOPPING_SERVICE";
        parms.default_msg = "Stopping $0";
        parms.arg0 = svc->getQueueName();
        PEGASUS_STD(cout) << MessageLoader::getMessage(parms) << PEGASUS_STD(endl);
#endif

        _polling_sem.signal();
        svc->_shutdown_incoming_queue();
        counter++;
        _polling_sem.signal();
        svc = _polling_list.next(svc);
    }
    _polling_list.unlock();

    _polling_sem.signal();

    MessageQueueService::_stop_polling = 1;

    if (destroy_flag == true)
    {
        svc = _polling_list.remove_last();
        while (svc)
        {
            delete svc;
            svc = _polling_list.remove_last();
        }
    }
}

PEGASUS_THREAD_RETURN PEGASUS_THREAD_CDECL MessageQueueService::polling_routine(void *parm)
{
    Thread *myself = reinterpret_cast<Thread *>(parm);
    DQueue<MessageQueueService> *list =
        reinterpret_cast<DQueue<MessageQueueService> *>(myself->get_parm());

    while (_stop_polling.value() == 0)
    {
        _polling_sem.wait();

        if (_stop_polling.value() != 0)
        {
            break;
        }

        // The polling_routine thread must hold the lock on the
        // _polling_list while processing incoming messages.
        // This lock is used to give this thread ownership of
        // services on the _polling_list.

        // This is necessary to avoid conflict with other threads
        // processing the _polling_list
        // (e.g., MessageQueueService::~MessageQueueService).

        list->lock();
        MessageQueueService *service = list->next(0);
        ThreadStatus rtn = PEGASUS_THREAD_OK;
        while (service != NULL)
        {
            if ((service->_incoming.count() > 0) &&
                (service->_die.value() == 0) &&
                (service->_threads < max_threads_per_svc_queue))
            {
                // The _threads count is used to track the
                // number of active threads that have been allocated
                // to process messages for this service.

                // The _threads count MUST be incremented while
                // the polling_routine owns the _polling_list
                // lock and has ownership of the service object.

                service->_threads++;
                try
                {
                    rtn = _thread_pool->allocate_and_awaken(
                        service, _req_proc, &_polling_sem);
                }
                catch (...)
                {
                    service->_threads--;

                    // allocate_and_awaken should never generate an exception.
                    PEGASUS_ASSERT(0);
                }

                // if no more threads available, break from processing loop
                if (rtn != PEGASUS_THREAD_OK)
                {
                    service->_threads--;
                    Logger::put(Logger::STANDARD_LOG, System::CIMSERVER, Logger::TRACE,
                        "Not enough threads to process this request. Skipping.");

                    Tracer::trace(TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL2,
                        "Could not allocate thread for %s. " \
                        "Queue has %d messages waiting and %d threads servicing." \
                        "Skipping the service for right now. ",
                        service->getQueueName(),
                        service->_incoming.count(),
                        service->_threads.value());

                    pegasus_yield();
                    service = NULL;
                }
            }
            if (service != NULL)
            {
                service = list->next(service);
            }
        }
        list->unlock();

        if (_check_idle_flag.value() != 0)
        {
            _check_idle_flag = 0;

            // try to do idle thread clean up processing when system is not busy
            // if system is busy there may not be a thread available to allocate
            // so nothing will be done and that is OK.

            if (_thread_pool->allocate_and_awaken(service, kill_idle_threads,
                    &_polling_sem) != PEGASUS_THREAD_OK)
            {
                Logger::put(Logger::STANDARD_LOG, System::CIMSERVER, Logger::TRACE,
                    "Not enough threads to kill idle threads. What an irony.");

                Tracer::trace(TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL2,
                    "Could not allocate thread to kill idle threads." \
                    "Skipping. ");
            }
        }
    }
    myself->exit_self((PEGASUS_THREAD_RETURN) 1);
    return(0);
}
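
// Wake-up protocol for the polling thread, as used in this file: code that
// queues work or changes state signals _polling_sem. accept_async() signals
// after inserting an AsyncOpNode, the destructor and force_shutdown() signal
// so the loop can observe _stop_polling, and &_polling_sem is also handed to
// allocate_and_awaken() (presumably so the poller is re-awakened when a
// worker thread completes).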
Semaphore MessageQueueService::_polling_sem(0);
AtomicInt MessageQueueService::_stop_polling(0);
AtomicInt MessageQueueService::_check_idle_flag(0);

MessageQueueService::MessageQueueService(const char *name,
                                         Uint32 queueID,
                                         Uint32 capabilities,
                                         Uint32 mask)
    : Base(name, true, queueID),
      _mask(mask),
      _die(0),
      _threads(0),
      _incoming(true, 0),
      _callback(true),
      _incoming_queue_shutdown(0),
      _callback_ready(0),
      _req_thread(_req_proc, this, false),
      _callback_thread(_callback_proc, this, false)
{
    _capabilities = (capabilities | module_capabilities::async);

    _default_op_timeout.tv_sec = 30;
    _default_op_timeout.tv_usec = 100;

    max_threads_per_svc_queue = MAX_THREADS_PER_SVC_QUEUE;

    // if requested threads gt MAX_THREADS_PER_SVC_QUEUE_LIMIT
    // then set to MAX_THREADS_PER_SVC_QUEUE_LIMIT
    if (max_threads_per_svc_queue > MAX_THREADS_PER_SVC_QUEUE_LIMIT)
    {
        max_threads_per_svc_queue = MAX_THREADS_PER_SVC_QUEUE_LIMIT;
    }

    // if requested threads eq 0 (unlimited)
    // then set to MAX_THREADS_PER_SVC_QUEUE_DEFAULT
    if (max_threads_per_svc_queue == 0)
    {
        max_threads_per_svc_queue = MAX_THREADS_PER_SVC_QUEUE_DEFAULT;
    }

    AutoMutex autoMut(_meta_dispatcher_mutex);

    if (_meta_dispatcher == 0)
    {
        _stop_polling = 0;
        PEGASUS_ASSERT(_service_count.value() == 0);
        _meta_dispatcher = new cimom();
        if (_meta_dispatcher == NULL)
        {
            throw NullPointer();
        }
        _thread_pool = new ThreadPool(0, "MessageQueueService", 0, 0,
            create_time, destroy_time, deadlock_time);

        _polling_thread = new Thread(polling_routine,
            reinterpret_cast<void *>(&_polling_list),
            false);
        while (!_polling_thread->run())
        {
            pegasus_yield();
        }
    }
    _service_count++;

    if (false == register_service(name, _capabilities, _mask))
    {
        //l10n
        MessageLoaderParms parms("Common.MessageQueueService.UNABLE_TO_REGISTER",
            "MessageQueueService Base Unable to register with Meta Dispatcher");

        throw BindFailedException(parms);
    }

    _polling_list.insert_last(this);

    // _meta_dispatcher_mutex.unlock(); //Bug#1090
    // _callback_thread.run();
    // _req_thread.run();
}
|
MessageQueueService::~MessageQueueService(void)
{
    _die = 1;

    // The polling_routine locks the _polling_list while
    // processing the incoming messages for services on the
    // list. Deleting the service from the _polling_list
    // prior to processing avoids synchronization issues
    // with the polling_routine.

    _polling_list.remove(this);

    _callback_ready.signal();

    // ATTN: The code for closing the _incoming queue
    // is not working correctly. In OpenPegasus 2.4,
    // execution of the following code is very timing
    // dependent. This needs to be fixed.
    // See Bug 4079 for details.
    if (_incoming_queue_shutdown.value() == 0)
    {
        _shutdown_incoming_queue();
    }
    // _callback_thread.join();

    // Wait until all threads processing the messages
    // for this service have completed.
    while (_threads.value() > 0)
    {
        pegasus_yield();
    }

    {
        AutoMutex autoMut(_meta_dispatcher_mutex);
        _service_count--;
        if (_service_count.value() == 0)
        {
            _stop_polling++;
            _polling_sem.signal();
            _polling_thread->join();
            delete _polling_thread;
            _polling_thread = 0;
            _meta_dispatcher->_shutdown_routed_queue();
            delete _meta_dispatcher;
            _meta_dispatcher = 0;

            delete _thread_pool;
            _thread_pool = 0;
        }
    } // mutex unlocks here

    // Clean up in case there is extra stuff on the queue.
    while (_incoming.count())
    {
        try
        {
            delete _incoming.remove_first();
        }
        catch (const ListClosed &)
        {
            // If the list is closed, there is nothing we can do.
            break;
        }
    }
}

void MessageQueueService::_shutdown_incoming_queue(void)
{
    if (_incoming_queue_shutdown.value() > 0)
        return;

    // Queue a fire-and-forget IO_CLOSE ioctl to our own incoming queue.
    // (Constructor argument order assumed from AsyncIoctl as used in
    // Pegasus: xid, op, destination, response, blocking, control code,
    // parameter, pointer parameter.)
    AsyncIoctl *msg = new AsyncIoctl(get_next_xid(),
                                     0,
                                     _queueId,
                                     _queueId,
                                     true,
                                     AsyncIoctl::IO_CLOSE,
                                     0,
                                     0);

    msg->op = get_op();
    msg->op->_op_dest = this;
    msg->op->_request.insert_first(msg);

    try
    {
        _incoming.insert_last_wait(msg->op);
        _polling_sem.signal();
        // _req_thread.join();
    }
    catch (const ListClosed &)
    {
        // The queue has already been shut down. This happens when two
        // AsyncIoctl::IO_CLOSE messages are generated and one gets
        // processed first.
        delete msg;
    }
    catch (const Permission &)
    {
        delete msg;
    }
}

void MessageQueueService::enqueue(Message *msg)
{
    PEG_METHOD_ENTER(TRC_MESSAGEQUEUESERVICE, "MessageQueueService::enqueue()");

    Base::enqueue(msg);

    PEG_METHOD_EXIT();
}

PEGASUS_THREAD_RETURN PEGASUS_THREAD_CDECL MessageQueueService::_req_proc(void * parm)
{
    MessageQueueService *service = reinterpret_cast<MessageQueueService *>(parm);
    // pull messages off the incoming queue and dispatch them. then
    // check pending messages that are non-blocking
    AsyncOpNode *operation = 0;

    if (service->_die.value() == 0)
    {
        try
        {
            operation = service->_incoming.remove_first();
        }
        catch (ListClosed &)
        {
            operation = 0;
            service->_threads--;
            return (0);
        }
        if (operation)
        {
            operation->_service_ptr = service;
            service->_handle_incoming_operation(operation);
        }
    }
    service->_threads--;
    return (0);
}

|
|
    else
    {
        PEGASUS_ASSERT(rq != 0);
        // ATTN: optimization
        // << Wed Mar  6 15:00:39 2002 mdd >>
        // put thread and queue into the asyncopnode structure.
        // (static_cast<AsyncMessage *>(rq))->_myself = operation->_thread_ptr;
        // (static_cast<AsyncMessage *>(rq))->_service = operation->_service_ptr;
        // done << Tue Mar 12 14:49:07 2002 mdd >>
        operation->unlock();
        _handle_async_request(static_cast<AsyncRequest *>(rq));
    }
|
|
    AsyncRequest *async = static_cast<AsyncRequest *>(request->_async);
    AsyncOpNode *op = async->op;
    request->_async = 0;
    // the legacy request is going to be deleted by its handler
    // remove it from the op node
    op->_request.remove(request);

    static_cast<AsyncLegacyOperationStart *>(async)->get_action();

    AsyncLegacyOperationResult *async_result =
        new AsyncLegacyOperationResult(

Boolean MessageQueueService::accept_async(AsyncOpNode *op)
{
    if (_incoming_queue_shutdown.value() > 0)
        return false;

    // ATTN optimization remove the message checking altogether in the base
    // << Mon Feb 18 14:02:20 2002 mdd >>

    // Inspect the op's queued request/response with messageOK() before
    // accepting it (reconstruction, mirroring the commented-out variant
    // below).
    op->lock();
    Message *rq = op->_request.next(0);
    Message *rp = op->_response.next(0);
    op->unlock();

    if ((rq != 0 && (true == messageOK(rq))) ||
        (rp != 0 && (true == messageOK(rp))) && _die.value() == 0)
    {
        _incoming.insert_last_wait(op);
        _polling_sem.signal();
        return true;
    }
//    else
//    {
//       if( (rq != 0 && (true == MessageQueueService::messageOK(rq))) ||
//           (rp != 0 && ( true == MessageQueueService::messageOK(rp) )) &&
//           _die.value() == 0)
//       {
//          MessageQueueService::_incoming.insert_last_wait(op);
//          return true;
//       }
//    }

    return false;
}
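
// accept_async() is the hand-off point into this service: a true return
// means the AsyncOpNode is now queued on _incoming and the polling thread
// has been signaled; false means the incoming queue is shutting down or the
// message was refused, and the caller retains ownership of the op node.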

    return true;
}

|
|
// made pure virtual
// << Wed Mar  6 15:11:31 2002 mdd >>
// void MessageQueueService::handleEnqueue(Message *msg)
// {
//    if ( msg )
//       delete msg;
// }

// made pure virtual
// << Wed Mar  6 15:11:56 2002 mdd >>
// void MessageQueueService::handleEnqueue(void)
// {
//    Message *msg = dequeue();
//    handleEnqueue(msg);
// }

void MessageQueueService::handle_heartbeat_request(AsyncRequest *req)
{
    // default action is to echo a heartbeat response
    // (argument order assumed from the AsyncReply constructor)
    AsyncReply *reply =
        new AsyncReply(async_messages::HEARTBEAT,
                       req->getKey(),
                       req->getRouting(),
                       0,
                       req->op,
                       async_results::OK,
                       req->resp,
                       false);
    _completeAsyncResponse(req, reply, ASYNC_OPSTATE_COMPLETE, 0);
}

void MessageQueueService::handle_heartbeat_reply(AsyncReply *rep)
{
}

void MessageQueueService::handle_AsyncIoctl(AsyncIoctl *req)
{
    switch (req->ctl)
    {
        case AsyncIoctl::IO_CLOSE:
        {
            MessageQueueService *service =
                static_cast<MessageQueueService *>(req->op->_service_ptr);

#ifdef MESSAGEQUEUESERVICE_DEBUG
            PEGASUS_STD(cout) << service->getQueueName()
                << " Received AsyncIoctl::IO_CLOSE " << PEGASUS_STD(endl);
#endif

            // respond to this message. this is fire and forget, so we
            // don't need to delete anything.
            // this takes care of two problems that were being found
            // << Thu Oct  9 10:52:48 2003 mdd >>
            _make_response(req, async_results::OK);
            // ensure we do not accept any further messages

            // ensure we don't recurse on IO_CLOSE
            if (_incoming_queue_shutdown.value() > 0)
                break;

            // set the closing flag
            service->_incoming_queue_shutdown = 1;

            // empty out the queue
            while (1)
            {
                AsyncOpNode *operation;
                try
                {
                    operation = service->_incoming.remove_first();
                }
                catch (IPCException &)
                {
                    break;
                }
                if (operation)
                {
                    operation->_service_ptr = service;
                    service->_handle_incoming_operation(operation);
                }
                else
                    break;
            } // message processing loop

            // shutdown the AsyncDQueue
            service->_incoming.shutdown_queue();
            return;
        }

        default:
            _handle_async_request(static_cast<AsyncRequest *>(req));
    }
}

void MessageQueueService::handle_CimServiceStart(CimServiceStart *req)
{
#ifdef MESSAGEQUEUESERVICE_DEBUG
    PEGASUS_STD(cout) << getQueueName() << " received START" << PEGASUS_STD(endl);
#endif

    // clear the stopped bit and update
    _capabilities &= (~(module_capabilities::stopped));
    _make_response(req, async_results::OK);
}

void MessageQueueService::handle_CimServiceStop(CimServiceStop *req)
{
#ifdef MESSAGEQUEUESERVICE_DEBUG
    PEGASUS_STD(cout) << getQueueName() << " received STOP" << PEGASUS_STD(endl);
#endif

    // set the stopped bit and update
    _capabilities |= module_capabilities::stopped;
    _make_response(req, async_results::CIM_STOPPED);
}

    Boolean destroy_op = false;

    if (request->op == 0)
    {
        request->op = get_op();
        request->op->_request.insert_first(request);
        destroy_op = true;
    }

Uint32 MessageQueueService::get_next_xid(void)
{
    static Mutex _monitor;
    Uint32 value;

    // Serialize the increment and the read: AtomicInt alone would let
    // another thread bump _xid between the ++ and the value() call.
    AutoMutex autoMut(_monitor);
    _xid++;
    value = _xid.value();
    return value;
}

PEGASUS_NAMESPACE_END