
Diff for /pegasus/src/Pegasus/Common/MessageQueueService.cpp between version 1.151 and 1.157

version 1.151 (2008/11/05 05:24:34)  →  version 1.157 (2008/12/09 18:14:20)
Line 1 (v1.151) / Line 1 (v1.157)

- [v1.151 file header: the "//%2006" block, carrying the 2000-2006 copyright notices for BMC Software, Hewlett-Packard Development Company L.P., IBM Corp., EMC Corporation, VERITAS Software Corporation, Symantec Corporation, Tivoli Systems, and The Open Group, followed by the MIT-style permission and warranty text and a "//=====" separator line.]
+ [v1.157 file header: the "//%LICENSE" block, which licenses the file to The Open Group (TOG) under the contributor license agreements described in OpenPegasusNOTICE.txt and the OpenPegasus Open Source License, followed by the same MIT-style permission and warranty text re-wrapped to shorter lines.]
  //
  //%/////////////////////////////////////////////////////////////////////////////
  
Line 46 (v1.151) / Line 44 (v1.157)

  ThreadPool *MessageQueueService::_thread_pool = 0;

  MessageQueueService::PollingList* MessageQueueService::_polling_list;
- Mutex MessageQueueService::_polling_list_mutex;
+ Boolean MessageQueueService::_monitoring_polling_list = false;

  Thread* MessageQueueService::_polling_thread = 0;
  
+ /*
+     PollingListEntry holds the service and its status, i.e. whether the
+     service is dead or not. Each service creates its own PollingListEntry
+     and adds it to the PollingList, which is monitored by the polling
+     thread. The polling thread monitors the service only if its die flag
+     is not set.
+ */
+
+ struct PollingListEntry : public Linkable
+ {
+     MessageQueueService *service;
+     Boolean die;
+
+     PollingListEntry(MessageQueueService *service)
+         :service(service),
+          die(false)
+     {
+     }
+     ~PollingListEntry()
+     {
+     }
+ private:
+     PollingListEntry(const PollingListEntry&);
+     PollingListEntry& operator = (const PollingListEntry&);
+ };
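Linkable here is, in effect, the hook that lets PollingListEntry sit directly on the intrusive List used for _polling_list: each entry carries its own link pointers, so insertion never allocates and traversal needs no auxiliary nodes. Copying such an entry would duplicate its link pointers, which is why the copy constructor and assignment operator above are declared private. A rough standalone sketch of the idiom (illustrative names only, not the Pegasus List/Linkable API):

    // Minimal intrusive doubly linked list: entries embed their own links.
    struct Linked
    {
        Linked* next = nullptr;
        Linked* prev = nullptr;
    };

    struct IntrusiveList
    {
        Linked* head = nullptr;
        Linked* tail = nullptr;

        void insertBack(Linked* e)                 // like _polling_list->insert_back(entry)
        {
            e->prev = tail;
            e->next = nullptr;
            (tail ? tail->next : head) = e;
            tail = e;
        }

        Linked* front() const { return head; }                // like list->front()
        static Linked* nextOf(Linked* e) { return e->next; }  // like list->next_of(entry)
    };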
   
  ThreadPool *MessageQueueService::get_thread_pool()
  {
      return _thread_pool;
Line 74 (v1.151) / Line 97 (v1.157)

      void* parm)
  {
      Thread *myself = reinterpret_cast<Thread *>(parm);
-     List<MessageQueueService, Mutex> *list =
-         reinterpret_cast<List<MessageQueueService, Mutex>*>(myself->get_parm());
+     MessageQueueService::PollingList *list =
+         reinterpret_cast<MessageQueueService::PollingList*>(myself->get_parm());

      while (_stop_polling.get()  == 0)
      {

Line 86 (v1.151) / Line 109 (v1.157)

              break;
          }

-         // The polling_routine thread must hold the lock on the
-         // _polling_list while processing incoming messages.
-         // This lock is used to give this thread ownership of
-         // services on the _polling_routine list.
-
-         // This is necessary to avoid confict with other threads
-         // processing the _polling_list
-         // (e.g., MessageQueueServer::~MessageQueueService).
-
-         list->lock();
-         MessageQueueService *service = list->front();
+         PollingListEntry *entry = list->front();

          ThreadStatus rtn = PEGASUS_THREAD_OK;
-         while (service != NULL)
+
+         // Setting this flag to 'true' will prevent race condition between
+         // the polling thread and thread executing the MessageQueueService
+         // destructor.
+         PEGASUS_ASSERT(_monitoring_polling_list == false);
+         _monitoring_polling_list = true;
+
+         do
          {
-             if ((service->_incoming.count() > 0) &&
-                 (service->_die.get() == 0) &&
+             MessageQueueService *service = entry->service;
+             // Note: MessageQueueService destructor sets die flag when service
+             // gets destroyed during CIMOM shutdown. Don't monitor the service
+             // if die flag set.
+             if ((entry->die == false) &&
+                 (service->_incoming.count() > 0) &&
                  (service->_threads.get() < max_threads_per_svc_queue))
              {
                  // The _threads count is used to track the
                  // number of active threads that have been allocated
                  // to process messages for this service.

-                 // The _threads count MUST be incremented while
-                 // the polling_routine owns the _polling_thread
-                 // lock and has ownership of the service object.
-
                  service->_threads++;
                  try
                  {

Line 138 (v1.151) / Line 158 (v1.157)

                          service->_threads.get()));

                      Threads::yield();
-                     service = NULL;
-                 }
-             }
-             if (service != NULL)
-             {
-                 service = list->next_of(service);
+                     break;
              }
          }
-         list->unlock();
+             entry = list->next_of(entry);
+         } while (entry != NULL);
+         _monitoring_polling_list = false;
      }
      return ThreadReturnType(0);
  }
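Taken together, this hunk drops the list->lock()/list->unlock() bracket around each polling pass and replaces it with two pieces of state: a per-entry die flag that the destructor sets, and the _monitoring_polling_list flag that the poller raises for the duration of a pass and that the destructor waits on before freeing anything. A minimal standalone sketch of that handshake, using std::atomic and std::thread rather than the Pegasus Thread and AtomicInt types (all names below are illustrative, not from the source):

    #include <atomic>
    #include <chrono>
    #include <thread>
    #include <vector>

    struct Entry
    {
        std::atomic<bool> die{false};
        std::atomic<int>  pendingWork{0};     // stands in for _incoming.count()
    };

    std::atomic<bool> monitoring{false};      // stands in for _monitoring_polling_list
    std::atomic<bool> stopPolling{false};     // stands in for _stop_polling

    void pollingRoutine(std::vector<Entry*>& list)
    {
        while (!stopPolling.load())
        {
            monitoring.store(true);           // pass in progress; destructors must wait
            for (Entry* e : list)
            {
                if (!e->die.load() && e->pendingWork.load() > 0)
                {
                    // dispatch a worker thread for this entry's queue here
                }
            }
            monitoring.store(false);          // pass finished; destructors may proceed
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
    }

    void retireEntry(Entry* e)                // what ~MessageQueueService does, in miniature
    {
        e->die.store(true);                   // poller will skip this entry from now on
        while (monitoring.load())             // wait out any pass that is already running
            std::this_thread::yield();
        // only now is it safe to tear the entry's service down
    }

In the real code the destructor also waits for the service's _threads count to reach zero before this point, so workers dispatched during an earlier pass have already drained.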
Line 160 (v1.151) / Line 177 (v1.157)

      const char* name,
      Uint32 queueID)
      : Base(name, true,  queueID),
-       _die(0),
        _threads(0),
        _incoming(),
        _incoming_queue_shutdown(0)
Line 181 (v1.151) / Line 197 (v1.157)

      PEG_TRACE((TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL3,
         "max_threads_per_svc_queue set to %u.", max_threads_per_svc_queue));

-     AutoMutex autoMut(_meta_dispatcher_mutex);
+     AutoMutex mtx(_meta_dispatcher_mutex);

      if (_meta_dispatcher == 0)
      {

Line 197 (v1.151) / Line 213 (v1.157)

      }
      _service_count++;

-     _get_polling_list()->insert_back(this);
+     // Add to the polling list
+     if (!_polling_list)
+     {
+         _polling_list = new PollingList;
+     }
+     pollingListEntry = new PollingListEntry(this);
+     _polling_list->insert_back(pollingListEntry);
  }
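Note that the lazy creation formerly done by _get_polling_list() now happens in the constructor, which already holds _meta_dispatcher_mutex through the AutoMutex above; that is what allows the dedicated _polling_list_mutex to be removed. A small sketch of the same pattern with standard-library types (hypothetical names):

    #include <list>
    #include <mutex>

    static std::mutex registryMutex;              // plays the role of _meta_dispatcher_mutex
    static std::list<int>* registry = nullptr;    // plays the role of _polling_list

    void registerService(int entry)
    {
        std::lock_guard<std::mutex> guard(registryMutex);  // like AutoMutex mtx(...)
        if (!registry)
            registry = new std::list<int>;        // first registration creates the list
        registry->push_back(entry);               // like _polling_list->insert_back(...)
    }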
  
  
  MessageQueueService::~MessageQueueService()
  {
-     _die = 1;
-
-     // The polling_routine locks the _polling_list while
-     // processing the incoming messages for services on the
-     // list.  Deleting the service from the _polling_list
-     // prior to processing, avoids synchronization issues
-     // with the _polling_routine.
-
-     // ATTN: added to prevent assertion in List in which the list does not
-     // contain this element.
-
-     if (_get_polling_list()->contains(this))
-         _get_polling_list()->remove(this);
-
-     // ATTN: The code for closing the _incoming queue
-     // is not working correctly. In OpenPegasus 2.5,
-     // execution of the following code is very timing
-     // dependent. This needs to be fix.
-     // See Bug 4079 for details.
+     // Close incoming queue.
      if (_incoming_queue_shutdown.get() == 0)
      {
-         _shutdown_incoming_queue();
+         AsyncIoClose *msg = new AsyncIoClose(
+             0,
+             _queueId,
+             _queueId,
+             true);
+         SendForget(msg);
+         // Wait until our queue has been shutdown.
+         while (_incoming_queue_shutdown.get() == 0)
+         {
+             Threads::yield();
+         }
      }

+     // Die now. Setting this flag to true instructs the polling thread not to
+     // monitor this service.
+     pollingListEntry->die = true;
+
      // Wait until all threads processing the messages
      // for this service have completed.
      while (_threads.get() > 0)
      {
          Threads::yield();
      }

-     {
-         AutoMutex autoMut(_meta_dispatcher_mutex);
+     // Wait until monitoring the polling list is done.
+     while (_monitoring_polling_list)
+     {
+         Threads::yield();
+     }
+
+     {
+         AutoMutex mtx(_meta_dispatcher_mutex);
+
          _service_count--;
+         // If we are last service to die, delete metadispatcher.
          if (_service_count.get() == 0)
          {
              _stop_polling++;
              _polling_sem.signal();
              if (_polling_thread)

Line 249 (v1.151) / Line 273 (v1.157)

                  delete _polling_thread;
                  _polling_thread = 0;
              }
-             _meta_dispatcher->_shutdown_routed_queue();
              delete _meta_dispatcher;
              _meta_dispatcher = 0;

              delete _thread_pool;
              _thread_pool = 0;
+
+             // Cleanup polling list
+             PollingListEntry *entry;
+             while ((entry = _polling_list->remove_front()))
+             {
+                 delete entry;
+             }
          }
-     } // mutex unlocks here
+     }

      // Clean up any extra stuff on the queue.
      AsyncOpNode* op = 0;

Line 266 (v1.151) / Line 296 (v1.157)

      }
  }
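The ordering in the new destructor is what keeps the lock-free polling list safe: close and drain the service's own queue first, then mark the entry dead, then wait for the service's worker threads, then wait out any polling pass still in flight, and only then (for the last service) tear down the shared state. A condensed sketch of that ordering with standard-library types (illustrative only; the member names mirror, but are not, the Pegasus ones):

    #include <atomic>
    #include <thread>

    std::atomic<bool> monitoringPass{false};      // mirrors _monitoring_polling_list

    struct ServiceModel
    {
        std::atomic<int>  queueShutdown{0};       // mirrors _incoming_queue_shutdown
        std::atomic<int>  activeThreads{0};       // mirrors _threads
        std::atomic<bool> die{false};             // mirrors pollingListEntry->die

        ~ServiceModel()
        {
            // 1. ask the queue to close (fire and forget), then wait for the ack
            requestQueueClose();
            while (queueShutdown.load() == 0) std::this_thread::yield();

            // 2. stop the poller from dispatching any new work for this service
            die.store(true);

            // 3. let workers that were already dispatched finish
            while (activeThreads.load() > 0) std::this_thread::yield();

            // 4. wait out a polling pass that may still be reading our entry
            while (monitoringPass.load()) std::this_thread::yield();

            // 5. the last service out would now stop the poller and free shared state
        }

        void requestQueueClose() { queueShutdown.store(1); }  // placeholder for SendForget(AsyncIoClose)
    };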
  
- void MessageQueueService::_shutdown_incoming_queue()
- {
-     if (_incoming_queue_shutdown.get() > 0)
-         return;
-
-     AsyncIoctl *msg = new AsyncIoctl(
-         0,
-         _queueId,
-         _queueId,
-         true,
-         AsyncIoctl::IO_CLOSE,
-         0,
-         0);
-
-     msg->op = get_op();
-     msg->op->_flags = ASYNC_OPFLAGS_FIRE_AND_FORGET;
-
-     msg->op->_op_dest = this;
-     msg->op->_request.reset(msg);
-     if (_incoming.enqueue(msg->op))
-     {
-         _polling_sem.signal();
-     }
-     else
-     {
-         // This means the queue has already been shut-down (happens  when there
-         // are two AsyncIoctrl::IO_CLOSE messages generated and one got first
-         // processed.
-         delete msg;
-     }
- }

  void MessageQueueService::enqueue(Message* msg)
  {
      PEG_METHOD_ENTER(TRC_MESSAGEQUEUESERVICE, "MessageQueueService::enqueue()");
Line 317 (v1.151) / Line 314 (v1.157)

      PEGASUS_ASSERT(service != 0);
      try
      {
-         if (service->_die.get() != 0)
+         if (service->pollingListEntry->die)
          {
              service->_threads--;
              return 0;
Line 413 (v1.151) / Line 410 (v1.157)

  void MessageQueueService::_handle_async_request(AsyncRequest *req)
  {
      MessageType type = req->getType();
-     if (type == ASYNC_IOCTL)
+     if (type == ASYNC_IOCLOSE)
      {
-         handle_AsyncIoctl(static_cast<AsyncIoctl *>(req));
+         handle_AsyncIoClose(static_cast<AsyncIoClose*>(req));
      }
      else if (type == ASYNC_CIMSERVICE_START)
      {
Line 489 (v1.151) / Line 486 (v1.157)

      cimom::_make_response(req, code);
  }

  void MessageQueueService::_completeAsyncResponse(
      AsyncRequest* request,
      AsyncReply* reply)
Line 516 (v1.151) / Line 512 (v1.157)

          return false;
      if (_polling_thread == NULL)
      {
+         PEGASUS_ASSERT(_polling_list);
          _polling_thread = new Thread(
              polling_routine,
-             reinterpret_cast<void *>(_get_polling_list()),
+             reinterpret_cast<void *>(_polling_list),
              false);
          ThreadStatus tr = PEGASUS_THREAD_OK;
          while ( (tr =_polling_thread->run()) != PEGASUS_THREAD_OK)

Line 531 (v1.151) / Line 528 (v1.157)

                      "Could not allocate thread for the polling thread."));
          }
      }
-     if (_die.get() == 0)
+     if (pollingListEntry->die == false)
      {
          if (_incoming.enqueue(op))
          {

Line 542 (v1.151) / Line 539 (v1.157)

      return false;
  }
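Also visible in this hunk: the polling thread is started in a retry loop because Thread::run() can fail when thread resources are temporarily exhausted. The same idea expressed with std::thread, which reports the failure as an exception rather than a status code (an illustrative sketch, not the Pegasus API):

    #include <system_error>
    #include <thread>

    std::thread startPollerWithRetry(void (*routine)())
    {
        for (;;)
        {
            try
            {
                return std::thread(routine);   // success: hand the running thread back
            }
            catch (const std::system_error&)   // resource exhaustion (EAGAIN-style failure)
            {
                std::this_thread::yield();     // back off briefly, then try again
                // a real implementation would give up on non-retryable errors
            }
        }
    }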
  
- void MessageQueueService::handle_AsyncIoctl(AsyncIoctl* req)
- {
-     switch (req->ctl)
-     {
-         case AsyncIoctl::IO_CLOSE:
-         {
+ void MessageQueueService::handle_AsyncIoClose(AsyncIoClose *req)
+ {
      MessageQueueService *service =
-         static_cast<MessageQueueService *>(req->op->_service_ptr);
+         static_cast<MessageQueueService*>(req->op->_op_dest);

  #ifdef MESSAGEQUEUESERVICE_DEBUG
      PEGASUS_STD(cout) << service->getQueueName() <<
-         " Received AsyncIoctl::IO_CLOSE " << PEGASUS_STD(endl);
+         " Received AsyncIoClose " << PEGASUS_STD(endl);
  #endif
+     // set the closing flag, don't accept any more messages
+     service->_incoming_queue_shutdown = 1;

      // respond to this message. this is fire and forget, so we
      // don't need to delete anything.
      // this takes care of two problems that were being found
      // << Thu Oct  9 10:52:48 2003 mdd >>
      _make_response(req, async_results::OK);
-             // ensure we do not accept any further messages
-
-             // ensure we don't recurse on IO_CLOSE
-             if (_incoming_queue_shutdown.get() > 0)
-                 break;
-
-             // set the closing flag
-             service->_incoming_queue_shutdown = 1;
-             // empty out the queue
-             while (1)
-             {
-                 AsyncOpNode* operation = 0;
-                 try
-                 {
-                     operation = service->_incoming.dequeue();
-                 }
-                 catch (...)
-                 {
-                     break;
-                 }
-                 if (operation)
-                 {
-                     operation->_service_ptr = service;
-                     service->_handle_incoming_operation(operation);
-                 }
-                 else
-                     break;
-             } // message processing loop
-
-             // shutdown the AsyncQueue
-             service->_incoming.close();
-             return;
-         }
-
-         default:
-             _make_response(req, async_results::CIM_NAK);
-     }
  }
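This handler is the receiving end of the close handshake used by the new destructor: the destructor fires an AsyncIoClose at its own queue with SendForget() and then spins until _incoming_queue_shutdown becomes non-zero, and handle_AsyncIoClose is what sets that flag from the service's own message loop before acknowledging the request. A standalone model of such a fire-and-forget close (not the Pegasus API; names are illustrative):

    #include <atomic>
    #include <condition_variable>
    #include <mutex>
    #include <queue>
    #include <thread>

    struct ClosableQueue
    {
        std::mutex              m;
        std::condition_variable cv;
        std::queue<int>         messages;      // message 0 plays the role of AsyncIoClose
        std::atomic<bool>       shutdown{false};

        void post(int msg)                     // fire and forget: no reply is awaited
        {
            { std::lock_guard<std::mutex> g(m); messages.push(msg); }
            cv.notify_one();
        }

        void serviceLoop()                     // runs on the queue's own thread
        {
            for (;;)
            {
                std::unique_lock<std::mutex> g(m);
                cv.wait(g, [this] { return !messages.empty(); });
                int msg = messages.front();
                messages.pop();
                if (msg == 0)
                {
                    shutdown.store(true);      // like service->_incoming_queue_shutdown = 1
                    return;
                }
                // ... handle ordinary messages here ...
            }
        }

        void closeAndWait()                    // what the destructor does, in miniature
        {
            post(0);
            while (!shutdown.load())
                std::this_thread::yield();     // like the Threads::yield() spin in ~MessageQueueService
        }
    };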
  
  void MessageQueueService::handle_CimServiceStart(CimServiceStart* req)

Line 769 (v1.151) / Line 727 (v1.157)

      return queue->getQueueId();
  }

- MessageQueueService::PollingList* MessageQueueService::_get_polling_list()
- {
-     _polling_list_mutex.lock();
-
-     if (!_polling_list)
-         _polling_list = new PollingList;
-
-     _polling_list_mutex.unlock();
-
-     return _polling_list;
- }
-
  PEGASUS_NAMESPACE_END


Legend: lines prefixed with "-" were removed from v1.151; lines prefixed with "+" were added in v1.157; unprefixed lines are unchanged context shown for orientation. "Line M (v1.151) / Line N (v1.157)" headers give the position of each hunk in the two revisions.
