(file) Return to MessageQueueService.cpp CVS log (file) (dir) Up to [Pegasus] / pegasus / src / Pegasus / Common

Diff for /pegasus/src/Pegasus/Common/MessageQueueService.cpp between version 1.75 and 1.76

Left column: version 1.75 (committed 2003/09/25 06:17:57). Right column: version 1.76 (committed 2003/10/10 14:31:27).
Line 80 
Line 80 
 } }
  
  
 void MessageQueueService::force_shutdown(void)  void MessageQueueService::force_shutdown(Boolean destroy_flag)
 { {
  
 #ifdef MESSAGEQUEUESERVICE_DEBUG #ifdef MESSAGEQUEUESERVICE_DEBUG
         //l10n         //l10n
    //PEGASUS_STD(cout) << "Forcing shutdown of CIMOM Message Router" << PEGASUS_STD(endl);  
    MessageLoaderParms parms("Common.MessageQueueService.FORCING_SHUTDOWN",    MessageLoaderParms parms("Common.MessageQueueService.FORCING_SHUTDOWN",
                             "Forcing shutdown of CIMOM Message Router");                             "Forcing shutdown of CIMOM Message Router");
    PEGASUS_STD(cout) << MessageLoader::getMessage(parms) << PEGASUS_STD(endl);    PEGASUS_STD(cout) << MessageLoader::getMessage(parms) << PEGASUS_STD(endl);
 #endif #endif
      PEGASUS_STD(cout) << "MessageQueueService::force_shutdown()" << PEGASUS_STD(endl);
   
   
  
    //MessageQueueService::_stop_polling = 1;  
    MessageQueueService *svc;    MessageQueueService *svc;
    int counter = 0;    int counter = 0;
    _polling_list.lock();    _polling_list.lock();
Line 101 
Line 102 
    {    {
 #ifdef MESSAGEQUEUESERVICE_DEBUG #ifdef MESSAGEQUEUESERVICE_DEBUG
                 //l10n - reuse same MessageLoaderParms to avoid multiple creates                 //l10n - reuse same MessageLoaderParms to avoid multiple creates
         //PEGASUS_STD(cout) << "Stopping " << svc->getQueueName() << PEGASUS_STD(endl);  
         parms.msg_id = "Common.MessageQueueService.STOPPING_SERVICE";         parms.msg_id = "Common.MessageQueueService.STOPPING_SERVICE";
                 parms.default_msg = "Stopping $0";                 parms.default_msg = "Stopping $0";
                 parms.arg0 = svc->getQueueName();                 parms.arg0 = svc->getQueueName();
Line 118 
Line 118 
  
    _polling_sem.signal();    _polling_sem.signal();
  
   while ( counter != 0) {     PEGASUS_STD(cout) << "Force shutdown found " << counter << " services" << PEGASUS_STD(endl);
         Thread::sleep(100);  
         _polling_list.lock();     MessageQueueService::_stop_polling = 1;
         svc = _polling_list.next(0);  
         while (svc != 0 ) {     if(destroy_flag == true)
                 if (svc ->_incoming_queue_shutdown.value() == 1 ) {     {
                         counter--;  
                 }        svc = _polling_list.remove_last();
                 svc  = _polling_list.next(svc);        while(svc)
         {
            PEGASUS_STD(cout) << "preparing to delete " << svc->getQueueName() << PEGASUS_STD(endl);
   
            delete svc;
            svc = _polling_list.remove_last();
         }         }
         _polling_list.unlock();  
    }    }
    MessageQueueService::_stop_polling = 1;  
 } }
  
  
Line 240 
Line 244 
 MessageQueueService::~MessageQueueService(void) MessageQueueService::~MessageQueueService(void)
 { {
    _die = 1;    _die = 1;
    // IBM-KR: This causes a new message (IO_CLOSE) to be spawned, which  
    // doesn't get picked up anyone. The idea was that the message would be     if (_incoming_queue_shutdown.value() == 0 )
    // picked up handle_AsyncIoctl which closes the queue and does cleaning.     {
    // That described behavior has never surfaced itself. If it does appear,        _shutdown_incoming_queue();
    // uncomment the if ( ..) { } block below.     }
   
    // Note: The handle_AsyncIcotl does get called when force_shutdown(void) gets  
    // called during Pegasus shutdown procedure (in case you ever wondered).  
   
    //if (_incoming_queue_shutdown.value() == 0 )  
    //{  
    //   _shutdown_incoming_queue();  
    //}  
    _callback_ready.signal();    _callback_ready.signal();
 //   _callback_thread.join();  
  
    _meta_dispatcher_mutex.lock(pegasus_thread_self());    _meta_dispatcher_mutex.lock(pegasus_thread_self());
    _service_count--;    _service_count--;
    if (_service_count.value() == 0 )    if (_service_count.value() == 0 )
    {    {
   
       _stop_polling++;       _stop_polling++;
       _polling_sem.signal();       _polling_sem.signal();
       _polling_thread->join();       _polling_thread->join();
Line 268 
Line 264 
       _meta_dispatcher->_shutdown_routed_queue();       _meta_dispatcher->_shutdown_routed_queue();
       delete _meta_dispatcher;       delete _meta_dispatcher;
       _meta_dispatcher = 0;       _meta_dispatcher = 0;
   
       delete _thread_pool;       delete _thread_pool;
       _thread_pool = 0;       _thread_pool = 0;
    }    }
Line 283 
Line 280 
 void MessageQueueService::_shutdown_incoming_queue(void) void MessageQueueService::_shutdown_incoming_queue(void)
 { {
  
   
    if (_incoming_queue_shutdown.value() > 0 )    if (_incoming_queue_shutdown.value() > 0 )
       return ;       return ;
    AsyncIoctl *msg = new AsyncIoctl(get_next_xid(),    AsyncIoctl *msg = new AsyncIoctl(get_next_xid(),
Line 305 
Line 303 
  
    _incoming.insert_last_wait(msg->op);    _incoming.insert_last_wait(msg->op);
  
 //   _req_thread.join();  
   
 } }
  
  
Line 507 
Line 503 
       else       else
       {       {
          PEGASUS_ASSERT(rq != 0 );          PEGASUS_ASSERT(rq != 0 );
          // ATTN: optimization  
          // << Wed Mar  6 15:00:39 2002 mdd >>  
          // put thread and queue into the asyncopnode structure.  
          //  (static_cast<AsyncMessage *>(rq))->_myself = operation->_thread_ptr;  
          //   (static_cast<AsyncMessage *>(rq))->_service = operation->_service_ptr;  
          // done << Tue Mar 12 14:49:07 2002 mdd >>  
          operation->unlock();          operation->unlock();
          _handle_async_request(static_cast<AsyncRequest *>(rq));          _handle_async_request(static_cast<AsyncRequest *>(rq));
       }       }
Line 652 
Line 642 
       _polling_sem.signal();       _polling_sem.signal();
       return true;       return true;
    }    }
 //    else  
 //    {  
 //       if(  (rq != 0 && (true == MessageQueueService::messageOK(rq))) ||  
 //         (rp != 0 && ( true == MessageQueueService::messageOK(rp) )) &&  
 //         _die.value() == 0)  
 //       {  
 //       MessageQueueService::_incoming.insert_last_wait(op);  
 //       return true;  
 //       }  
 //    }  
   
    return false;    return false;
 } }
  
Line 673 
Line 652 
    return true;    return true;
 } }
  
   
 // made pure virtual  
 // << Wed Mar  6 15:11:31 2002 mdd >>  
 // void MessageQueueService::handleEnqueue(Message *msg)  
 // {  
 //    if ( msg )  
 //       delete msg;  
 // }  
   
 // made pure virtual  
 // << Wed Mar  6 15:11:56 2002 mdd >>  
 // void MessageQueueService::handleEnqueue(void)  
 // {  
 //     Message *msg = dequeue();  
 //     handleEnqueue(msg);  
 // }  
   
 void MessageQueueService::handle_heartbeat_request(AsyncRequest *req) void MessageQueueService::handle_heartbeat_request(AsyncRequest *req)
 { {
    // default action is to echo a heartbeat response    // default action is to echo a heartbeat response
Line 719 
Line 681 
    {    {
       case AsyncIoctl::IO_CLOSE:       case AsyncIoctl::IO_CLOSE:
       {       {
          // save my bearings  
 //       Thread *myself = req->op->_thread_ptr;  
          MessageQueueService *service = static_cast<MessageQueueService *>(req->op->_service_ptr);          MessageQueueService *service = static_cast<MessageQueueService *>(req->op->_service_ptr);
  
          // respond to this message.           // respond to this message. this is fire and forget, so we don't need to delete anything.
          // _make_response(req, async_results::OK);           // this takes care of two problems that were being found
            // << Thu Oct  9 10:52:48 2003 mdd >>
             _make_response(req, async_results::OK);
          // ensure we do not accept any further messages          // ensure we do not accept any further messages
  
          // ensure we don't recurse on IO_CLOSE          // ensure we don't recurse on IO_CLOSE
Line 747 
Line 710 
             }             }
             if( operation )             if( operation )
             {             {
 //             operation->_thread_ptr = myself;  
                operation->_service_ptr = service;                operation->_service_ptr = service;
                service->_handle_incoming_operation(operation);                service->_handle_incoming_operation(operation);
             }             }
Line 757 
Line 719 
  
          // shutdown the AsyncDQueue          // shutdown the AsyncDQueue
          service->_incoming.shutdown_queue();          service->_incoming.shutdown_queue();
          AsyncOpNode *op = req->op;  
          op->_request.remove_first();  
          op->release();  
          return_op(op);  
          delete req;  
          // exit the thread !  
 //       myself->exit_self( (PEGASUS_THREAD_RETURN) 1 );  
          return;          return;
       }       }
  


Legend: lines shown only in the left column were removed from v.1.75; lines shown in both columns with differing text are changed lines; lines shown only in the right column were added in v.1.76.

No CVS admin address has been configured
Powered by
ViewCVS 0.9.2