
Diff for /pegasus/src/Pegasus/Common/MessageQueueService.cpp between version 1.82.6.1 and 1.88.2.3

version 1.82.6.1, 2003/10/29 22:09:18
version 1.88.2.3, 2005/08/12 22:52:42
Line 1 (v1.82.6.1) / Line 1 (v1.88.2.3)
-//%2003////////////////////////////////////////////////////////////////////////
+//%2005////////////////////////////////////////////////////////////////////////
 //
-// Copyright (c) 2000, 2001, 2002  BMC Software, Hewlett-Packard Development
-// Company, L. P., IBM Corp., The Open Group, Tivoli Systems.
+// Copyright (c) 2000, 2001, 2002 BMC Software; Hewlett-Packard Development
+// Company, L.P.; IBM Corp.; The Open Group; Tivoli Systems.
 // Copyright (c) 2003 BMC Software; Hewlett-Packard Development Company, L. P.;
 // IBM Corp.; EMC Corporation, The Open Group.
+// Copyright (c) 2004 BMC Software; Hewlett-Packard Development Company, L.P.;
+// IBM Corp.; EMC Corporation; VERITAS Software Corporation; The Open Group.
+// Copyright (c) 2005 Hewlett-Packard Development Company, L.P.; IBM Corp.;
+// EMC Corporation; VERITAS Software Corporation; The Open Group.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to
Line 26 / Line 30
 // Author: Mike Day (mdday@us.ibm.com)
 //
 // Modified By:
+//              Amit K Arora, IBM (amita@in.ibm.com) for Bug#1090
 //
 //%/////////////////////////////////////////////////////////////////////////////
  
Line 56 / Line 61
    return _thread_pool;
 }
 
-void unload_idle_providers(void);
-
 PEGASUS_THREAD_RETURN PEGASUS_THREAD_CDECL  MessageQueueService::kill_idle_threads(void *parm)
 {
 
Line 77 / Line 80
 
       }
    }
-   exit_thread((PEGASUS_THREAD_RETURN)dead_threads);
-   return (PEGASUS_THREAD_RETURN)dead_threads;
+#ifdef PEGASUS_POINTER_64BIT
+   return (PEGASUS_THREAD_RETURN)(Uint64)dead_threads;
+#elif PEGASUS_PLATFORM_AIX_RS_IBMCXX
+   return (PEGASUS_THREAD_RETURN)(unsigned long)dead_threads;
+#else
+   return (PEGASUS_THREAD_RETURN)(Uint32)dead_threads;
+#endif
 }
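
The conditional cast ladder above exists because kill_idle_threads() reports its integer result through the pointer-sized PEGASUS_THREAD_RETURN type, and a direct int-to-pointer cast warns or truncates depending on the platform's pointer width. A minimal portable sketch of the same idea (the names below are stand-ins, not Pegasus definitions):

#include <cstdint>

typedef void *THREAD_RETURN;   // stand-in for PEGASUS_THREAD_RETURN

THREAD_RETURN worker(void *)
{
    int dead_threads = 3;      // the routine's integer result
    // Widening to a pointer-sized integer first avoids "cast to pointer
    // from integer of different size" diagnostics on 64-bit platforms.
    return reinterpret_cast<THREAD_RETURN>(
        static_cast<std::uintptr_t>(dead_threads));
}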
  
  
Line 151 / Line 160
       MessageQueueService *service = list->next(0);
       while(service != NULL)
       {
-         if(service->_incoming.count() > 0 )
-         {
-            _thread_pool->allocate_and_awaken(service, _req_proc);
+         ThreadStatus rtn = PEGASUS_THREAD_OK;
+         if (service->_incoming.count() > 0 &&
+              service->_die.value() == 0)
+         {
+            rtn = _thread_pool->allocate_and_awaken(service,
+                _req_proc, &_polling_sem);
+         }
+         // if no more threads available, break from processing loop
+         if (rtn != PEGASUS_THREAD_OK )
+         {
+            Logger::put(Logger::STANDARD_LOG, System::CIMSERVER, Logger::TRACE,
+               "Not enough threads to process this request. Skipping.");
+
+            Tracer::trace(TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL2,
+               "Could not allocate thread for %s. " \
+               "Queue has %d messages waiting and %d threads servicing." \
+               "Skipping the service for right now. ",
+               service->getQueueName(),
+               service->_incoming.count(),
+               service->_threads.value());
+
+            pegasus_yield();
+            service = NULL;
          }
+         else
+         {
             service = list->next(service);
          }
+      }
       list->unlock();
       if(_check_idle_flag.value() != 0 )
       {
          _check_idle_flag = 0;
-         Thread th(kill_idle_threads, 0, true);
-         th.run();
+         // try to do idle thread clean up processing when system is not busy
+         // if system is busy there may not be a thread available to allocate
+         // so nothing will be done and that is OK.
+
+         if ( _thread_pool->allocate_and_awaken(service, kill_idle_threads,
+              &_polling_sem) != PEGASUS_THREAD_OK)
+         {
+            Logger::put(Logger::STANDARD_LOG, System::CIMSERVER, Logger::TRACE,
+               "Not enough threads to kill idle threads. What an irony.");
+
+            Tracer::trace(TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL2,
+               "Could not allocate thread to kill idle threads." \
+               "Skipping. ");
+         }
       }
    }
    myself->exit_self( (PEGASUS_THREAD_RETURN) 1 );
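
The reworked loop treats a failed allocate_and_awaken() as a signal to stop walking the service list for this pass, yield, and let the pool drain, instead of silently dropping the request as the old code could. A condensed sketch of that back-off shape, with try_allocate_worker() as a hypothetical stand-in for the pool call:

#include <thread>
#include <vector>

struct Service { int pending = 0; bool dying = false; };

// Hypothetical stand-in for ThreadPool::allocate_and_awaken(): returns
// false when no pool thread is available.
bool try_allocate_worker(Service &svc)
{
    svc.pending--;             // pretend a worker took one message
    return svc.pending >= 0;
}

void polling_pass(std::vector<Service> &services)
{
    for (auto &svc : services)
    {
        bool ok = true;
        if (svc.pending > 0 && !svc.dying)
            ok = try_allocate_worker(svc);

        if (!ok)
        {
            // No thread could be allocated: yield and abandon this pass,
            // mirroring the pegasus_yield()/service = NULL sequence above.
            std::this_thread::yield();
            break;
        }
    }
}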
Line 178 / Line 223
 MessageQueueService::MessageQueueService(const char *name,
                                          Uint32 queueID,
                                          Uint32 capabilities,
-                                         Uint32 mask,
-                                         int threads)
+                                         Uint32 mask)
    : Base(name, true,  queueID),
 
      _mask(mask),
      _die(0),
+     _threads(0),
      _incoming(true, 0),
      _callback(true),
      _incoming_queue_shutdown(0),
Line 198 / Line 243
    _default_op_timeout.tv_sec = 30;
    _default_op_timeout.tv_usec = 100;
 
-   _meta_dispatcher_mutex.lock(pegasus_thread_self());
+   AutoMutex autoMut(_meta_dispatcher_mutex);
 
    if( _meta_dispatcher == 0 )
    {
+      _stop_polling = 0;
       PEGASUS_ASSERT( _service_count.value() == 0 );
       _meta_dispatcher = new cimom();
       if (_meta_dispatcher == NULL )
       {
-         _meta_dispatcher_mutex.unlock();
          throw NullPointer();
       }
-      _thread_pool = new ThreadPool(0, "MessageQueueService", 0, threads,
+      _thread_pool = new ThreadPool(0, "MessageQueueService", 0, 0,
                                     create_time, destroy_time, deadlock_time);
 
       _polling_thread = new Thread(polling_routine,
                                    reinterpret_cast<void *>(&_polling_list),
                                    false);
-      _polling_thread->run();
+      while (!_polling_thread->run())
+      {
+         pegasus_yield();
+      }
    }
    _service_count++;
 
    if( false == register_service(name, _capabilities, _mask) )
    {
-      _meta_dispatcher_mutex.unlock();
       //l10n
       //throw BindFailedException("MessageQueueService Base Unable to register with  Meta Dispatcher");
       MessageLoaderParms parms("Common.MessageQueueService.UNABLE_TO_REGISTER",
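
Replacing the manual lock()/unlock() pairs with AutoMutex is the Bug#1090 fix: the guard releases the mutex on every exit path, including the throw NullPointer() and early-return branches that previously skipped the explicit unlock() calls. A minimal RAII sketch of the mechanism (not the actual Pegasus AutoMutex):

#include <mutex>

class AutoMutexSketch
{
public:
    explicit AutoMutexSketch(std::mutex &m) : _m(m) { _m.lock(); }
    ~AutoMutexSketch() { _m.unlock(); }   // runs even during stack unwinding
private:
    std::mutex &_m;
};

void guarded(std::mutex &m, bool fail)
{
    AutoMutexSketch guard(m);
    if (fail)
        throw 42;   // guard's destructor still releases m
}                   // normal path: released here

In modern C++, std::lock_guard gives the same guarantee.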
Line 232 / Line 279
 
    _polling_list.insert_last(this);
 
-   _meta_dispatcher_mutex.unlock();
+//   _meta_dispatcher_mutex.unlock();  //Bug#1090
 //   _callback_thread.run();
 
 //   _req_thread.run();
Line 247 / Line 294
    {
       _shutdown_incoming_queue();
    }
+
+   while (_threads.value() > 0)
+   {
+       pegasus_yield();
+   }
+   _polling_list.remove(this);
 
    _callback_ready.signal();
 
-   _meta_dispatcher_mutex.lock(pegasus_thread_self());
+   {
+      AutoMutex autoMut(_meta_dispatcher_mutex);
    _service_count--;
    if (_service_count.value() == 0 )
    {
Line 266 / Line 321
       delete _thread_pool;
       _thread_pool = 0;
    }
-   _meta_dispatcher_mutex.unlock();
-   _polling_list.remove(this);
+   } // mutex unlocks here
    // Clean up in case there are extra stuff on the queue.
    while (_incoming.count())
    {
+      try
+      {
          delete _incoming.remove_first();
       }
+      catch (const ListClosed &e)
+      {
+         // If the list is closed, there is nothing we can do.
+         break;
+      }
+   }
 }
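
The destructor now drains its workers before leaving shared structures: it spins on the _threads count until the last _req_proc worker has finished, and only then removes itself from the polling list, so no worker can touch a half-destroyed service. A sketch of that ordering, assuming an atomic counter:

#include <atomic>
#include <thread>

std::atomic<int> active_workers{0};

void wait_for_workers()
{
    while (active_workers.load() > 0)
        std::this_thread::yield();   // analogous to pegasus_yield()
    // Only now is it safe to remove this object from shared lists.
}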
  
 void MessageQueueService::_shutdown_incoming_queue(void)
Line 299 / Line 361
    msg->op->_op_dest = this;
    msg->op->_request.insert_first(msg);
 
+   try
+   {
       _incoming.insert_last_wait(msg->op);
+      _polling_sem.signal();
+   }
+   catch (const ListClosed &)
+   {
+      // This means the queue has already been shut-down (happens  when there
+      // are two AsyncIoctrl::IO_CLOSE messages generated and one got first
+      // processed.
+      delete msg;
+   }
+   catch (const Permission &)
+   {
+      delete msg;
+   }
 }
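
The new try/catch changes message ownership on failure: insert_last_wait() hands the message to the queue on success, but when the queue is already shut down the sender still owns it and must free it to avoid a leak. A hedged sketch, with a hypothetical ClosedQueue exception standing in for ListClosed:

#include <stdexcept>

struct Message { int id = 0; };

struct ClosedQueue : std::runtime_error
{
    ClosedQueue() : std::runtime_error("queue closed") {}
};

// Hypothetical stand-in for _incoming.insert_last_wait(): a closed queue
// rejects the message by throwing.
void insert_or_throw(Message *, bool closed)
{
    if (closed)
        throw ClosedQueue();
}

void safe_enqueue(Message *msg, bool closed)
{
    try
    {
        insert_or_throw(msg, closed);  // queue owns msg on success
    }
    catch (const ClosedQueue &)
    {
        delete msg;                    // rejected: sender must free it
    }
}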
  
  
  
-void MessageQueueService::enqueue(Message *msg) throw(IPCException)
+void MessageQueueService::enqueue(Message *msg)
 {
    PEG_METHOD_ENTER(TRC_MESSAGEQUEUESERVICE, "MessageQueueService::enqueue()");
  
Line 356 / Line 432
 
    if ( service->_die.value() == 0 )
     {
+        service->_threads++;
         try
         {
            operation = service->_incoming.remove_first();
Line 363 / Line 440
         catch(ListClosed & )
         {
            operation = 0;
+           service->_threads--;
            return(0);
         }
         if( operation )
Line 372 / Line 449
            service->_handle_incoming_operation(operation);
         }
     }
+   service->_threads--;
    return(0);
 }
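
The _threads bookkeeping added to _req_proc must balance on every path out of the function, including the early return inside the catch block; the diff does this with explicit increments and decrements. A scope guard makes that invariant harder to break, sketched here as an alternative, not as what the diff does:

#include <atomic>

std::atomic<int> threads{0};

struct CountGuard
{
    std::atomic<int> &c;
    explicit CountGuard(std::atomic<int> &counter) : c(counter) { ++c; }
    ~CountGuard() { --c; }    // decrements on return *and* on throw
};

void req_proc_body()
{
    CountGuard guard(threads);
    // ... remove work from the queue and handle it ...
}   // count drops here no matter how we leave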
  
Line 1172 / Line 1249
 {
    static Mutex _monitor;
    Uint32 value;
-   _monitor.lock(pegasus_thread_self());
+   AutoMutex autoMut(_monitor);
 
    _xid++;
    value =  _xid.value();
-   _monitor.unlock();
    return value;
 
 }
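
get_next_xid() gets the same AutoMutex treatment for the same leak-safety reason. For a plain monotonic counter, an atomic increment achieves the result without a mutex at all; a sketch, not a suggested Pegasus change:

#include <atomic>
#include <cstdint>

std::uint32_t next_xid()
{
    static std::atomic<std::uint32_t> xid{0};
    return ++xid;   // atomic increment-and-read replaces the lock entirely
}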


Legend: lines prefixed with '-' were removed from v.1.82.6.1; lines prefixed with '+' were added in v.1.88.2.3; unprefixed lines are unchanged context.
