(file) Return to MessageQueueService.cpp CVS log (file) (dir) Up to [Pegasus] / pegasus / src / Pegasus / Common

Diff for /pegasus/src/Pegasus/Common/MessageQueueService.cpp between version 1.109 and 1.123.2.2

version 1.109, 2005/05/31 14:20:04 version 1.123.2.2, 2006/07/28 20:46:41
Line 1 
Line 1 
 //%2005////////////////////////////////////////////////////////////////////////  //%2006////////////////////////////////////////////////////////////////////////
 // //
 // Copyright (c) 2000, 2001, 2002 BMC Software; Hewlett-Packard Development // Copyright (c) 2000, 2001, 2002 BMC Software; Hewlett-Packard Development
 // Company, L.P.; IBM Corp.; The Open Group; Tivoli Systems. // Company, L.P.; IBM Corp.; The Open Group; Tivoli Systems.
Line 8 
Line 8 
 // IBM Corp.; EMC Corporation; VERITAS Software Corporation; The Open Group. // IBM Corp.; EMC Corporation; VERITAS Software Corporation; The Open Group.
 // Copyright (c) 2005 Hewlett-Packard Development Company, L.P.; IBM Corp.; // Copyright (c) 2005 Hewlett-Packard Development Company, L.P.; IBM Corp.;
 // EMC Corporation; VERITAS Software Corporation; The Open Group. // EMC Corporation; VERITAS Software Corporation; The Open Group.
   // Copyright (c) 2006 Hewlett-Packard Development Company, L.P.; IBM Corp.;
   // EMC Corporation; Symantec Corporation; The Open Group.
 // //
 // Permission is hereby granted, free of charge, to any person obtaining a copy // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to // of this software and associated documentation files (the "Software"), to
Line 32 
Line 34 
 // Modified By: // Modified By:
 //              Amit K Arora, IBM (amita@in.ibm.com) for Bug#1090,#2657 //              Amit K Arora, IBM (amita@in.ibm.com) for Bug#1090,#2657
 //              Josephine Eskaline Joyce, IBM (jojustin@in.ibm.com) for Bug#3259 //              Josephine Eskaline Joyce, IBM (jojustin@in.ibm.com) for Bug#3259
   //              Jim Wunderlich (Jim_Wunderlich@prodigy.net)
 // //
 //%///////////////////////////////////////////////////////////////////////////// //%/////////////////////////////////////////////////////////////////////////////
  
   // #include <iostream.h>
 #include "MessageQueueService.h" #include "MessageQueueService.h"
 #include <Pegasus/Common/Tracer.h> #include <Pegasus/Common/Tracer.h>
 #include <Pegasus/Common/MessageLoader.h> //l10n #include <Pegasus/Common/MessageLoader.h> //l10n
Line 42 
Line 46 
 PEGASUS_NAMESPACE_BEGIN PEGASUS_NAMESPACE_BEGIN
  
 cimom *MessageQueueService::_meta_dispatcher = 0; cimom *MessageQueueService::_meta_dispatcher = 0;
 AtomicInt MessageQueueService::_service_count = 0;  AtomicInt MessageQueueService::_service_count(0);
 AtomicInt MessageQueueService::_xid(1);  
 Mutex MessageQueueService::_meta_dispatcher_mutex; Mutex MessageQueueService::_meta_dispatcher_mutex;
  
 static struct timeval deallocateWait = {300, 0}; static struct timeval deallocateWait = {300, 0};
  
 ThreadPool *MessageQueueService::_thread_pool = 0; ThreadPool *MessageQueueService::_thread_pool = 0;
  
 DQueue<MessageQueueService> MessageQueueService::_polling_list(true);  MessageQueueService::PollingList* MessageQueueService::_polling_list;
   Mutex MessageQueueService::_polling_list_mutex;
  
 Thread* MessageQueueService::_polling_thread = 0; Thread* MessageQueueService::_polling_thread = 0;
  
Line 59 
Line 63 
    return _thread_pool;    return _thread_pool;
 } }
  
 PEGASUS_THREAD_RETURN PEGASUS_THREAD_CDECL  //
   // MAX_THREADS_PER_SVC_QUEUE
   //
   // JR Wunderlich Jun 6, 2005
   //
   
   #define MAX_THREADS_PER_SVC_QUEUE_LIMIT 5000
   #define MAX_THREADS_PER_SVC_QUEUE_DEFAULT 5
   
   #ifndef MAX_THREADS_PER_SVC_QUEUE
   # define MAX_THREADS_PER_SVC_QUEUE MAX_THREADS_PER_SVC_QUEUE_DEFAULT
   #endif
   
   Uint32 max_threads_per_svc_queue;
   
   ThreadReturnType PEGASUS_THREAD_CDECL
 MessageQueueService::kill_idle_threads(void *parm) MessageQueueService::kill_idle_threads(void *parm)
 { {
  
    static struct timeval now, last = {0,0};    static struct timeval now, last = {0,0};
    gettimeofday(&now, NULL);     Time::gettimeofday(&now);
    int dead_threads = 0;    int dead_threads = 0;
  
    if (now.tv_sec - last.tv_sec > 120)    if (now.tv_sec - last.tv_sec > 120)
    {    {
       gettimeofday(&last, NULL);        Time::gettimeofday(&last);
       try       try
       {       {
          dead_threads = MessageQueueService::_thread_pool->cleanupIdleThreads();          dead_threads = MessageQueueService::_thread_pool->cleanupIdleThreads();
Line 81 
Line 100 
    }    }
  
 #ifdef PEGASUS_POINTER_64BIT #ifdef PEGASUS_POINTER_64BIT
    return (PEGASUS_THREAD_RETURN)(Uint64)dead_threads;     return (ThreadReturnType)(Uint64)dead_threads;
 #elif PEGASUS_PLATFORM_AIX_RS_IBMCXX #elif PEGASUS_PLATFORM_AIX_RS_IBMCXX
    return (PEGASUS_THREAD_RETURN)(unsigned long)dead_threads;     return (ThreadReturnType)(unsigned long)dead_threads;
 #else #else
    return (PEGASUS_THREAD_RETURN)(Uint32)dead_threads;     return (ThreadReturnType)(Uint32)dead_threads;
 #endif #endif
 } }
  
 PEGASUS_THREAD_RETURN PEGASUS_THREAD_CDECL MessageQueueService::polling_routine(void *parm)  ThreadReturnType PEGASUS_THREAD_CDECL MessageQueueService::polling_routine(void *parm)
 { {
    Thread *myself = reinterpret_cast<Thread *>(parm);    Thread *myself = reinterpret_cast<Thread *>(parm);
    DQueue<MessageQueueService> *list = reinterpret_cast<DQueue<MessageQueueService> *>(myself->get_parm());     List<MessageQueueService, Mutex> *list =
    while (_stop_polling.value()  == 0)         reinterpret_cast<List<MessageQueueService, Mutex>*>(myself->get_parm());
   
      while (_stop_polling.get()  == 0)
    {    {
       _polling_sem.wait();       _polling_sem.wait();
  
       if (_stop_polling.value() != 0)        if (_stop_polling.get() != 0)
       {       {
          break;          break;
       }       }
  
         // The polling_routine thread must hold the lock on the
         // _polling_thread list while processing incoming messages.
         // This lock is used to give this thread ownership of
         // services on the _polling_routine list.
   
        // This is necessary to avoid conflict with other threads
         // processing the _polling_list
         // (e.g., MessageQueueServer::~MessageQueueService).
   
       list->lock();       list->lock();
       MessageQueueService *service = list->next(0);        MessageQueueService *service = list->front();
         ThreadStatus rtn = PEGASUS_THREAD_OK;
       while(service != NULL)       while(service != NULL)
       {       {
          if (service->_incoming.count() > 0 && service->_die.value() == 0)            if ((service->_incoming.count() > 0) &&
                 (service->_die.get() == 0) &&
                 (service->_threads.get() < max_threads_per_svc_queue))
             {
                // The _threads count is used to track the
                // number of active threads that have been allocated
                // to process messages for this service.
   
                // The _threads count MUST be incremented while
                // the polling_routine owns the _polling_thread
                // lock and has ownership of the service object.
   
                service->_threads++;
                try
          {          {
             _thread_pool->allocate_and_awaken(service, _req_proc);                   rtn = _thread_pool->allocate_and_awaken(
                         service, _req_proc, &_polling_sem);
                }
                catch (...)
                {
                    service->_threads--;
   
                    // allocate_and_awaken should never generate an exception.
                    PEGASUS_ASSERT(0);
                }
                // if no more threads available, break from processing loop
                if (rtn != PEGASUS_THREAD_OK )
                {
                    service->_threads--;
                    Logger::put(Logger::STANDARD_LOG, System::CIMSERVER, Logger::TRACE,
                       "Not enough threads to process this request. Skipping.");
   
                    Tracer::trace(TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL2,
                       "Could not allocate thread for %s. " \
                       "Queue has %d messages waiting and %d threads servicing." \
                       "Skipping the service for right now. ",
                       service->getQueueName(),
                       service->_incoming.count(),
                       service->_threads.get());
   
                    Threads::yield();
                    service = NULL;
                 }
             }
             if (service != NULL)
             {
                service = list->next_of(service);
          }          }
          service = list->next(service);  
       }       }
       list->unlock();       list->unlock();
       if (_check_idle_flag.value() != 0)  
         if (_check_idle_flag.get() != 0)
       {       {
          _check_idle_flag = 0;          _check_idle_flag = 0;
            // try to do idle thread clean up processing when system is not busy
            // if system is busy there may not be a thread available to allocate
            // so nothing will be done and that is OK.
  
          // If there are insufficent resources to run           if ( _thread_pool->allocate_and_awaken(service, kill_idle_threads, &_polling_sem) != PEGASUS_THREAD_OK)
          // kill_idle_threads, then just return.           {
          _thread_pool->allocate_and_awaken(service, kill_idle_threads);                  Logger::put(Logger::STANDARD_LOG, System::CIMSERVER, Logger::TRACE,
                           "Not enough threads to kill idle threads. What an irony.");
   
                   Tracer::trace(TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL2,
                           "Could not allocate thread to kill idle threads." \
                           "Skipping. ");
       }       }
   
   
    }    }
    myself->exit_self( (PEGASUS_THREAD_RETURN) 1 );     }
      myself->exit_self( (ThreadReturnType) 1 );
    return(0);    return(0);
 } }
  
Line 141 
Line 227 
      _mask(mask),      _mask(mask),
      _die(0),      _die(0),
         _threads(0),         _threads(0),
      _incoming(true, 0),       _incoming(),
      _incoming_queue_shutdown(0)      _incoming_queue_shutdown(0)
 { {
   
    _capabilities = (capabilities | module_capabilities::async);    _capabilities = (capabilities | module_capabilities::async);
  
    _default_op_timeout.tv_sec = 30;    _default_op_timeout.tv_sec = 30;
    _default_op_timeout.tv_usec = 100;    _default_op_timeout.tv_usec = 100;
  
      max_threads_per_svc_queue = MAX_THREADS_PER_SVC_QUEUE;
   
      // if requested thread max is out of range, then set to
      // MAX_THREADS_PER_SVC_QUEUE_LIMIT
   
      if ((max_threads_per_svc_queue < 1) ||
          (max_threads_per_svc_queue > MAX_THREADS_PER_SVC_QUEUE_LIMIT))
      {
          max_threads_per_svc_queue = MAX_THREADS_PER_SVC_QUEUE_LIMIT;
      }
   
      Tracer::trace(TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL2,
         "max_threads_per_svc_queue set to %u.", max_threads_per_svc_queue);
   
    AutoMutex autoMut(_meta_dispatcher_mutex);    AutoMutex autoMut(_meta_dispatcher_mutex);
  
    if (_meta_dispatcher == 0)    if (_meta_dispatcher == 0)
    {    {
       _stop_polling = 0;       _stop_polling = 0;
       PEGASUS_ASSERT(_service_count.value() == 0);        PEGASUS_ASSERT(_service_count.get() == 0);
       _meta_dispatcher = new cimom();       _meta_dispatcher = new cimom();
       if (_meta_dispatcher == NULL)       if (_meta_dispatcher == NULL)
       {       {
          throw NullPointer();          throw NullPointer();
       }       }
         //  _thread_pool = new ThreadPool(initial_cnt, "MessageQueueService",
         //   minimum_cnt, maximum_cnt, deallocateWait);
         //
       _thread_pool =       _thread_pool =
           new ThreadPool(0, "MessageQueueService", 0, 0, deallocateWait);           new ThreadPool(0, "MessageQueueService", 0, 0, deallocateWait);
    }    }
Line 175 
Line 279 
       throw BindFailedException(parms);       throw BindFailedException(parms);
    }    }
  
    _polling_list.insert_last(this);     _get_polling_list()->insert_back(this);
  
 } }
  
Line 184 
Line 288 
 { {
    _die = 1;    _die = 1;
  
    if (_incoming_queue_shutdown.value() == 0)     // The polling_routine locks the _polling_list while
      // processing the incoming messages for services on the
      // list.  Deleting the service from the _polling_list
      // prior to processing, avoids synchronization issues
      // with the _polling_routine.
   
      // ATTN: added to prevent assertion in List in which the list does not
      // contain this element.
   
      if (_get_polling_list()->contains(this))
          _get_polling_list()->remove(this);
   
      // ATTN: The code for closing the _incoming queue
      // is not working correctly. In OpenPegasus 2.5,
      // execution of the following code is very timing
     // dependent. This needs to be fixed.
      // See Bug 4079 for details.
      if (_incoming_queue_shutdown.get() == 0)
    {    {
       _shutdown_incoming_queue();       _shutdown_incoming_queue();
   
    }    }
  
  while (_threads.value() > 0)     // Wait until all threads processing the messages
      // for this service have completed.
   
      while (_threads.get() > 0)
      {      {
           pegasus_yield();        Threads::yield();
      }      }
    _polling_list.remove(this);  
    {    {
      AutoMutex autoMut(_meta_dispatcher_mutex);      AutoMutex autoMut(_meta_dispatcher_mutex);
      _service_count--;      _service_count--;
      if (_service_count.value() == 0)       if (_service_count.get() == 0)
      {      {
  
       _stop_polling++;       _stop_polling++;
Line 220 
Line 343 
   while (_incoming.count())   while (_incoming.count())
   {   {
     try {     try {
       delete _incoming.remove_first();        delete _incoming.dequeue();
     } catch (const ListClosed &e)     } catch (const ListClosed &e)
     {     {
       // If the list is closed, there is nothing we can do.       // If the list is closed, there is nothing we can do.
Line 231 
Line 354 
  
 void MessageQueueService::_shutdown_incoming_queue() void MessageQueueService::_shutdown_incoming_queue()
 { {
    if (_incoming_queue_shutdown.value() > 0)     if (_incoming_queue_shutdown.get() > 0)
       return;       return;
  
    AsyncIoctl *msg = new AsyncIoctl(    AsyncIoctl *msg = new AsyncIoctl(
       get_next_xid(),  
       0,       0,
       _queueId,       _queueId,
       _queueId,       _queueId,
Line 251 
Line 373 
    msg->op->_state &= ~ASYNC_OPSTATE_COMPLETE;    msg->op->_state &= ~ASYNC_OPSTATE_COMPLETE;
  
    msg->op->_op_dest = this;    msg->op->_op_dest = this;
    msg->op->_request.insert_first(msg);     msg->op->_request.reset(msg);
    try {    try {
      _incoming.insert_last_wait(msg->op);       _incoming.enqueue_wait(msg->op);
      _polling_sem.signal();      _polling_sem.signal();
    } catch (const ListClosed &)    } catch (const ListClosed &)
    {    {
Line 262 
Line 384 
     // processed.     // processed.
      delete msg;      delete msg;
    }    }
      catch (const Permission &)
      {
        delete msg;
      }
 } }
  
  
Line 276 
Line 402 
 } }
  
  
 PEGASUS_THREAD_RETURN PEGASUS_THREAD_CDECL MessageQueueService::_req_proc(  ThreadReturnType PEGASUS_THREAD_CDECL MessageQueueService::_req_proc(
     void * parm)     void * parm)
 { {
     MessageQueueService* service =     MessageQueueService* service =
Line 285 
Line 411 
     try     try
     {     {
  
         if (service->_die.value() != 0)          if (service->_die.get() != 0)
         {         {
               service->_threads--;
             return (0);             return (0);
         }         }
             service->_threads++;  
         // pull messages off the incoming queue and dispatch them. then         // pull messages off the incoming queue and dispatch them. then
         // check pending messages that are non-blocking         // check pending messages that are non-blocking
         AsyncOpNode *operation = 0;         AsyncOpNode *operation = 0;
Line 299 
Line 425 
         {         {
             try             try
             {             {
                 operation = service->_incoming.remove_first();                  operation = service->_incoming.dequeue();
             }             }
             catch (ListClosed &)             catch (ListClosed &)
             {             {
Line 348 
Line 474 
    if (op->_flags & ASYNC_OPFLAGS_SAFE_CALLBACK)    if (op->_flags & ASYNC_OPFLAGS_SAFE_CALLBACK)
    {    {
  
       Message *msg = op->get_request();        Message *msg = op->removeRequest();
       if (msg && (msg->getMask() & message_mask::ha_async))       if (msg && (msg->getMask() & message_mask::ha_async))
       {       {
          if (msg->getType() == async_messages::ASYNC_LEGACY_OP_START)          if (msg->getType() == async_messages::ASYNC_LEGACY_OP_START)
Line 375 
Line 501 
          delete msg;          delete msg;
       }       }
  
       msg = op->get_response();        msg = op->removeResponse();
       if (msg && (msg->getMask() & message_mask::ha_async))       if (msg && (msg->getMask() & message_mask::ha_async))
       {       {
          if (msg->getType() == async_messages::ASYNC_LEGACY_OP_RESULT)          if (msg->getType() == async_messages::ASYNC_LEGACY_OP_RESULT)
Line 419 
Line 545 
 // << Tue Feb 19 14:10:38 2002 mdd >> // << Tue Feb 19 14:10:38 2002 mdd >>
       operation->lock();       operation->lock();
  
       Message *rq = operation->_request.next(0);        Message *rq = operation->_request.get();
  
 // optimization <<< Thu Mar  7 21:04:05 2002 mdd >>> // optimization <<< Thu Mar  7 21:04:05 2002 mdd >>>
 // move this to the bottom of the loop when the majority of // move this to the bottom of the loop when the majority of
Line 428 
Line 554 
       // divert legacy messages to handleEnqueue       // divert legacy messages to handleEnqueue
       if ((rq != 0) && (!(rq->getMask() & message_mask::ha_async)))       if ((rq != 0) && (!(rq->getMask() & message_mask::ha_async)))
       {       {
          rq = operation->_request.remove_first() ;           operation->_request.release();
          operation->unlock();          operation->unlock();
          // delete the op node          // delete the op node
          operation->release();          operation->release();
Line 522 
Line 648 
  
       AsyncLegacyOperationResult *async_result =       AsyncLegacyOperationResult *async_result =
          new AsyncLegacyOperationResult(          new AsyncLegacyOperationResult(
             async->getKey(),  
             async->getRouting(),  
             op,             op,
             response);             response);
       _completeAsyncResponse(       _completeAsyncResponse(
Line 574 
Line 698 
  
 Boolean MessageQueueService::accept_async(AsyncOpNode *op) Boolean MessageQueueService::accept_async(AsyncOpNode *op)
 { {
    if (_incoming_queue_shutdown.value() > 0)     if (_incoming_queue_shutdown.get() > 0)
       return false;       return false;
    if (_polling_thread == NULL)    if (_polling_thread == NULL)
    {    {
       _polling_thread = new Thread(       _polling_thread = new Thread(
           polling_routine,           polling_routine,
           reinterpret_cast<void *>(&_polling_list),            reinterpret_cast<void *>(_get_polling_list()),
           false);           false);
       while (!_polling_thread->run())        ThreadStatus tr = PEGASUS_THREAD_OK;
         while ( (tr =_polling_thread->run()) != PEGASUS_THREAD_OK)
       {       {
          pegasus_yield();          if (tr == PEGASUS_THREAD_INSUFFICIENT_RESOURCES)
              Threads::yield();
           else
              throw Exception(MessageLoaderParms("Common.MessageQueueService.NOT_ENOUGH_THREAD",
                           "Could not allocate thread for the polling thread."));
       }       }
    }    }
 // ATTN optimization remove the message checking altogether in the base // ATTN optimization remove the message checking altogether in the base
 // << Mon Feb 18 14:02:20 2002 mdd >> // << Mon Feb 18 14:02:20 2002 mdd >>
    op->lock();    op->lock();
    Message *rq = op->_request.next(0);     Message *rq = op->_request.get();
    Message *rp = op->_response.next(0);     Message *rp = op->_response.get();
    op->unlock();    op->unlock();
  
    if ((rq != 0 && (true == messageOK(rq))) ||    if ((rq != 0 && (true == messageOK(rq))) ||
        (rp != 0 && (true == messageOK(rp))) && _die.value() == 0)         (rp != 0 && (true == messageOK(rp))) && _die.get() == 0)
    {    {
       _incoming.insert_last_wait(op);        _incoming.enqueue_wait(op);
       _polling_sem.signal();       _polling_sem.signal();
       return true;       return true;
    }    }
Line 606 
Line 735 
  
 Boolean MessageQueueService::messageOK(const Message *msg) Boolean MessageQueueService::messageOK(const Message *msg)
 { {
    if (_incoming_queue_shutdown.value() > 0)     if (_incoming_queue_shutdown.get() > 0)
       return false;       return false;
    return true;    return true;
 } }
Line 617 
Line 746 
  
    AsyncReply *reply = new AsyncReply(    AsyncReply *reply = new AsyncReply(
       async_messages::HEARTBEAT,       async_messages::HEARTBEAT,
       req->getKey(),  
       req->getRouting(),  
       0,       0,
       req->op,       req->op,
       async_results::OK,       async_results::OK,
Line 651 
Line 778 
          // ensure we do not accept any further messages          // ensure we do not accept any further messages
  
          // ensure we don't recurse on IO_CLOSE          // ensure we don't recurse on IO_CLOSE
          if (_incoming_queue_shutdown.value() > 0)           if (_incoming_queue_shutdown.get() > 0)
             break;             break;
  
          // set the closing flag          // set the closing flag
Line 662 
Line 789 
             AsyncOpNode *operation;             AsyncOpNode *operation;
             try             try
             {             {
                operation = service->_incoming.remove_first();                 operation = service->_incoming.dequeue();
             }             }
             catch(IPCException &)             catch(IPCException &)
             {             {
Line 677 
Line 804 
                break;                break;
          } // message processing loop          } // message processing loop
  
          // shutdown the AsyncDQueue           // shutdown the AsyncQueue
          service->_incoming.shutdown_queue();           service->_incoming.close();
          return;          return;
       }       }
  
Line 786 
Line 913 
  
 void MessageQueueService::return_op(AsyncOpNode *op) void MessageQueueService::return_op(AsyncOpNode *op)
 { {
    PEGASUS_ASSERT(op->read_state() & ASYNC_OPSTATE_RELEASED);     PEGASUS_ASSERT(op->_state & ASYNC_OPSTATE_RELEASED);
    delete op;    delete op;
 } }
  
Line 869 
Line 996 
    if (!(msg->getMask() & message_mask::ha_async))    if (!(msg->getMask() & message_mask::ha_async))
    {    {
       AsyncLegacyOperationStart *wrapper = new AsyncLegacyOperationStart(       AsyncLegacyOperationStart *wrapper = new AsyncLegacyOperationStart(
          get_next_xid(),  
          op,          op,
          destination,          destination,
          msg,          msg,
Line 877 
Line 1003 
    }    }
    else    else
    {    {
       op->_request.insert_first(msg);        op->_request.reset(msg);
       (static_cast<AsyncMessage *>(msg))->op = op;       (static_cast<AsyncMessage *>(msg))->op = op;
    }    }
    return _meta_dispatcher->route_async(op);    return _meta_dispatcher->route_async(op);
Line 897 
Line 1023 
    if (op == 0)    if (op == 0)
    {    {
       op = get_op();       op = get_op();
       op->_request.insert_first(msg);        op->_request.reset(msg);
       if (mask & message_mask::ha_async)       if (mask & message_mask::ha_async)
       {       {
          (static_cast<AsyncMessage *>(msg))->op = op;          (static_cast<AsyncMessage *>(msg))->op = op;
Line 930 
Line 1056 
    if (request->op == 0)    if (request->op == 0)
    {    {
       request->op = get_op();       request->op = get_op();
       request->op->_request.insert_first(request);        request->op->_request.reset(request);
       destroy_op = true;       destroy_op = true;
    }    }
  
Line 945 
Line 1071 
  
    request->op->_client_sem.wait();    request->op->_client_sem.wait();
  
    request->op->lock();     AsyncReply* rpl = static_cast<AsyncReply *>(request->op->removeResponse());
    AsyncReply * rpl = static_cast<AsyncReply *>(request->op->_response.remove_first());  
    rpl->op = 0;    rpl->op = 0;
    request->op->unlock();  
  
    if (destroy_op == true)    if (destroy_op == true)
    {    {
       request->op->lock();       request->op->lock();
       request->op->_request.remove(request);        request->op->_request.release();
       request->op->_state |= ASYNC_OPSTATE_RELEASED;       request->op->_state |= ASYNC_OPSTATE_RELEASED;
       request->op->unlock();       request->op->unlock();
       return_op(request->op);       return_op(request->op);
Line 969 
Line 1093 
     Uint32 mask)     Uint32 mask)
 { {
    RegisterCimService *msg = new RegisterCimService(    RegisterCimService *msg = new RegisterCimService(
       get_next_xid(),  
       0,       0,
       true,       true,
       name,       name,
Line 1004 
Line 1127 
 Boolean MessageQueueService::update_service(Uint32 capabilities, Uint32 mask) Boolean MessageQueueService::update_service(Uint32 capabilities, Uint32 mask)
 { {
    UpdateCimService *msg = new UpdateCimService(    UpdateCimService *msg = new UpdateCimService(
       get_next_xid(),  
       0,       0,
       true,       true,
       _queueId,       _queueId,
Line 1054 
Line 1176 
    results->clear();    results->clear();
  
    FindServiceQueue *req = new FindServiceQueue(    FindServiceQueue *req = new FindServiceQueue(
       get_next_xid(),  
       0,       0,
       _queueId,       _queueId,
       true,       true,
Line 1092 
Line 1213 
    }    }
  
    EnumerateService *req = new EnumerateService(    EnumerateService *req = new EnumerateService(
       get_next_xid(),  
       0,       0,
       _queueId,       _queueId,
       true,       true,
Line 1132 
Line 1252 
    return;    return;
 } }
  
 Uint32 MessageQueueService::get_next_xid()  MessageQueueService::PollingList* MessageQueueService::_get_polling_list()
 { {
    static Mutex _monitor;      _polling_list_mutex.lock();
    Uint32 value;  
    AutoMutex autoMut(_monitor);      if (!_polling_list)
    _xid++;          _polling_list = new PollingList;
    value =  _xid.value();  
    return value;      _polling_list_mutex.unlock();
  
       return _polling_list;
 } }
  
 PEGASUS_NAMESPACE_END PEGASUS_NAMESPACE_END


Legend:
Removed from v.1.109  
changed lines
  Added in v.1.123.2.2

No CVS admin address has been configured
Powered by
ViewCVS 0.9.2