//%LICENSE////////////////////////////////////////////////////////////////
//
// Licensed to The Open Group (TOG) under one or more contributor license
// agreements. Refer to the OpenPegasusNOTICE.txt file distributed with
// this work for additional information regarding copyright ownership.
// Each contributor licenses this file to you under the OpenPegasus Open
// Source License; you may not use this file except in compliance with the
// License.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
//////////////////////////////////////////////////////////////////////////
//
//%/////////////////////////////////////////////////////////////////////////////

#include "MessageQueueService.h"
#include <Pegasus/Common/Tracer.h>
#include <Pegasus/Common/MessageLoader.h>

PEGASUS_USING_STD;

PEGASUS_NAMESPACE_BEGIN

|
40 mday 1.15 cimom *MessageQueueService::_meta_dispatcher = 0;
|
41 mike 1.118 AtomicInt MessageQueueService::_service_count(0);
|
42 venkat.puvvada 1.156 Mutex MessageQueueService::_meta_dispatcher_mutex;
|
43 mday 1.15
|
44 kumpf 1.104 static struct timeval deallocateWait = {300, 0};
|
45 mday 1.53
|
46 mday 1.55 ThreadPool *MessageQueueService::_thread_pool = 0;
|
47 mday 1.53
|
48 mike 1.120 MessageQueueService::PollingList* MessageQueueService::_polling_list;
|
49 venkat.puvvada 1.158 Mutex MessageQueueService::_polling_list_mutex;
|
50 mday 1.53
|
51 kumpf 1.62 Thread* MessageQueueService::_polling_thread = 0;
|
52 mday 1.61
|
53 kumpf 1.104 ThreadPool *MessageQueueService::get_thread_pool()
|
54 mday 1.69 {
55 return _thread_pool;
56 }

//
// MAX_THREADS_PER_SVC_QUEUE
//
// JR Wunderlich Jun 6, 2005
//

#define MAX_THREADS_PER_SVC_QUEUE_LIMIT 5000
#define MAX_THREADS_PER_SVC_QUEUE_DEFAULT 5

#ifndef MAX_THREADS_PER_SVC_QUEUE
# define MAX_THREADS_PER_SVC_QUEUE MAX_THREADS_PER_SVC_QUEUE_DEFAULT
#endif
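
// Illustrative only: the default above can be overridden at build time by
// defining MAX_THREADS_PER_SVC_QUEUE, e.g. on the compiler command line:
//
//     -DMAX_THREADS_PER_SVC_QUEUE=10
//
// The MessageQueueService constructor below clamps values outside
// 1..MAX_THREADS_PER_SVC_QUEUE_LIMIT to MAX_THREADS_PER_SVC_QUEUE_LIMIT.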

Uint32 max_threads_per_svc_queue;

ThreadReturnType PEGASUS_THREAD_CDECL MessageQueueService::polling_routine(
    void* parm)
{
    Thread *myself = reinterpret_cast<Thread *>(parm);
    MessageQueueService::PollingList *list =
        reinterpret_cast<MessageQueueService::PollingList*>(myself->get_parm());

    try
    {
        while (_stop_polling.get() == 0)
        {
            _polling_sem.wait();

            if (_stop_polling.get() != 0)
            {
                break;
            }

            // The polling_routine thread must hold the lock on the
            // _polling_list while processing incoming messages.
            // This lock is used to give this thread ownership of
            // services on the _polling_list.

            // This is necessary to avoid conflict with other threads
            // processing the _polling_list
            // (e.g., MessageQueueService::~MessageQueueService).

            _polling_list_mutex.lock();
            MessageQueueService *service = list->front();
            ThreadStatus rtn = PEGASUS_THREAD_OK;
            while (service != NULL)
            {
                if ((service->_incoming.count() > 0) &&
                    (service->_die.get() == 0) &&
                    (service->_threads.get() < max_threads_per_svc_queue))
                {
                    // The _threads count is used to track the
                    // number of active threads that have been allocated
                    // to process messages for this service.

                    // The _threads count MUST be incremented while
                    // the polling_routine owns the _polling_list
                    // lock and has ownership of the service object.

                    service->_threads++;
                    rtn = _thread_pool->allocate_and_awaken(
                        service, _req_proc, &_polling_sem);

                    // if no more threads are available, break from the
                    // processing loop
                    if (rtn != PEGASUS_THREAD_OK)
                    {
                        service->_threads--;
                        PEG_TRACE((TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL1,
                            "Could not allocate thread for %s. Queue has %d "
                            "messages waiting and %d threads servicing. "
                            "Skipping the service for now.",
                            service->getQueueName(),
                            service->_incoming.count(),
                            service->_threads.get()));

                        Threads::yield();
                        break;
                    }
                }
                service = list->next_of(service);
            }
            _polling_list_mutex.unlock();
        }
    }
    catch (const Exception& e)
    {
        PEG_TRACE((TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL1,
            "Exception caught in MessageQueueService::polling_routine: %s",
            (const char*)e.getMessage().getCString()));
    }
    catch (const exception& e)
    {
        PEG_TRACE((TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL1,
            "Exception caught in MessageQueueService::polling_routine: %s",
            e.what()));
    }
    catch (...)
    {
        PEG_TRACE_CSTRING(TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL1,
            "Unknown exception caught in MessageQueueService::polling_routine");
    }

    PEGASUS_ASSERT(_stop_polling.get());

    return ThreadReturnType(0);
}


Semaphore MessageQueueService::_polling_sem(0);
AtomicInt MessageQueueService::_stop_polling(0);

MessageQueueService::MessageQueueService(
    const char* name)
    : Base(name),
      _die(0),
      _threads(0),
      _incoming(),
      _incoming_queue_shutdown(0)
{
    _isRunning = true;

    max_threads_per_svc_queue = MAX_THREADS_PER_SVC_QUEUE;

    // if requested thread max is out of range, then set to
    // MAX_THREADS_PER_SVC_QUEUE_LIMIT

    if ((max_threads_per_svc_queue < 1) ||
        (max_threads_per_svc_queue > MAX_THREADS_PER_SVC_QUEUE_LIMIT))
    {
        max_threads_per_svc_queue = MAX_THREADS_PER_SVC_QUEUE_LIMIT;
    }

    PEG_TRACE((TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL3,
        "max_threads_per_svc_queue set to %u.", max_threads_per_svc_queue));

    AutoMutex autoMut(_meta_dispatcher_mutex);

    if (_meta_dispatcher == 0)
    {
        _stop_polling = 0;
        PEGASUS_ASSERT(_service_count.get() == 0);
        _meta_dispatcher = new cimom();

        // _thread_pool = new ThreadPool(initial_cnt, "MessageQueueService",
        //     minimum_cnt, maximum_cnt, deallocateWait);
        //
        _thread_pool =
            new ThreadPool(0, "MessageQueueService", 0, 0, deallocateWait);
    }
    _service_count++;

    // Add to the polling list
    if (!_polling_list)
    {
        _polling_list = new PollingList;
    }
    _polling_list->insert_back(this);
    _meta_dispatcher->registerCIMService(this);
}


MessageQueueService::~MessageQueueService()
{
    // Close incoming queue.
    if (_incoming_queue_shutdown.get() == 0)
    {
        AsyncIoClose *msg = new AsyncIoClose(
            0,
            _queueId);

        SendForget(msg);
        // Wait until our queue has been shut down.
        while (_incoming_queue_shutdown.get() == 0)
        {
            Threads::yield();
        }
    }

    // die now.
    _die = 1;

    _meta_dispatcher->deregisterCIMService(this);

    // Wait until all threads processing the messages
    // for this service have completed.
    while (_threads.get() > 0)
    {
        Threads::yield();
    }

    // The polling_routine locks the _polling_list while
    // processing the incoming messages for services on the
    // list. Deleting the service from the _polling_list
    // prior to processing avoids synchronization issues
    // with the polling_routine.
    _removeFromPollingList(this);

    {
        AutoMutex autoMut(_meta_dispatcher_mutex);

        _service_count--;
        // If we are the last service to die, delete the meta dispatcher.
        if (_service_count.get() == 0)
        {
            _stop_polling++;
            _polling_sem.signal();
            if (_polling_thread)
            {
                _polling_thread->join();
                delete _polling_thread;
                _polling_thread = 0;
            }
            delete _meta_dispatcher;
            _meta_dispatcher = 0;

            delete _thread_pool;
            _thread_pool = 0;
        }
    }

    // Clean up any extra stuff on the queue.
    AsyncOpNode* op = 0;
    while ((op = _incoming.dequeue()))
    {
        delete op;
    }
}


void MessageQueueService::enqueue(Message* msg)
{
    PEG_METHOD_ENTER(TRC_MESSAGEQUEUESERVICE, "MessageQueueService::enqueue()");

    Base::enqueue(msg);

    PEG_METHOD_EXIT();
}

ThreadReturnType PEGASUS_THREAD_CDECL MessageQueueService::_req_proc(
    void* parm)
{
    MessageQueueService* service =
        reinterpret_cast<MessageQueueService*>(parm);
    PEGASUS_ASSERT(service != 0);
    try
    {
        if (service->_die.get() != 0)
        {
            service->_threads--;
            return 0;
        }
        // pull messages off the incoming queue and dispatch them. then
        // check pending messages that are non-blocking
        AsyncOpNode *operation = 0;

        // many operations may have been queued.
        do
        {
            operation = service->_incoming.dequeue();

            if (operation)
            {
                service->_handle_incoming_operation(operation);
            }
        } while (operation);
    }
    catch (const Exception& e)
    {
        PEG_TRACE((TRC_DISCARDED_DATA, Tracer::LEVEL1,
            "Caught exception: \"%s\". Exiting _req_proc.",
            (const char*)e.getMessage().getCString()));
    }
    catch (...)
    {
        PEG_TRACE_CSTRING(TRC_DISCARDED_DATA, Tracer::LEVEL1,
            "Caught unrecognized exception. Exiting _req_proc.");
    }
    service->_threads--;
    return 0;
}

// The callback function is responsible for cleaning up all resources,
// including op, op->_callback_node, and op->_callback_ptr.
void MessageQueueService::_handle_async_callback(AsyncOpNode* op)
{
    PEGASUS_ASSERT(op->_flags == ASYNC_OPFLAGS_CALLBACK);
    // note that _callback_node may be different from op
    // op->_callback_response_q is a "this" pointer we can use for
    // static callback methods
    op->_async_callback(
        op->_callback_node, op->_callback_response_q, op->_callback_ptr);
}
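
// Illustrative only: a minimal sketch (not OpenPegasus code) of a callback
// that satisfies the cleanup contract above. MyService and _my_callback
// are hypothetical names; the signature matches op->_async_callback.
//
//     void MyService::_my_callback(
//         AsyncOpNode* op,       // same node passed as _callback_node
//         MessageQueue* q,       // the _callback_response_q "this" pointer
//         void* ptr)             // the _callback_ptr user data
//     {
//         MyService* service = static_cast<MyService*>(q);
//         AsyncReply* reply =
//             static_cast<AsyncReply*>(op->removeResponse());
//         // ... consume the reply ...
//         delete reply;
//         service->return_op(op);   // the callback owns op and frees it
//     }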


void MessageQueueService::_handle_incoming_operation(AsyncOpNode* operation)
{
    if (operation != 0)
    {
        Message *rq = operation->_request.get();

        // optimization <<< Thu Mar 7 21:04:05 2002 mdd >>>
        // move this to the bottom of the loop when the majority of
        // messages become async messages.

        // divert legacy messages to handleEnqueue
        if ((rq != 0) && (!(rq->getMask() & MessageMask::ha_async)))
        {
            operation->_request.release();
            // delete the op node
            return_op(operation);
            handleEnqueue(rq);
            return;
        }

        if ((operation->_flags & ASYNC_OPFLAGS_CALLBACK) &&
            (operation->_state & ASYNC_OPSTATE_COMPLETE))
        {
            _handle_async_callback(operation);
        }
        else
        {
            PEGASUS_ASSERT(rq != 0);
            _handle_async_request(static_cast<AsyncRequest *>(rq));
        }
    }
    return;
}

void MessageQueueService::_handle_async_request(AsyncRequest *req)
{
    MessageType type = req->getType();
    if (type == ASYNC_IOCLOSE)
    {
        handle_AsyncIoClose(static_cast<AsyncIoClose*>(req));
    }
    else if (type == ASYNC_CIMSERVICE_START)
    {
        handle_CimServiceStart(static_cast<CimServiceStart *>(req));
    }
    else if (type == ASYNC_CIMSERVICE_STOP)
    {
        handle_CimServiceStop(static_cast<CimServiceStop *>(req));
    }
    else
    {
        // we don't handle this request message
        _make_response(req, async_results::CIM_NAK);
    }
}

Boolean MessageQueueService::_enqueueResponse(
    Message* request,
    Message* response)
{
    PEG_METHOD_ENTER(TRC_MESSAGEQUEUESERVICE,
        "MessageQueueService::_enqueueResponse");

    if (request->getMask() & MessageMask::ha_async)
    {
        if (response->getMask() & MessageMask::ha_async)
        {
            _completeAsyncResponse(
                static_cast<AsyncRequest *>(request),
                static_cast<AsyncReply *>(response));

            PEG_METHOD_EXIT();
            return true;
        }
    }

    AsyncRequest* asyncRequest =
        static_cast<AsyncRequest*>(request->get_async());

    if (asyncRequest != 0)
    {
        PEGASUS_ASSERT(asyncRequest->getMask() &
            (MessageMask::ha_async | MessageMask::ha_request));

        AsyncOpNode* op = asyncRequest->op;

        // the legacy request is going to be deleted by its handler
        // remove it from the op node

        static_cast<AsyncLegacyOperationStart *>(asyncRequest)->get_action();

        AsyncLegacyOperationResult *async_result =
            new AsyncLegacyOperationResult(
                op,
                response);
        _completeAsyncResponse(
            asyncRequest,
            async_result);

        PEG_METHOD_EXIT();
        return true;
    }

    // ensure that the destination queue is in response->dest
    PEG_METHOD_EXIT();
    return SendForget(response);
}

void MessageQueueService::_make_response(Message* req, Uint32 code)
{
    cimom::_make_response(req, code);
}

void MessageQueueService::_completeAsyncResponse(
    AsyncRequest* request,
    AsyncReply* reply)
{
    PEG_METHOD_ENTER(TRC_MESSAGEQUEUESERVICE,
        "MessageQueueService::_completeAsyncResponse");

    cimom::_completeAsyncResponse(request, reply);

    PEG_METHOD_EXIT();
}


void MessageQueueService::_complete_op_node(
    AsyncOpNode* op)
{
    cimom::_complete_op_node(op);
}


Boolean MessageQueueService::accept_async(AsyncOpNode* op)
{
    if (!_isRunning)
    {
        // Don't accept any messages other than start.
        if (op->_request.get()->getType() != ASYNC_CIMSERVICE_START)
        {
            return false;
        }
    }

    if (_incoming_queue_shutdown.get() > 0)
        return false;

    if (_polling_thread == NULL)
    {
        PEGASUS_ASSERT(_polling_list);
        _polling_thread = new Thread(
            polling_routine,
            reinterpret_cast<void *>(_polling_list),
            false);
        ThreadStatus tr = PEGASUS_THREAD_OK;
        while ((tr = _polling_thread->run()) != PEGASUS_THREAD_OK)
        {
            if (tr == PEGASUS_THREAD_INSUFFICIENT_RESOURCES)
                Threads::yield();
            else
                throw Exception(MessageLoaderParms(
                    "Common.MessageQueueService.NOT_ENOUGH_THREAD",
                    "Could not allocate thread for the polling thread."));
        }
    }
    if (_die.get() == 0)
    {
        if (_incoming.enqueue(op))
        {
            _polling_sem.signal();
            return true;
        }
    }
    return false;
}

void MessageQueueService::handle_AsyncIoClose(AsyncIoClose *req)
{
    MessageQueueService *service =
        static_cast<MessageQueueService*>(req->op->_op_dest);

#ifdef MESSAGEQUEUESERVICE_DEBUG
    PEGASUS_STD(cout) << service->getQueueName() <<
        " Received AsyncIoClose " << PEGASUS_STD(endl);
#endif
    // set the closing flag, don't accept any more messages
    service->_incoming_queue_shutdown = 1;

    // respond to this message. this is fire and forget, so we
    // don't need to delete anything.
    // this takes care of two problems that were being found
    // << Thu Oct 9 10:52:48 2003 mdd >>
    _make_response(req, async_results::OK);
}

void MessageQueueService::handle_CimServiceStart(CimServiceStart* req)
{
#ifdef MESSAGEQUEUESERVICE_DEBUG
    PEGASUS_STD(cout) << getQueueName() << " received START" <<
        PEGASUS_STD(endl);
#endif
    PEGASUS_ASSERT(!_isRunning);
    _isRunning = true;
    _make_response(req, async_results::OK);
}

void MessageQueueService::handle_CimServiceStop(CimServiceStop* req)
{
#ifdef MESSAGEQUEUESERVICE_DEBUG
    PEGASUS_STD(cout) << getQueueName() << " received STOP" <<
        PEGASUS_STD(endl);
#endif
    PEGASUS_ASSERT(_isRunning);
    _isRunning = false;
    _make_response(req, async_results::CIM_SERVICE_STOPPED);
}

AsyncOpNode* MessageQueueService::get_op()
{
    AsyncOpNode* op = new AsyncOpNode();

    op->_state = ASYNC_OPSTATE_UNKNOWN;
    op->_flags = ASYNC_OPFLAGS_UNKNOWN;

    return op;
}

void MessageQueueService::return_op(AsyncOpNode* op)
{
    delete op;
}


Boolean MessageQueueService::SendAsync(
    AsyncOpNode* op,
    Uint32 destination,
    void (*callback)(AsyncOpNode*, MessageQueue*, void*),
    MessageQueue* callback_response_q,
    void* callback_ptr)
{
    return _sendAsync(
        op,
        destination,
        callback,
        callback_response_q,
        callback_ptr,
        ASYNC_OPFLAGS_CALLBACK);
}
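
// Illustrative only: a hedged sketch of issuing an asynchronous request
// via SendAsync. The AsyncIoClose constructor arguments are inferred from
// the destructor above; targetQid and _my_callback are hypothetical.
//
//     AsyncOpNode* op = get_op();
//     AsyncIoClose* msg = new AsyncIoClose(op, targetQid);
//     op->_request.reset(msg);
//     if (!SendAsync(
//             op,
//             targetQid,      // destination queue id
//             _my_callback,   // runs when the response completes
//             this,           // handed back as callback_response_q
//             0))             // callback_ptr user data
//     {
//         return_op(op);     // routing failed; reclaim op and its message
//     }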

Boolean MessageQueueService::_sendAsync(
    AsyncOpNode* op,
    Uint32 destination,
    void (*callback)(AsyncOpNode*, MessageQueue*, void*),
    MessageQueue* callback_response_q,
    void* callback_ptr,
    Uint32 flags)
{
    PEGASUS_ASSERT(op != 0);
    PEGASUS_ASSERT((callback == 0) ==
        (flags == ASYNC_OPFLAGS_PSEUDO_CALLBACK));

    // destination of this message
    op->_op_dest = MessageQueue::lookup(destination);
    if (op->_op_dest == 0)
    {
        return false;
    }
    op->_flags = flags;
    // initialize the callback data
    // callback function to be executed by recipient of the response
    op->_async_callback = callback;
    // the op node
    op->_callback_node = op;
    // the queue that will receive the response
    op->_callback_response_q = callback_response_q;
    // user data for callback
    op->_callback_ptr = callback_ptr;

    return _meta_dispatcher->route_async(op);
}

Boolean MessageQueueService::SendForget(Message* msg)
{
    AsyncOpNode* op = 0;
    Uint32 mask = msg->getMask();

    if (mask & MessageMask::ha_async)
    {
        op = (static_cast<AsyncMessage *>(msg))->op;
    }

    if (op == 0)
    {
        op = get_op();
        op->_request.reset(msg);
        if (mask & MessageMask::ha_async)
        {
            (static_cast<AsyncMessage *>(msg))->op = op;
        }
    }

    PEGASUS_ASSERT(op->_flags == ASYNC_OPFLAGS_UNKNOWN);
    PEGASUS_ASSERT(op->_state == ASYNC_OPSTATE_UNKNOWN);
    op->_op_dest = MessageQueue::lookup(msg->dest);
    if (op->_op_dest == 0)
    {
        return_op(op);
        return false;
    }

    op->_flags = ASYNC_OPFLAGS_FIRE_AND_FORGET;

    // now see if the meta dispatcher will take it
    return _meta_dispatcher->route_async(op);
}
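
// Illustrative only: fire-and-forget usage, mirroring the destructor above.
// On success, ownership of msg passes to the dispatcher; the caller must
// not delete or reuse it. targetQid is hypothetical.
//
//     AsyncIoClose* msg = new AsyncIoClose(0, targetQid);
//     Boolean delivered = SendForget(msg);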


AsyncReply *MessageQueueService::SendWait(AsyncRequest* request)
{
    if (request == 0)
        return 0;

    Boolean destroy_op = false;

    if (request->op == 0)
    {
        request->op = get_op();
        request->op->_request.reset(request);
        destroy_op = true;
    }

    PEGASUS_ASSERT(request->op->_flags == ASYNC_OPFLAGS_UNKNOWN);
    PEGASUS_ASSERT(request->op->_state == ASYNC_OPSTATE_UNKNOWN);

    _sendAsync(
        request->op,
        request->dest,
        0,
        0,
        (void *)0,
        ASYNC_OPFLAGS_PSEUDO_CALLBACK);

    request->op->_client_sem.wait();

    AsyncReply* rpl = static_cast<AsyncReply *>(request->op->removeResponse());
    rpl->op = 0;

    if (destroy_op == true)
    {
        request->op->_request.release();
        return_op(request->op);
        request->op = 0;
    }
    return rpl;
}
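
// Illustrative only: a hedged sketch of a synchronous round trip through
// SendWait; the CimServiceStop constructor arguments and targetQid are
// assumptions.
//
//     CimServiceStop* req = new CimServiceStop(0, targetQid);
//     AsyncReply* reply = SendWait(req);   // blocks on op->_client_sem
//     if (reply)
//     {
//         // ... examine reply->result ...
//         delete reply;
//     }
//     delete req;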

Uint32 MessageQueueService::find_service_qid(const char* name)
{
    MessageQueue* queue = MessageQueue::lookup(name);
    PEGASUS_ASSERT(queue);
    return queue->getQueueId();
}
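
// Illustrative only, e.g. with a well-known queue name constant (assuming
// PEGASUS_QUEUENAME_OPREQDISPATCHER from Pegasus/Common/Constants.h):
//
//     Uint32 qid = find_service_qid(PEGASUS_QUEUENAME_OPREQDISPATCHER);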

void MessageQueueService::_removeFromPollingList(MessageQueueService *service)
{
    _polling_list_mutex.lock();
    _polling_list->remove(service);
    _polling_list_mutex.unlock();
}

PEGASUS_NAMESPACE_END