//%LICENSE////////////////////////////////////////////////////////////////
//
// Licensed to The Open Group (TOG) under one or more contributor license
// agreements. Refer to the OpenPegasusNOTICE.txt file distributed with
// this work for additional information regarding copyright ownership.
// Each contributor licenses this file to you under the OpenPegasus Open
// Source License; you may not use this file except in compliance with the
// License.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
//////////////////////////////////////////////////////////////////////////
//
//%/////////////////////////////////////////////////////////////////////////////

#include "MessageQueueService.h"
#include <Pegasus/Common/Tracer.h>
#include <Pegasus/Common/MessageLoader.h>

PEGASUS_NAMESPACE_BEGIN

cimom *MessageQueueService::_meta_dispatcher = 0;
AtomicInt MessageQueueService::_service_count(0);
Mutex MessageQueueService::_meta_dispatcher_mutex;

static struct timeval deallocateWait = {300, 0};

ThreadPool *MessageQueueService::_thread_pool = 0;

MessageQueueService::PollingList* MessageQueueService::_polling_list;
Mutex MessageQueueService::_polling_list_mutex;

Thread* MessageQueueService::_polling_thread = 0;

ThreadPool *MessageQueueService::get_thread_pool()
{
    return _thread_pool;
}

//
// MAX_THREADS_PER_SVC_QUEUE
//
// JR Wunderlich Jun 6, 2005
//

#define MAX_THREADS_PER_SVC_QUEUE_LIMIT 5000
#define MAX_THREADS_PER_SVC_QUEUE_DEFAULT 5

#ifndef MAX_THREADS_PER_SVC_QUEUE
# define MAX_THREADS_PER_SVC_QUEUE MAX_THREADS_PER_SVC_QUEUE_DEFAULT
#endif

Uint32 max_threads_per_svc_queue;

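// polling_routine is the body of the single polling thread. It blocks
// on _polling_sem until a service signals that work is available, then
// walks the polling list and hands each service that has queued
// messages to a thread-pool worker (_req_proc), allowing at most
// max_threads_per_svc_queue concurrent worker threads per service.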
ThreadReturnType PEGASUS_THREAD_CDECL MessageQueueService::polling_routine(
    void* parm)
{
    Thread *myself = reinterpret_cast<Thread *>(parm);
    List<MessageQueueService, Mutex> *list =
        reinterpret_cast<List<MessageQueueService, Mutex>*>(myself->get_parm());

    while (_stop_polling.get() == 0)
    {
        _polling_sem.wait();

        if (_stop_polling.get() != 0)
        {
            break;
        }

        // The polling_routine thread must hold the lock on the
        // _polling_list while processing incoming messages.
        // This lock is used to give this thread ownership of
        // services on the _polling_list.

        // This is necessary to avoid conflict with other threads
        // processing the _polling_list
        // (e.g., MessageQueueService::~MessageQueueService).

        list->lock();
        MessageQueueService *service = list->front();
        ThreadStatus rtn = PEGASUS_THREAD_OK;
        while (service != NULL)
        {
            if ((service->_incoming.count() > 0) &&
                (service->_die.get() == 0) &&
                (service->_threads.get() < max_threads_per_svc_queue))
            {
                // The _threads count is used to track the
                // number of active threads that have been allocated
                // to process messages for this service.

                // The _threads count MUST be incremented while
                // the polling_routine holds the _polling_list
                // lock and has ownership of the service object.

                service->_threads++;
                try
                {
                    rtn = _thread_pool->allocate_and_awaken(
                        service, _req_proc, &_polling_sem);
                }
                catch (...)
                {
                    service->_threads--;

                    // allocate_and_awaken should never generate an exception.
                    PEGASUS_ASSERT(0);
                }
                // if no more threads are available, break from the
                // processing loop
                if (rtn != PEGASUS_THREAD_OK)
                {
                    service->_threads--;
                    PEG_TRACE((TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL1,
                        "Could not allocate thread for %s. Queue has %d "
                        "messages waiting and %d threads servicing. "
                        "Skipping the service for right now.",
                        service->getQueueName(),
                        service->_incoming.count(),
                        service->_threads.get()));

                    Threads::yield();
                    service = NULL;
                }
            }
            if (service != NULL)
            {
                service = list->next_of(service);
            }
        }
        list->unlock();
    }

    return ThreadReturnType(0);
}

Semaphore MessageQueueService::_polling_sem(0);
AtomicInt MessageQueueService::_stop_polling(0);

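// The first MessageQueueService constructed creates the shared cimom
// meta dispatcher and the shared thread pool; _service_count tracks
// how many services are sharing them.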
MessageQueueService::MessageQueueService(
    const char* name,
    Uint32 queueID)
    : Base(name, true, queueID),
      _die(0),
      _threads(0),
      _incoming(),
      _incoming_queue_shutdown(0)
{
    _isRunning = true;

    max_threads_per_svc_queue = MAX_THREADS_PER_SVC_QUEUE;

    // if the requested thread max is out of range, then set it to
    // MAX_THREADS_PER_SVC_QUEUE_LIMIT

    if ((max_threads_per_svc_queue < 1) ||
        (max_threads_per_svc_queue > MAX_THREADS_PER_SVC_QUEUE_LIMIT))
    {
        max_threads_per_svc_queue = MAX_THREADS_PER_SVC_QUEUE_LIMIT;
    }

    PEG_TRACE((TRC_MESSAGEQUEUESERVICE, Tracer::LEVEL3,
        "max_threads_per_svc_queue set to %u.", max_threads_per_svc_queue));

    AutoMutex autoMut(_meta_dispatcher_mutex);

    if (_meta_dispatcher == 0)
    {
        _stop_polling = 0;
        PEGASUS_ASSERT(_service_count.get() == 0);
        _meta_dispatcher = new cimom();

        // _thread_pool = new ThreadPool(initial_cnt, "MessageQueueService",
        //     minimum_cnt, maximum_cnt, deallocateWait);
        _thread_pool =
            new ThreadPool(0, "MessageQueueService", 0, 0, deallocateWait);
    }
    _service_count++;

    _get_polling_list()->insert_back(this);
}

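// Teardown happens in a strict order: shut down the incoming queue,
// wait for in-flight worker threads to finish, remove this service
// from the polling list, and, if this is the last service, destroy
// the shared polling thread, meta dispatcher, and thread pool.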
MessageQueueService::~MessageQueueService()
{
    // Close the incoming queue.
    if (_incoming_queue_shutdown.get() == 0)
    {
        AsyncIoClose *msg = new AsyncIoClose(
            0,
            _queueId,
            _queueId,
            true);
        SendForget(msg);
        // Wait until our queue has been shut down.
        while (_incoming_queue_shutdown.get() == 0)
        {
            Threads::yield();
        }
    }

    // die now.
    _die = 1;

    // Wait until all threads processing the messages
    // for this service have completed.
    while (_threads.get() > 0)
    {
        Threads::yield();
    }

    // The polling_routine locks the _polling_list while
    // processing the incoming messages for services on the
    // list. Deleting the service from the _polling_list
    // prior to processing avoids synchronization issues
    // with the polling_routine.
    _removeFromPollingList(this);

    {
        AutoMutex autoMut(_meta_dispatcher_mutex);
        _service_count--;

        // If we are the last service to die, delete the meta dispatcher.
        if (_service_count.get() == 0)
        {
            _stop_polling++;
            _polling_sem.signal();
            if (_polling_thread)
            {
                _polling_thread->join();
                delete _polling_thread;
                _polling_thread = 0;
            }
            delete _meta_dispatcher;
            _meta_dispatcher = 0;

            delete _thread_pool;
            _thread_pool = 0;
        }
    }  // mutex unlocks here

    // Clean up any extra stuff on the queue.
    AsyncOpNode* op = 0;
    while ((op = _incoming.dequeue()))
    {
        delete op;
    }
}

void MessageQueueService::enqueue(Message* msg)
{
    PEG_METHOD_ENTER(TRC_MESSAGEQUEUESERVICE, "MessageQueueService::enqueue()");

    Base::enqueue(msg);

    PEG_METHOD_EXIT();
}

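// _req_proc is the thread-pool work routine started by
// polling_routine. It drains this service's _incoming queue,
// dispatching each operation, and decrements _threads when done.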
ThreadReturnType PEGASUS_THREAD_CDECL MessageQueueService::_req_proc(
    void* parm)
{
    MessageQueueService* service =
        reinterpret_cast<MessageQueueService*>(parm);
    PEGASUS_ASSERT(service != 0);
    try
    {
        if (service->_die.get() != 0)
        {
            service->_threads--;
            return 0;
        }
        // Pull messages off the incoming queue and dispatch them, then
        // check pending messages that are non-blocking.
        AsyncOpNode *operation = 0;

        // many operations may have been queued.
        do
        {
            operation = service->_incoming.dequeue();

            if (operation)
            {
                operation->_service_ptr = service;
                service->_handle_incoming_operation(operation);
            }
        } while (operation);
    }
    catch (const Exception& e)
    {
        PEG_TRACE((TRC_DISCARDED_DATA, Tracer::LEVEL1,
            "Caught exception: \"%s\". Exiting _req_proc.",
            (const char*)e.getMessage().getCString()));
    }
    catch (...)
    {
        PEG_TRACE_CSTRING(TRC_DISCARDED_DATA, Tracer::LEVEL1,
            "Caught unrecognized exception. Exiting _req_proc.");
    }
    service->_threads--;
    return 0;
}

void MessageQueueService::_sendwait_callback(
    AsyncOpNode* op,
    MessageQueue* q,
    void *parm)
{
    op->_client_sem.signal();
}

// The callback function is responsible for cleaning up all resources,
// including op, op->_callback_node, and op->_callback_ptr.
void MessageQueueService::_handle_async_callback(AsyncOpNode* op)
{
    PEGASUS_ASSERT(op->_flags == ASYNC_OPFLAGS_CALLBACK);
    // note that _callback_node may be different from op
    // op->_callback_response_q is a "this" pointer we can use for
    // static callback methods
    op->_async_callback(
        op->_callback_node, op->_callback_response_q, op->_callback_ptr);
}

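// Dispatch a single dequeued operation: legacy (non-async) messages
// are diverted to handleEnqueue, completed callback operations go to
// _handle_async_callback, and everything else is handled as an async
// request.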
void MessageQueueService::_handle_incoming_operation(AsyncOpNode* operation)
{
    if (operation != 0)
    {
        Message *rq = operation->_request.get();

        // optimization <<< Thu Mar 7 21:04:05 2002 mdd >>>
        // move this to the bottom of the loop when the majority of
        // messages become async messages.

        // divert legacy messages to handleEnqueue
        if ((rq != 0) && (!(rq->getMask() & MessageMask::ha_async)))
        {
            operation->_request.release();
            // delete the op node
            return_op(operation);
            handleEnqueue(rq);
            return;
        }

        if ((operation->_flags & ASYNC_OPFLAGS_CALLBACK) &&
            (operation->_state & ASYNC_OPSTATE_COMPLETE))
        {
            _handle_async_callback(operation);
        }
        else
        {
            PEGASUS_ASSERT(rq != 0);
            _handle_async_request(static_cast<AsyncRequest *>(rq));
        }
    }
}

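// Dispatch an async request by message type; request types this
// service does not handle are NAKed back to the sender.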
void MessageQueueService::_handle_async_request(AsyncRequest *req)
{
    MessageType type = req->getType();
    if (type == ASYNC_IOCLOSE)
    {
        handle_AsyncIoClose(static_cast<AsyncIoClose*>(req));
    }
    else if (type == ASYNC_CIMSERVICE_START)
    {
        handle_CimServiceStart(static_cast<CimServiceStart *>(req));
    }
    else if (type == ASYNC_CIMSERVICE_STOP)
    {
        handle_CimServiceStop(static_cast<CimServiceStop *>(req));
    }
    else
    {
        // we don't handle this request message
        _make_response(req, async_results::CIM_NAK);
    }
}

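// Pair a response with its request. Async-to-async exchanges are
// completed directly; a legacy request wrapped in an
// AsyncLegacyOperationStart has its response wrapped in an
// AsyncLegacyOperationResult; otherwise the response is sent
// fire-and-forget to response->dest.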
Boolean MessageQueueService::_enqueueResponse(
    Message* request,
    Message* response)
{
    PEG_METHOD_ENTER(TRC_MESSAGEQUEUESERVICE,
        "MessageQueueService::_enqueueResponse");

    if (request->getMask() & MessageMask::ha_async)
    {
        if (response->getMask() & MessageMask::ha_async)
        {
            _completeAsyncResponse(
                static_cast<AsyncRequest *>(request),
                static_cast<AsyncReply *>(response));

            PEG_METHOD_EXIT();
            return true;
        }
    }

    AsyncRequest* asyncRequest =
        static_cast<AsyncRequest*>(request->get_async());

    if (asyncRequest != 0)
    {
        PEGASUS_ASSERT(asyncRequest->getMask() &
            (MessageMask::ha_async | MessageMask::ha_request));

        AsyncOpNode* op = asyncRequest->op;

        // the legacy request is going to be deleted by its handler;
        // remove it from the op node

        static_cast<AsyncLegacyOperationStart *>(asyncRequest)->get_action();

        AsyncLegacyOperationResult *async_result =
            new AsyncLegacyOperationResult(
                op,
                response);
        _completeAsyncResponse(
            asyncRequest,
            async_result);

        PEG_METHOD_EXIT();
        return true;
    }

    // ensure that the destination queue is in response->dest
    PEG_METHOD_EXIT();
    return SendForget(response);
}

void MessageQueueService::_make_response(Message* req, Uint32 code)
{
    cimom::_make_response(req, code);
}

void MessageQueueService::_completeAsyncResponse(
    AsyncRequest* request,
    AsyncReply* reply)
{
    PEG_METHOD_ENTER(TRC_MESSAGEQUEUESERVICE,
        "MessageQueueService::_completeAsyncResponse");

    cimom::_completeAsyncResponse(request, reply);

    PEG_METHOD_EXIT();
}

void MessageQueueService::_complete_op_node(
    AsyncOpNode* op)
{
    cimom::_complete_op_node(op);
}

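// Queue an operation for this service. The polling thread is created
// lazily on first use. Returns false if the service is dying or its
// incoming queue has been shut down.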
Boolean MessageQueueService::accept_async(AsyncOpNode* op)
{
    if (_incoming_queue_shutdown.get() > 0)
        return false;
    if (_polling_thread == NULL)
    {
        _polling_thread = new Thread(
            polling_routine,
            reinterpret_cast<void *>(_get_polling_list()),
            false);
        ThreadStatus tr = PEGASUS_THREAD_OK;
        while ((tr = _polling_thread->run()) != PEGASUS_THREAD_OK)
        {
            if (tr == PEGASUS_THREAD_INSUFFICIENT_RESOURCES)
                Threads::yield();
            else
                throw Exception(MessageLoaderParms(
                    "Common.MessageQueueService.NOT_ENOUGH_THREAD",
                    "Could not allocate thread for the polling thread."));
        }
    }
    if (_die.get() == 0)
    {
        if (_incoming.enqueue(op))
        {
            _polling_sem.signal();
            return true;
        }
    }
    return false;
}

void MessageQueueService::handle_AsyncIoClose(AsyncIoClose *req)
{
    MessageQueueService *service =
        static_cast<MessageQueueService*>(req->op->_op_dest);

#ifdef MESSAGEQUEUESERVICE_DEBUG
    PEGASUS_STD(cout) << service->getQueueName() <<
        " Received AsyncIoClose " << PEGASUS_STD(endl);
#endif
    // Set the closing flag; don't accept any more messages.
    service->_incoming_queue_shutdown = 1;

    // Respond to this message. This is fire and forget, so we
    // don't need to delete anything.
    // This takes care of two problems that were being found
    // << Thu Oct 9 10:52:48 2003 mdd >>
    _make_response(req, async_results::OK);
}

void MessageQueueService::handle_CimServiceStart(CimServiceStart* req)
{
#ifdef MESSAGEQUEUESERVICE_DEBUG
    PEGASUS_STD(cout) << getQueueName() << " received START" <<
        PEGASUS_STD(endl);
#endif
    PEGASUS_ASSERT(!_isRunning);
    _isRunning = true;
    _make_response(req, async_results::OK);
}

void MessageQueueService::handle_CimServiceStop(CimServiceStop* req)
{
#ifdef MESSAGEQUEUESERVICE_DEBUG
    PEGASUS_STD(cout) << getQueueName() << " received STOP" << PEGASUS_STD(endl);
#endif
    PEGASUS_ASSERT(_isRunning);
    _isRunning = false;
    _make_response(req, async_results::CIM_SERVICE_STOPPED);
}

AsyncOpNode* MessageQueueService::get_op()
{
    AsyncOpNode* op = new AsyncOpNode();

    op->_state = ASYNC_OPSTATE_UNKNOWN;
    op->_flags = ASYNC_OPFLAGS_UNKNOWN;

    return op;
}

void MessageQueueService::return_op(AsyncOpNode* op)
{
    delete op;
}

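// Route an operation to the destination queue and register a callback
// to be invoked (with callback_response_q and callback_ptr) when the
// response arrives.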
Boolean MessageQueueService::SendAsync(
    AsyncOpNode* op,
    Uint32 destination,
    void (*callback)(AsyncOpNode*, MessageQueue*, void*),
    MessageQueue* callback_response_q,
    void* callback_ptr)
{
    return _sendAsync(
        op,
        destination,
        callback,
        callback_response_q,
        callback_ptr,
        ASYNC_OPFLAGS_CALLBACK);
}

Boolean MessageQueueService::_sendAsync(
    AsyncOpNode* op,
    Uint32 destination,
    void (*callback)(AsyncOpNode*, MessageQueue*, void*),
    MessageQueue* callback_response_q,
    void* callback_ptr,
    Uint32 flags)
{
    PEGASUS_ASSERT(op != 0 && callback != 0);

    // destination of this message
    op->_op_dest = MessageQueue::lookup(destination);
    if (op->_op_dest == 0)
    {
        return false;
    }
    op->_flags = flags;
    // initialize the callback data
    // callback function to be executed by recipient of the response
    op->_async_callback = callback;
    // the op node
    op->_callback_node = op;
    // the queue that will receive the response
    op->_callback_response_q = callback_response_q;
    // user data for the callback
    op->_callback_ptr = callback_ptr;
    // I am the originator of this request
    op->_callback_request_q = this;

    return _meta_dispatcher->route_async(op);
}

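// One-way send. An op node is attached to the message (created if
// necessary), marked fire-and-forget, and routed; no response is
// returned to the caller.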
Boolean MessageQueueService::SendForget(Message* msg)
{
    AsyncOpNode* op = 0;
    Uint32 mask = msg->getMask();

    if (mask & MessageMask::ha_async)
    {
        op = (static_cast<AsyncMessage *>(msg))->op;
    }

    if (op == 0)
    {
        op = get_op();
        op->_request.reset(msg);
        if (mask & MessageMask::ha_async)
        {
            (static_cast<AsyncMessage *>(msg))->op = op;
        }
    }

    PEGASUS_ASSERT(op->_flags == ASYNC_OPFLAGS_UNKNOWN);
    PEGASUS_ASSERT(op->_state == ASYNC_OPSTATE_UNKNOWN);
    op->_op_dest = MessageQueue::lookup(msg->dest);
    if (op->_op_dest == 0)
    {
        return_op(op);
        return false;
    }

    op->_flags = ASYNC_OPFLAGS_FIRE_AND_FORGET;

    // now see if the meta dispatcher will take it
    return _meta_dispatcher->route_async(op);
}

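// Synchronous send. Routes the request with a pseudo-callback that
// signals the op node's client semaphore, blocks until the response
// arrives, and returns the detached reply.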
AsyncReply *MessageQueueService::SendWait(AsyncRequest* request)
{
    if (request == 0)
        return 0;

    Boolean destroy_op = false;

    if (request->op == 0)
    {
        request->op = get_op();
        request->op->_request.reset(request);
        destroy_op = true;
    }

    PEGASUS_ASSERT(request->op->_flags == ASYNC_OPFLAGS_UNKNOWN);
    PEGASUS_ASSERT(request->op->_state == ASYNC_OPSTATE_UNKNOWN);

    request->block = false;
    _sendAsync(
        request->op,
        request->dest,
        _sendwait_callback,
        this,
        (void *)0,
        ASYNC_OPFLAGS_PSEUDO_CALLBACK);

    request->op->_client_sem.wait();

    AsyncReply* rpl = static_cast<AsyncReply *>(request->op->removeResponse());
    rpl->op = 0;

    if (destroy_op == true)
    {
        request->op->_request.release();
        return_op(request->op);
        request->op = 0;
    }
    return rpl;
}

Uint32 MessageQueueService::find_service_qid(const String &name)
{
    MessageQueue *queue = MessageQueue::lookup((const char*)name.getCString());
    PEGASUS_ASSERT(queue);
    return queue->getQueueId();
}

MessageQueueService::PollingList* MessageQueueService::_get_polling_list()
{
    _polling_list_mutex.lock();

    if (!_polling_list)
        _polling_list = new PollingList;

    _polling_list_mutex.unlock();

    return _polling_list;
}

void MessageQueueService::_removeFromPollingList(MessageQueueService *service)
{
    _polling_list_mutex.lock();
    _polling_list->remove(service);
    _polling_list_mutex.unlock();
}

PEGASUS_NAMESPACE_END