
Diff for /pegasus/src/Pegasus/Common/Thread.cpp between version 1.1.2.8 and 1.1.2.9

version 1.1.2.8, 2001/10/03 16:55:03
version 1.1.2.9, 2001/10/25 15:41:01
Line 59 (v.1.1.2.8) / Line 59 (v.1.1.2.9)
      }
      catch(IPCException& e)
      {
-         e = e;
          delete cu;
          throw;
      }

Line 75 (v.1.1.2.8) / Line 74 (v.1.1.2.9)
      }
      catch(IPCException& e)
      {
-         e = e;
          PEGASUS_ASSERT(0);
      }
      if(execute == true)

Line 101 (v.1.1.2.8) / Line 99 (v.1.1.2.9)
         }
         catch(IPCException& e)
         {
-            e = e;
             PEGASUS_ASSERT(0);
             break;
         }

Line 113 (v.1.1.2.8) / Line 110 (v.1.1.2.9)

 #endif

The lines below were added in v.1.1.2.9:

   ThreadPool::ThreadPool(Sint16 initial_size,
                          Sint16 max,
                          Sint16 min,
                          Sint8 *key)
      : _max_threads(max), _min_threads(min),
        _current_threads(0), _waiters(initial_size),
        _pool_sem(0), _pool(true), _running(true),
        _dying(0)
   {
      _allocate_wait.tv_sec = 1;
      _allocate_wait.tv_usec = 0;
      _deallocate_wait.tv_sec = 30;
      _deallocate_wait.tv_usec = 0;
      _deadlock_detect.tv_sec = 60;
      _deadlock_detect.tv_usec = 0;
      memset(_key, 0x00, 17);
      if(key != 0)
         strncpy(_key, key, 16);
      if(_max_threads < initial_size)
         _max_threads = initial_size;
      if(_min_threads > initial_size)
         _min_threads = initial_size;
   
      int i;
      for(i = 0; i < initial_size; i++)
      {
         _link_pool(_init_thread());
      }
   }
   
   ThreadPool::~ThreadPool(void)
   {
      _dying++;
      Thread *th = _pool.remove_first();
      while(th != 0)
      {
         // signal the thread's sleep semaphore
         th->cancel();
         th->join();
         th->empty_tsd();
         delete th;
         th = _pool.remove_first();
      }
   }
   
   // make this static to the class
   PEGASUS_THREAD_RETURN PEGASUS_THREAD_CDECL ThreadPool::_loop(void *parm)
   {
      Thread *myself = (Thread *)parm;
      if(myself == 0)
         throw NullPointer();
      ThreadPool *pool = (ThreadPool *)myself->get_parm();
      if(pool == 0 )
         throw NullPointer();
      Semaphore *sleep_sem;
      struct timeval *deadlock_timer;
   
      try
      {
         sleep_sem = (Semaphore *)myself->reference_tsd("sleep sem");
         myself->dereference_tsd();
         deadlock_timer = (struct timeval *)myself->reference_tsd("deadlock timer");
         myself->dereference_tsd();
      }
      catch(IPCException & e)
      {
         myself->exit_self(0);
      }
      if(sleep_sem == 0 || deadlock_timer == 0)
         throw NullPointer();
   
      while(pool->_dying < 1)
      {
         myself->test_cancel();
         sleep_sem->wait();
         // when we awaken we reside on the running queue, not the pool queue
         myself->test_cancel();
         gettimeofday(deadlock_timer, NULL);
   
         PEGASUS_THREAD_RETURN (PEGASUS_THREAD_CDECL *_work)(void *);
         void *parm;
   
         try
         {
            _work = (PEGASUS_THREAD_RETURN (PEGASUS_THREAD_CDECL *)(void *)) \
               myself->reference_tsd("work func");
            myself->dereference_tsd();
            parm = myself->reference_tsd("work parm");
            myself->dereference_tsd();
         }
         catch(IPCException & e)
         {
            myself->exit_self(0);
         }
   
         if(_work == 0)
            throw NullPointer();
         _work(parm);
   
         // put myself back onto the available list
         try
         {
            pool->_running.remove((void *)myself);
            pool->_link_pool(myself);
         }
         catch(IPCException & e)
         {
            myself->exit_self(0);
         }
      }
      myself->exit_self(0);
      return((PEGASUS_THREAD_RETURN)0);
   }
   
   
   void ThreadPool::allocate_and_awaken(void *parm,
                                        PEGASUS_THREAD_RETURN \
                                        (PEGASUS_THREAD_CDECL *work)(void *))
      throw(IPCException)
   {
      struct timeval start;
      gettimeofday(&start, NULL);
   
      Thread *th = _pool.remove_first();
   
      while (th == 0 && _dying < 1)
      {
         try  // we couldn't get a free thread from the pool
         {
            // wait for the right interval and try again
            while(th == 0 && _dying < 1)
            {
               _check_deadlock(&start);
               Uint32 interval = _allocate_wait.tv_sec * 1000;
               if(_allocate_wait.tv_usec > 0)
                  interval += (_allocate_wait.tv_usec / 1000);
               // will throw a timeout if no thread comes free
               _pool_sem.time_wait(interval);
               th = _pool.remove_first();
            }
         }
         catch(TimeOut & to)
         {
            if(_current_threads < _max_threads)
            {
               th = _init_thread();
               break;
            }
         }
         // will throw a Deadlock Exception before falling out of the loop
         _check_deadlock(&start);
      } // while th == null
   
      if(_dying < 1)
      {
         // initialize the thread data with the work function and parameters
         th->remove_tsd("work func");
         th->put_tsd("work func", NULL,
                     sizeof( PEGASUS_THREAD_RETURN (PEGASUS_THREAD_CDECL *)(void *)),
                     (void *)work);
         th->remove_tsd("work parm");
         th->put_tsd("work parm", NULL, sizeof(void *), parm);
   
         // put the thread on the running list
         _running.insert_first(th);
   
         // signal the thread's sleep semaphore to awaken it
         Semaphore *sleep_sem = (Semaphore *)th->reference_tsd("sleep sem");
         if(sleep_sem == 0)
            throw NullPointer();
         sleep_sem->signal();
      }
      else
         _pool.insert_first(th);
   }
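
   // Illustrative only, not part of this revision: a minimal sketch of how a
   // caller might hand work to the pool through allocate_and_awaken(). The
   // names my_work, dispatch_example and msg, and the pool sizes, are
   // hypothetical; the sketch assumes the Pegasus Thread/ThreadPool headers
   // and thread macros are in scope.
   PEGASUS_THREAD_RETURN PEGASUS_THREAD_CDECL my_work(void *parm)
   {
      // process the unit of work carried in parm
      return((PEGASUS_THREAD_RETURN)0);
   }

   void dispatch_example(void)
   {
      // start with 4 pooled threads, allow growth to 8 and shrinkage to 2,
      // no key; kept static so the pool outlives the dispatched work
      static ThreadPool pool(4, 8, 2, 0);

      static char msg[] = "work item";

      // wakes an idle pooled thread (or creates one, up to _max_threads);
      // declared throw(IPCException) and can throw Deadlock if no thread
      // comes free within the deadlock interval
      pool.allocate_and_awaken((void *)msg, my_work);
   }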
   
   // caller is responsible for only calling this routine during slack periods
   // but should call it at least once per _deadlock_detect with the running q
   // and at least once per _deallocate_wait for the pool q
   
   void ThreadPool::_kill_dead_threads(DQueue<Thread> *q, Boolean (*check)(struct timeval *))
      throw(IPCException)
   {
      struct timeval now;
      gettimeofday(&now, NULL);
   
   DQueue<Thread> dead;
   
      if(q->count() > 0 )
      {
         try
         {
            q->try_lock();
         }
         catch(AlreadyLocked & a)
         {
            return;
         }
   
         Thread *context = 0;
         struct timeval dt = { 0, 0 };
         struct timeval *dtp;
         Thread *th = q->next(context);
         while (th != 0 )
         {
            try
            {
               dtp = (struct timeval *)th->try_reference_tsd("deadlock timer");
            }
            catch(AlreadyLocked & a)
            {
               context = th;
               th = q->next(context);
               continue;
            }
   
            if(dtp != 0)
            {
               memcpy(&dt, dtp, sizeof(struct timeval));
   
            }
            th->dereference_tsd();
            if( true == check(&dt))
            {
               th = q->remove_no_lock((void *)th);
   
               if(th != 0)
               {
                  dead.insert_first(th);
                  th = 0;
               }
            }
            context = th;
            th = q->next(context);
         }
         q->unlock();
      }
   
      if(dead.count())
      {
         Thread *th = dead.remove_first();
         while(th != 0)
         {
            th->cancel();
            th->join();
            delete th;
            th = dead.remove_first();
         }
      }
      return;
   }
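
   // Illustrative only, not part of this revision: _kill_dead_threads() leaves
   // the policy to its 'check' callback, which is handed a copy of each
   // thread's "deadlock timer" timestamp. A hypothetical predicate matching
   // that contract is sketched below; the name and the 60 second cutoff
   // (mirroring the default _deadlock_detect interval) are assumptions.
   static Boolean thread_looks_hung(struct timeval *last_activity)
   {
      struct timeval now;
      gettimeofday(&now, NULL);
      // true when the thread has not refreshed its deadlock timer for
      // more than 60 seconds
      return((now.tv_sec - last_activity->tv_sec) > 60);
   }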
   
   Boolean ThreadPool::_check_time(struct timeval *start, struct timeval *interval)
   {
      struct timeval now;
      gettimeofday(&now, NULL);
      if( (now.tv_sec - start->tv_sec) > interval->tv_sec ||
          (((now.tv_sec - start->tv_sec) == interval->tv_sec) &&
           ((now.tv_usec - start->tv_usec) >= interval->tv_usec ) ) )
         return true;
      else
         return false;
   }
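
   // Worked illustration with hypothetical values, not part of this revision:
   // with start = { 100, 500000 } and interval = _deadlock_detect = { 60, 0 },
   // _check_time() first returns true when gettimeofday() reports
   // now = { 160, 500000 } or any later time; at that point _check_deadlock()
   // below would throw Deadlock.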
   
   void ThreadPool::_sleep_sem_del(void *p)
   {
      if(p != 0)
      {
         delete (Semaphore *)p;
      }
   }
   
   inline void ThreadPool::_check_deadlock(struct timeval *start) throw(Deadlock)
   {
      if (true == _check_time(start, &_deadlock_detect))
         throw Deadlock(pegasus_thread_self());
      return;
   }
   
   
   inline Boolean ThreadPool::_check_deadlock_no_throw(struct timeval *start)
   {
      return(_check_time(start, &_deadlock_detect));
   }
   
   inline Boolean ThreadPool::_check_dealloc(struct timeval *start)
   {
      return(_check_time(start, &_deallocate_wait));
   }
   
   inline Thread *ThreadPool::_init_thread(void) throw(IPCException)
   {
      Thread *th = (Thread *) new Thread(&_loop, this, false);
      // allocate a sleep semaphore and pass it in the thread context
      // initial count is zero, loop function will sleep until
      // we signal the semaphore
      Semaphore *sleep_sem = (Semaphore *) new Semaphore(0);
      th->put_tsd("sleep sem", &_sleep_sem_del, sizeof(Semaphore), (void *)sleep_sem);
      struct timeval *dldt = (struct timeval *) ::operator new(sizeof(struct timeval));
      th->put_tsd("deadlock timer", thread_data::default_delete, sizeof(struct timeval), (void *)dldt);
      // thread will enter _loop(void *) and sleep on sleep_sem until we signal it
      th->run();
      _current_threads++;
      return th;
   }
   
   inline void ThreadPool::_link_pool(Thread *th) throw(IPCException)
   {
      if(th == 0)
         throw NullPointer();
      _pool.insert_first(th);
      _pool_sem.signal();
   }
   
   
PEGASUS_NAMESPACE_END
  

