(file) Return to readwritelockatomic.c CVS log (file) (dir) Up to [OMI] / omi / pal

#include "lock.h"
#include "atomic.h"
#include "thread.h"
#include "cpu.h"

/*
 * The entire lock state lives in one pointer-sized word, split into the
 * bit fields of LockFields (below).  The widths are chosen so the six
 * fields exactly fill one size_t:
 *   64-bit: 13 + 4 + 8 + 13 + 13 + 13 = 64 bits
 *   32-bit:  6 + 4 + 4 +  6 +  6 +  6 = 32 bits
 * FIELD_MAX is the all-ones value of a ticket/owner field; FIELD_SIGN is
 * its top bit, used as a "sign" when testing wrapped ticket differences.
 */
#if defined(PAL_64BIT)
# define FIELD_SIZE 13
# define FIELD_MAX 0x1fff
# define FIELD_SIGN 0x1000
# define SPIN_SIZE 8
# define SPIN_MAX 0xff
# define SPIN_SIGN 0x80
#elif defined(PAL_32BIT)
# define FIELD_SIZE 6
# define FIELD_MAX 0x3f
# define FIELD_SIGN 0x20
# define SPIN_SIZE 4
# define SPIN_MAX 0xf
# define SPIN_SIGN 0x8
#else
# error "unknown pointer size"
#endif /* 32/64 bit */

/* owners == OWN_EXCLUSIVE (all bits set) marks a writer; shared readers
 * are therefore capped one below that sentinel. */
#define OWN_MAXSHARED (FIELD_MAX - 1)
#define OWN_EXCLUSIVE FIELD_MAX
/* Coarse time source; low bits are compared against LockFields.unfair to
 * bound the window in which queue-jumping ("unfair") acquisition is allowed. */
#define CurrentTick() CPU_GetTimeStamp()

#if defined(_MSC_VER)
/* C4214: nonstandard extension used - bit field types other than int. */
#pragma warning(disable:4214)
#endif
 31              
/*
 * Bit-field view of the lock word.  The fields must exactly fill one
 * size_t so the whole struct can be read/CAS'd as a single ptrdiff_t.
 * entry/writer/exit implement a ticket queue: waiters increment entry on
 * arrival and exit on acquisition; writer records the entry ticket of the
 * most recently queued writer so readers know whom to wait behind.
 */
typedef struct _LockFields
{
    size_t owners : FIELD_SIZE; /* shared owner count, or OWN_EXCLUSIVE for a writer */
    size_t unfair : 4;          /* low tick bits when unfair acquisition was last allowed */
    size_t spin   : SPIN_SIZE;  /* adaptive spin state; high bit set => spinning rarely works */
    size_t entry  : FIELD_SIZE; /* queue entry ticket (bumped when a waiter joins) */
    size_t writer : FIELD_SIZE; /* entry ticket of the most recently queued writer */
    size_t exit   : FIELD_SIZE; /* queue exit ticket (bumped when a waiter acquires) */
} LockFields;
 41              
/* View the lock as the single atomic word that Atomic_* operates on. */
#define LockState(p)  ((ptrdiff_t*)p)
/* Read the whole lock word at once (with prefetch hint) so the bit
 * fields are captured from one consistent snapshot. */
#define ReadLockState(p) PAL_PREFETCH((volatile ptrdiff_t*)(p))
/* Field accessors for a lock-word VALUE held in local variable x.
 * These reinterpret the local's storage; x must be size_t-sized. */
#define LockOwners(x) (((LockFields*)&x)->owners)
#define LockUnfair(x) (((LockFields*)&x)->unfair)
#define LockSpin(x)   (((LockFields*)&x)->spin)
#define LockEntry(x)  (((LockFields*)&x)->entry)
#define LockWriter(x) (((LockFields*)&x)->writer)
#define LockExit(x)   (((LockFields*)&x)->exit)
 50              
/*
 * Fast-path attempt to take shared (read) ownership.
 * Returns 1 on success, 0 if the caller must fall back to the wait queue.
 * Never blocks; the loop only retries a CAS that lost a race.
 */
static int TryAcquireRead(
    _Inout_ ReadWriteLock* self
)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t oldState, state, swapState;

    for(;;)
    {
        oldState = ReadLockState(lock);
        /* Adding 1 to the word increments the owners field (lowest bits). */
        state = oldState + 1;

        /* Skipped when owners is the only active field
         * and we did not try to add too many shared owners. */
        if (state >= OWN_EXCLUSIVE)
        {
            /* Detect whether adding another shared owner is impossible. */
            if (LockOwners(oldState) >= OWN_MAXSHARED)
                return 0;

            /* If writer == exit, no writers are waiting. */
            if ((LockWriter(state) ^ LockExit(state)) != 0)
            {
                /* Writers are waiting. Acquiring would jump the queue.
                 * The tick test opens a brief "unfair" window; outside it
                 * we refuse and queue up.  (NOTE(review): the & 14 mask
                 * presumably matches the 4-bit unfair field modulo timer
                 * granularity — confirm against CPU_GetTimeStamp.) */
                if (((CurrentTick() - LockUnfair(oldState)) & 14) != 0)
                    return 0;
            }
        }

        /* Ready to take shared ownership. */
        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            return 1;
    }
}
 86              
/*
 * Fast-path attempt to take exclusive (write) ownership.
 * Returns 1 on success, 0 if the caller must fall back to the wait queue.
 * Never blocks; the loop only retries a CAS that lost a race.
 */
static int TryAcquireWrite(
    _Inout_ ReadWriteLock* self
)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t oldState, state, swapState;

    for(;;)
    {
        oldState = ReadLockState(lock);
        /* Setting all owners bits marks exclusive ownership. */
        state = oldState | OWN_EXCLUSIVE;

        /* Skipped when the lock is empty. */
        if (oldState != 0)
        {
            /* Detect whether there are existing owners. */
            if (LockOwners(oldState) != 0)
                return 0;

            /* Someone must be waiting. Acquiring would jump the queue.
             * Only allowed inside the brief unfair tick window. */
            if (((CurrentTick() - LockUnfair(oldState)) & 14) != 0)
                return 0;
        }

        /* Ready to take exclusive ownership. */
        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            return 1;
    }
}
117              
/*
 * Slow path for shared acquisition: join the ticket queue, wait behind
 * the most recent writer, then take shared ownership.  May spin and/or
 * block in CondLock_Wait.  Also implements a bounded "unfair" shortcut:
 * the last thread in line may de-queue and acquire immediately during
 * the unfair tick window.
 */
static void QueueAcquireRead(
    _Inout_ ReadWriteLock* self
)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t oldState, state, swapState, preQueuedState;
    size_t waitFor, diff, key, spinState, spinCount;

    /* Phase 1: atomically take an entry ticket. */
    for (;;)
    {
        oldState = ReadLockState(lock);
        state = oldState;

        /* If there is no queue, we are the first one to wait;
         * allow unfairness for the current timer tick. */
        if (state <= OWN_EXCLUSIVE)
            LockUnfair(state) = CurrentTick();

        /* Insert a barrier every half revolution.
         * This stops writer arithmetic from wrapping. */
        if ((LockEntry(state) & ~FIELD_SIGN) == 0)
            LockWriter(state) = LockEntry(state);

        if (++LockEntry(state) == LockExit(state))
        {
            /* The queue arithmetic will wrap if we continue. */
            Thread_Yield();
            continue;
        }

        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            break;
    }

    /* This thread now has a place in the queue.
     * Threads behind us may be depending on us to wake them up. */

    /* Wait for the most recent writer to enter the queue. */
    waitFor = LockWriter(state);
    key = (size_t)lock ^ waitFor;     /* wait-channel key: lock address mixed with ticket */
    preQueuedState = oldState;        /* snapshot from before we joined; used to undo */
    spinState = LockSpin(oldState);

    /* Phase 2: wait until the writer ahead has acquired and released. */
    for (;;)
    {
        /* Avoid write prefetching since we expect to wait. */
        oldState = *(ptrdiff_t*)lock;

        /* diff "sign" bit set => exit has not yet reached our writer's ticket. */
        diff = LockExit(oldState) - waitFor;
        if ((diff & FIELD_SIGN) == 0)
        {
            /* The writer ahead of us in line already acquired.
             * Someone could have beat us unfairly.
             * Just wait for the current owner. */
            waitFor = LockExit(oldState);
            key = (size_t)lock ^ waitFor;
        }

        if ((diff & FIELD_SIGN) != 0 || (LockOwners(oldState) == OWN_EXCLUSIVE))
        {
            /* The writer ahead of us still hasn't acquired,
             * or someone owns the lock exclusively right now. */
            if (((CurrentTick() - LockUnfair(oldState)) & 14) == 0 &&
                LockEntry(oldState) - LockExit(oldState) >= 2 &&
                LockEntry(oldState) == LockEntry(preQueuedState) + 1 &&
                (LockOwners(oldState) < OWN_MAXSHARED))
            {
                /* Under certain conditions, we can acquire immediately if we
                 * are the last thread in line and undo joining the queue. */
                if (preQueuedState <= OWN_EXCLUSIVE)
                    state = LockOwners(oldState) + 1;  /* queue empties; keep only owners+1 */
                else
                {
                    state = oldState + 1;
                    /* Roll entry/writer back to their pre-queue values. */
                    LockEntry(state) = LockEntry(preQueuedState);
                    LockWriter(state) = LockWriter(preQueuedState);
                }

                /* Atomically de-queue and acquire unfairly. */
                swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
                if (swapState == oldState)
                    return;
                continue;
            }

            /* spinState being low means spinning usually works.
             * Use a high count if it has been working recently. */
            spinCount = (spinState & SPIN_SIGN) ?
                CONDLOCK_LOW_SPINCOUNT :
                CONDLOCK_HIGH_SPINCOUNT;

            /* Spin and/or block until something changes.
             * Adjust the spin field based on whether spinning worked. */
            if (CondLock_Wait(key, (ptrdiff_t*)lock, oldState, spinCount))
                spinState = (spinState > 2) ? (spinState - 2) : 0;
            else
                spinState = (spinState < SPIN_MAX) ? (spinState + 1) : spinState;
            continue;
        }
        
        if (LockOwners(oldState) == OWN_MAXSHARED)
        {
            /* The owner arithmetic will overflow if we continue. */
            Thread_Yield();
            continue;
        }

        /* Phase 3: our turn — acquire fairly. */
        state = oldState + 1;

        /* Bump the exit ticket number. We're leaving the queue. */
        LockExit(state)++;

        /* Zero the top 4 fields if the queue is now empty. */
        if (LockExit(state) == LockEntry(state))
            state = LockOwners(state);
        else
        {
            /* Not empty, but we just acquired fairly.
             * Allow unfairness for a while. */
            LockUnfair(state) = CurrentTick();
            LockSpin(state) = spinState;
        }

        /* Ready to take shared ownership. */
        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            break;
    }

    if ((LockExit(state) & ~FIELD_SIGN) == 0)
    {
        /* Wakes those waiting on the artificial barrier inserted each half
         * revolution (see above). */
        key = (size_t)lock ^ LockExit(state);
        CondLock_Broadcast(key);
    }
}
256              
/*
 * Slow path for exclusive acquisition: join the ticket queue, publish
 * our ticket in the writer field so later readers wait behind us, then
 * wait until every earlier ticket has exited and the lock is empty.
 * Like QueueAcquireRead, the last thread in line may de-queue and
 * acquire unfairly during the unfair tick window.
 */
static void QueueAcquireWrite(
    _Inout_ ReadWriteLock* self
)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t oldState, state, swapState, preQueuedState;
    size_t waitFor, key, spinState, spinCount;

    /* Phase 1: atomically take an entry ticket and mark ourselves as
     * the most recent writer. */
    for (;;)
    {
        oldState = ReadLockState(lock);
        state = oldState;

        /* If there is no queue, we are the first one to wait;
         * allow unfairness for the current timer tick. */
        if (state <= OWN_EXCLUSIVE)
            LockUnfair(state) = CurrentTick();

        /* Wait for the most recent thread to enter the queue. */
        waitFor = LockEntry(state);

        if (++LockEntry(state) == LockExit(state))
        {
            /* The queue arithmetic will wrap if we continue. */
            Thread_Yield();
            continue;
        }

        /* Make reader threads coming in wait for us. */
        LockWriter(state) = LockEntry(state);

        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            break;
    }

    /* This thread now has a place in the queue.
     * Threads behind us may be depending on us to wake them up. */

    preQueuedState = oldState;        /* snapshot from before we joined; used to undo */
    key = (size_t)lock ^ waitFor;     /* wait-channel key: lock address mixed with ticket */
    spinState = LockSpin(oldState);

    /* Phase 2: wait until every earlier ticket has exited and no one owns. */
    for (;;)
    {
        /* Avoid write prefetching since we expect to wait. */
        oldState = *(ptrdiff_t*)lock;

        if (LockExit(oldState) != waitFor || LockOwners(oldState) != 0)
        {
            /* The thread ahead of us still hasn't acquired,
             * or some reader or writer owns the lock right now. */
            if (((CurrentTick() - LockUnfair(oldState)) & 14) == 0 &&
                LockEntry(oldState) - LockExit(oldState) >= 2 &&
                LockEntry(oldState) == LockEntry(preQueuedState) + 1 &&
                LockOwners(oldState) == 0)
            {
                /* Under certain conditions, we can acquire immediately if we
                 * are the last thread in line and undo joining the queue. */
                if (preQueuedState <= OWN_EXCLUSIVE)
                    state = OWN_EXCLUSIVE;  /* queue empties; just own exclusively */
                else
                {
                    state = oldState + OWN_EXCLUSIVE;
                    /* Roll entry/writer back to their pre-queue values. */
                    LockEntry(state) = LockEntry(preQueuedState);
                    LockWriter(state) = LockWriter(preQueuedState);
                }

                /* Atomically de-queue and acquire unfairly. */
                swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
                if (swapState == oldState)
                    return;
                continue;
            }

            /* spinState being low means spinning usually works.
             * Use a high count if it has been working recently. */
            spinCount = (spinState & SPIN_SIGN) ?
                CONDLOCK_LOW_SPINCOUNT :
                CONDLOCK_HIGH_SPINCOUNT;

            /* Spin and/or block until something changes.
             * Adjust the spin field based on whether spinning worked. */
            if (CondLock_Wait(key, (ptrdiff_t*)lock, oldState, spinCount))
                spinState = (spinState > 2) ? (spinState - 2) : 0;
            else
                spinState = (spinState < SPIN_MAX) ? (spinState + 1) : spinState;
            continue;
        }

        /* Phase 3: our turn — acquire fairly. */
        state = oldState + OWN_EXCLUSIVE;

        /* Bump the exit ticket number. We're leaving the queue. */
        LockExit(state)++;

        /* Zero the top 4 fields if the queue is now empty. */
        if (LockExit(state) == LockEntry(state))
            state = LockOwners(state);
        else
        {
            /* Not empty, but we just acquired fairly.
             * Allow unfairness for a while. */
            LockUnfair(state) = CurrentTick();
            LockSpin(state) = spinState;
        }

        /* Ready to take exclusive ownership. */
        swapState = Atomic_CompareAndSwap(LockState(lock), oldState, state);
        if (swapState == oldState)
            return;
    }
}
369              
370              int ReadWriteLock_TryAcquireRead(
371                  _Inout_ ReadWriteLock* self
372              )
373              {
374                  return TryAcquireRead(self);
375              }
376              
377              void ReadWriteLock_AcquireRead(
378                  _Inout_ ReadWriteLock* self
379 krisbash 1.1 )
380              {
381                  if (TryAcquireRead(self) != 0)
382                      return;
383              
384                  QueueAcquireRead(self);
385              }
386              
387              int ReadWriteLock_TryAcquireWrite(
388                  _Inout_ ReadWriteLock* self
389              )
390              {
391                  return TryAcquireWrite(self);
392              }
393              
394              void ReadWriteLock_AcquireWrite(
395                  _Inout_ ReadWriteLock* self)
396              {
397                  if (TryAcquireWrite(self) != 0)
398                      return;
399              
400 krisbash 1.1     QueueAcquireWrite(self);
401              }
402              
/*
 * Release shared (read) ownership: atomically decrement the owner count.
 * If a queue exists and we were the last reader out, wake the thread(s)
 * waiting on the current exit ticket, and occasionally nudge the last
 * group in line to acquire unfairly.
 */
void ReadWriteLock_ReleaseRead(
    _Inout_ ReadWriteLock* self)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t state, key;

    /* state is the post-decrement value of the whole lock word. */
    state = Atomic_Dec(LockState(lock));
    if (state >= OWN_EXCLUSIVE && LockOwners(state) == 0)
    {
        /* There is a queue.
         * Writers may be blocked waiting for us to leave. */
        key = (size_t)lock ^ LockExit(state);
        CondLock_Broadcast(key);

        /* Retained earlier form of the heuristic below (commented out): */
        //if (((LockEntry(state) - LockExit(state)) & (FIELD_SIZE - 1)) == 2 &&
        if (LockEntry(state) - LockExit(state) >= 2 &&
            ((CurrentTick() - LockUnfair(state)) & 14) == 0)
        {
            /* Under certain conditions, encourage the last group of threads in
             * line to stop spinning and acquire unfairly. */
            if (LockEntry(state) == LockWriter(state))
                key = (size_t)lock ^ (LockEntry(state) - 1);
            else
                key = (size_t)lock ^ LockWriter(state);
            CondLock_BroadcastSpinners(key);
        }
    }
}
431              
/*
 * Release exclusive (write) ownership: atomically clear the OWN_EXCLUSIVE
 * owners value.  If the resulting word is non-zero a queue remains; wake
 * the thread(s) waiting on the current exit ticket, and occasionally
 * nudge the last group in line to acquire unfairly.
 */
void ReadWriteLock_ReleaseWrite(
    _Inout_ ReadWriteLock* self)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t state, key;

    /* state is the post-subtract value of the whole lock word. */
    state = Atomic_Add(LockState(lock), -OWN_EXCLUSIVE);
    if (state != 0)
    {
        /* There is a queue.
         * Threads may be blocked waiting for us to leave. */
        key = (size_t)lock ^ LockExit(state);
        CondLock_Broadcast(key);

        /* Retained earlier form of the heuristic below (commented out): */
        //if (((LockEntry(state) - LockExit(state)) & (FIELD_SIZE - 1)) == 2 &&
        if (LockEntry(state) - LockExit(state) >= 2 &&
            ((CurrentTick() - LockUnfair(state)) & 14) == 0)
        {
            /* Under certain conditions, encourage the last group of threads in
             * line to stop spinning and acquire unfairly. */
            if (LockEntry(state) == LockWriter(state))
                key = (size_t)lock ^ (LockEntry(state) - 1);
            else
                key = (size_t)lock ^ LockWriter(state);
            CondLock_BroadcastSpinners(key);
        }
    }
}

ViewCVS 0.9.2