1 : /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 : /* ***** BEGIN LICENSE BLOCK *****
3 : * Version: MPL 1.1/GPL 2.0/LGPL 2.1
4 : *
5 : * The contents of this file are subject to the Mozilla Public License Version
6 : * 1.1 (the "License"); you may not use this file except in compliance with
7 : * the License. You may obtain a copy of the License at
8 : * http://www.mozilla.org/MPL/
9 : *
10 : * Software distributed under the License is distributed on an "AS IS" basis,
11 : * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
12 : * for the specific language governing rights and limitations under the
13 : * License.
14 : *
15 : * The Original Code is the Netscape Portable Runtime (NSPR).
16 : *
17 : * The Initial Developer of the Original Code is
18 : * Netscape Communications Corporation.
19 : * Portions created by the Initial Developer are Copyright (C) 1998-2000
20 : * the Initial Developer. All Rights Reserved.
21 : *
22 : * Contributor(s):
23 : *
24 : * Alternatively, the contents of this file may be used under the terms of
25 : * either the GNU General Public License Version 2 or later (the "GPL"), or
26 : * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
27 : * in which case the provisions of the GPL or the LGPL are applicable instead
28 : * of those above. If you wish to allow use of your version of this file only
29 : * under the terms of either the GPL or the LGPL, and not to allow others to
30 : * use your version of this file under the terms of the MPL, indicate your
31 : * decision by deleting the provisions above and replace them with the notice
32 : * and other provisions required by the GPL or the LGPL. If you do not delete
33 : * the provisions above, a recipient may use your version of this file under
34 : * the terms of any one of the MPL, the GPL or the LGPL.
35 : *
36 : * ***** END LICENSE BLOCK ***** */
37 :
38 : #include "primpl.h"
39 : #include "pprmwait.h"
40 :
/* Maximum number of double-hash probes before declaring the table full. */
#define _MW_REHASH_MAX 11

/* Protects mw_state and the lazy creation of the default wait group. */
static PRLock *mw_lock = NULL;
/* Module-global multiwait state; allocated in _PR_InitMW(). */
static _PRGlobalState *mw_state = NULL;

/* Upper bound on any single PR_Poll() timeout; set in _PR_InitMW(). */
static PRIntervalTime max_polling_interval;
47 :
48 : #ifdef WINNT
49 :
/*
** A one-shot timer used on WINNT to abort an overlapped I/O request
** whose PRRecvWait timeout expires before the I/O completes.
*/
typedef struct TimerEvent {
    PRIntervalTime absolute;    /* interval-clock time at which to fire */
    void (*func)(void *);       /* expiration callback */
    void *arg;                  /* argument handed to the callback */
    LONG ref_count;             /* shared by creator and timer manager */
    PRCList links;              /* position in tm_vars.timer_queue */
} TimerEvent;

/* Recover the TimerEvent from its embedded list linkage. */
#define TIMER_EVENT_PTR(_qp) \
    ((TimerEvent *) ((char *) (_qp) - offsetof(TimerEvent, links)))

/* State shared between timer clients and the manager thread. */
struct {
    PRLock *ml;                 /* guards every field below */
    PRCondVar *new_timer;       /* signaled when a timer is queued */
    PRCondVar *cancel_timer;    /* signaled when a fired timer is released */
    PRThread *manager_thread;   /* runs TimerManager() */
    PRCList timer_queue;        /* timers sorted by expiration, earliest first */
} tm_vars;

static PRStatus TimerInit(void);
static void TimerManager(void *arg);
static TimerEvent *CreateTimer(PRIntervalTime timeout,
    void (*func)(void *), void *arg);
static PRBool CancelTimer(TimerEvent *timer);
74 :
/*
** Body of the timer manager thread: sleep until the earliest queued
** timer expires, then run its callback with tm_vars.ml dropped.
** The loop never exits; the trailing PR_Unlock is unreachable.
*/
static void TimerManager(void *arg)
{
    PRIntervalTime now;
    PRIntervalTime timeout;
    PRCList *head;
    TimerEvent *timer;

    PR_Lock(tm_vars.ml);
    while (1)
    {
        if (PR_CLIST_IS_EMPTY(&tm_vars.timer_queue))
        {
            /* Nothing queued: sleep until CreateTimer signals. */
            PR_WaitCondVar(tm_vars.new_timer, PR_INTERVAL_NO_TIMEOUT);
        }
        else
        {
            now = PR_IntervalNow();
            head = PR_LIST_HEAD(&tm_vars.timer_queue);
            timer = TIMER_EVENT_PTR(head);
            /* Signed wraparound-safe "now >= absolute" comparison. */
            if ((PRInt32) (now - timer->absolute) >= 0)
            {
                PR_REMOVE_LINK(head);
                /*
                 * make its prev and next point to itself so that
                 * it's obvious that it's not on the timer_queue.
                 */
                PR_INIT_CLIST(head);
                PR_ASSERT(2 == timer->ref_count);
                /* Run the callback without holding the manager lock. */
                PR_Unlock(tm_vars.ml);
                timer->func(timer->arg);
                PR_Lock(tm_vars.ml);
                /* Drop the manager's reference; the creator holds the other. */
                timer->ref_count -= 1;
                if (0 == timer->ref_count)
                {
                    /* CancelTimer is waiting to free this timer. */
                    PR_NotifyAllCondVar(tm_vars.cancel_timer);
                }
            }
            else
            {
                /* Earliest timer not yet due: sleep until it is,
                 * or until a new (possibly earlier) timer arrives. */
                timeout = (PRIntervalTime)(timer->absolute - now);
                PR_WaitCondVar(tm_vars.new_timer, timeout);
            }
        }
    }
    PR_Unlock(tm_vars.ml);
}
121 :
/*
** Allocate a timer that fires 'timeout' from now and insert it into
** the queue, which is kept sorted by expiration time (earliest at the
** head). Returns NULL (with PR_OUT_OF_MEMORY_ERROR set) on failure.
** The timer starts with ref_count == 2: one reference for the caller
** (released by CancelTimer) and one for the manager thread.
*/
static TimerEvent *CreateTimer(
    PRIntervalTime timeout,
    void (*func)(void *),
    void *arg)
{
    TimerEvent *timer;
    PRCList *links, *tail;
    TimerEvent *elem;

    timer = PR_NEW(TimerEvent);
    if (NULL == timer)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return timer;
    }
    timer->absolute = PR_IntervalNow() + timeout;
    timer->func = func;
    timer->arg = arg;
    timer->ref_count = 2;
    PR_Lock(tm_vars.ml);
    /*
    ** Walk backwards from the tail of the circular queue until we find
    ** an element expiring no later than the new timer (insert after it),
    ** or we have wrapped all the way around (insert at the head).
    */
    tail = links = PR_LIST_TAIL(&tm_vars.timer_queue);
    while (links->prev != tail)
    {
        elem = TIMER_EVENT_PTR(links);
        if ((PRInt32)(timer->absolute - elem->absolute) >= 0)
        {
            break;
        }
        links = links->prev;
    }
    PR_INSERT_AFTER(&timer->links, links);
    /* Wake the manager in case this timer is now the earliest. */
    PR_NotifyCondVar(tm_vars.new_timer);
    PR_Unlock(tm_vars.ml);
    return timer;
}
157 :
/*
** Release the caller's reference to a timer and free it. Returns
** PR_TRUE if the timer was removed before it fired. If the manager
** has already dequeued the timer (links point to self), wait until
** the manager finishes running the callback and drops its reference
** before freeing.
*/
static PRBool CancelTimer(TimerEvent *timer)
{
    PRBool canceled = PR_FALSE;

    PR_Lock(tm_vars.ml);
    timer->ref_count -= 1;
    if (timer->links.prev == &timer->links)
    {
        /* Already off the queue: the manager owns it until its
         * ref_count drops to 0 (our decrement left it at 1 if the
         * callback is still in flight). */
        while (timer->ref_count == 1)
        {
            PR_WaitCondVar(tm_vars.cancel_timer, PR_INTERVAL_NO_TIMEOUT);
        }
    }
    else
    {
        /* Still queued: unhook it before it can fire. */
        PR_REMOVE_LINK(&timer->links);
        canceled = PR_TRUE;
    }
    PR_Unlock(tm_vars.ml);
    PR_DELETE(timer);
    return canceled;
}
180 :
181 : static PRStatus TimerInit(void)
182 : {
183 : tm_vars.ml = PR_NewLock();
184 : if (NULL == tm_vars.ml)
185 : {
186 : goto failed;
187 : }
188 : tm_vars.new_timer = PR_NewCondVar(tm_vars.ml);
189 : if (NULL == tm_vars.new_timer)
190 : {
191 : goto failed;
192 : }
193 : tm_vars.cancel_timer = PR_NewCondVar(tm_vars.ml);
194 : if (NULL == tm_vars.cancel_timer)
195 : {
196 : goto failed;
197 : }
198 : PR_INIT_CLIST(&tm_vars.timer_queue);
199 : tm_vars.manager_thread = PR_CreateThread(
200 : PR_SYSTEM_THREAD, TimerManager, NULL, PR_PRIORITY_NORMAL,
201 : PR_LOCAL_THREAD, PR_UNJOINABLE_THREAD, 0);
202 : if (NULL == tm_vars.manager_thread)
203 : {
204 : goto failed;
205 : }
206 : return PR_SUCCESS;
207 :
208 : failed:
209 : if (NULL != tm_vars.cancel_timer)
210 : {
211 : PR_DestroyCondVar(tm_vars.cancel_timer);
212 : }
213 : if (NULL != tm_vars.new_timer)
214 : {
215 : PR_DestroyCondVar(tm_vars.new_timer);
216 : }
217 : if (NULL != tm_vars.ml)
218 : {
219 : PR_DestroyLock(tm_vars.ml);
220 : }
221 : return PR_FAILURE;
222 : }
223 :
224 : #endif /* WINNT */
225 :
226 : /******************************************************************/
227 : /******************************************************************/
228 : /************************ The private portion *********************/
229 : /******************************************************************/
230 : /******************************************************************/
/*
** One-time initialization of the multiwait module: create the global
** lock and state block, and compute the polling-interval cap.
** NOTE(review): allocation failures here are caught only by
** PR_ASSERT; in a non-debug build a NULL mw_lock/mw_state would be
** dereferenced later — confirm callers guarantee this cannot happen.
*/
void _PR_InitMW(void)
{
#ifdef WINNT
    /*
     * We use NT 4's InterlockedCompareExchange() to operate
     * on PRMWStatus variables.
     */
    PR_ASSERT(sizeof(LONG) == sizeof(PRMWStatus));
    TimerInit();
#endif
    mw_lock = PR_NewLock();
    PR_ASSERT(NULL != mw_lock);
    mw_state = PR_NEWZAP(_PRGlobalState);
    PR_ASSERT(NULL != mw_state);
    PR_INIT_CLIST(&mw_state->group_list);
    max_polling_interval = PR_MillisecondsToInterval(MAX_POLLING_INTERVAL);
} /* _PR_InitMW */
248 :
/*
** Tear down module-global multiwait state at library shutdown.
** NOTE(review): mw_lock is destroyed (and NULLed) before
** PR_DestroyWaitGroup() runs; if PR_DestroyWaitGroup() ever takes
** mw_lock (e.g. to unlink the group from mw_state->group_list),
** this ordering is unsafe — confirm against its implementation.
** Also note mw_state is freed but not reset to NULL.
*/
void _PR_CleanupMW(void)
{
    PR_DestroyLock(mw_lock);
    mw_lock = NULL;
    if (mw_state->group) {
        PR_DestroyWaitGroup(mw_state->group);
        /* mw_state->group is set to NULL as a side effect. */
    }
    PR_DELETE(mw_state);
} /* _PR_CleanupMW */
259 :
/*
** Lazily create the default ("null") wait group used when callers
** pass a NULL group. Allocates optimistically outside mw_lock, then
** installs under the lock; if another thread won the race, the spare
** group is destroyed and the winner's group is returned. Returns
** NULL only if PR_CreateWaitGroup fails.
*/
static PRWaitGroup *MW_Init2(void)
{
    PRWaitGroup *group = mw_state->group;  /* it's the null group */
    if (NULL == group)  /* there is this special case */
    {
        group = PR_CreateWaitGroup(_PR_DEFAULT_HASH_LENGTH);
        if (NULL == group) goto failed_alloc;
        PR_Lock(mw_lock);
        if (NULL == mw_state->group)
        {
            mw_state->group = group;
            group = NULL;  /* mark ours as the installed one */
        }
        PR_Unlock(mw_lock);
        /* If group is still non-NULL, we lost the race: discard ours. */
        if (group != NULL) (void)PR_DestroyWaitGroup(group);
        group = mw_state->group;  /* somebody beat us to it */
    }
failed_alloc:
    return group;  /* whatever */
} /* MW_Init2 */
280 :
/*
** Insert a receive-wait descriptor into the group's open-addressed
** hash table, keyed by desc->fd with double hashing. Returns
** _prmw_success on insertion, _prmw_error (PR_INVALID_ARGUMENT_ERROR)
** if the descriptor is already present, or _prmw_rehash if no free
** slot was found within _MW_REHASH_MAX probes (caller should grow
** the table and retry). Caller must hold the appropriate group lock.
*/
static _PR_HashStory MW_AddHashInternal(PRRecvWait *desc, _PRWaiterHash *hash)
{
    /*
    ** The entries are put in the table using the fd (PRFileDesc*) of
    ** the receive descriptor as the key. This allows us to locate
    ** the appropriate entry aqain when the poll operation finishes.
    **
    ** The pointer to the file descriptor object is first divided by
    ** the natural alignment of a pointer in the belief that object
    ** will have at least that many zeros in the low order bits.
    ** This may not be a good assuption.
    **
    ** We try to put the entry in by rehashing _MW_REHASH_MAX times. After
    ** that we declare defeat and force the table to be reconstructed.
    ** Since some fds might be added more than once, won't that cause
    ** collisions even in an empty table?
    */
    PRIntn rehash = _MW_REHASH_MAX;
    PRRecvWait **waiter;
    PRUintn hidx = _MW_HASH(desc->fd, hash->length);
    PRUintn hoffset = 0;

    while (rehash-- > 0)
    {
        waiter = &hash->recv_wait;
        if (NULL == waiter[hidx])
        {
            /* Empty slot: claim it. */
            waiter[hidx] = desc;
            hash->count += 1;
#if 0
            printf("Adding 0x%x->0x%x ", desc, desc->fd);
            printf(
                "table[%u:%u:*%u]: 0x%x->0x%x\n",
                hidx, hash->count, hash->length, waiter[hidx], waiter[hidx]->fd);
#endif
            return _prmw_success;
        }
        if (desc == waiter[hidx])
        {
            PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);  /* desc already in table */
            return _prmw_error;
        }
#if 0
        printf("Failing 0x%x->0x%x ", desc, desc->fd);
        printf(
            "table[*%u:%u:%u]: 0x%x->0x%x\n",
            hidx, hash->count, hash->length, waiter[hidx], waiter[hidx]->fd);
#endif
        /* Compute the secondary-hash stride lazily, on first collision. */
        if (0 == hoffset)
        {
            hoffset = _MW_HASH2(desc->fd, hash->length);
            PR_ASSERT(0 != hoffset);
        }
        hidx = (hidx + hoffset) % (hash->length);
    }
    return _prmw_rehash;
} /* MW_AddHashInternal */
338 :
/*
** Grow the group's waiter hash to the next prime size and re-insert
** every live entry. If re-insertion itself hits _prmw_rehash (probe
** limit exceeded even in the larger table), the new table is discarded
** and the next larger prime is tried. Returns _prmw_success, or
** _prmw_error (PR_OUT_OF_MEMORY_ERROR) when allocation fails or the
** largest table size is exhausted. Bumps group->p_timestamp so any
** concurrent pollers notice that slot positions have changed.
*/
static _PR_HashStory MW_ExpandHashInternal(PRWaitGroup *group)
{
    PRRecvWait **desc;
    PRUint32 pidx, length;
    _PRWaiterHash *newHash, *oldHash = group->waiter;
    PRBool retry;
    _PR_HashStory hrv;

    static const PRInt32 prime_number[] = {
        _PR_DEFAULT_HASH_LENGTH, 179, 521, 907, 1427,
        2711, 3917, 5021, 8219, 11549, 18911, 26711, 33749, 44771};
    PRUintn primes = (sizeof(prime_number) / sizeof(PRInt32));

    /* look up the next size we'd like to use for the hash table */
    for (pidx = 0; pidx < primes; ++pidx)
    {
        if (prime_number[pidx] == oldHash->length)
        {
            break;
        }
    }
    /* table size must be one of the prime numbers */
    PR_ASSERT(pidx < primes);

    /* if pidx == primes - 1, we can't expand the table any more */
    while (pidx < primes - 1)
    {
        /* next size */
        ++pidx;
        length = prime_number[pidx];

        /* allocate the new hash table and fill it in with the old */
        newHash = (_PRWaiterHash*)PR_CALLOC(
            sizeof(_PRWaiterHash) + (length * sizeof(PRRecvWait*)));
        if (NULL == newHash)
        {
            PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
            return _prmw_error;
        }

        newHash->length = length;
        retry = PR_FALSE;
        /* Scan old slots until all 'count' live entries are moved. */
        for (desc = &oldHash->recv_wait;
            newHash->count < oldHash->count; ++desc)
        {
            PR_ASSERT(desc < &oldHash->recv_wait + oldHash->length);
            if (NULL != *desc)
            {
                hrv = MW_AddHashInternal(*desc, newHash);
                PR_ASSERT(_prmw_error != hrv);
                if (_prmw_success != hrv)
                {
                    /* Probe limit hit even in the bigger table:
                     * throw it away and try the next prime. */
                    PR_DELETE(newHash);
                    retry = PR_TRUE;
                    break;
                }
            }
        }
        if (retry) continue;

        PR_DELETE(group->waiter);
        group->waiter = newHash;
        group->p_timestamp += 1;
        return _prmw_success;
    }

    PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
    return _prmw_error;  /* we're hosed */
} /* MW_ExpandHashInternal */
408 :
409 : #ifndef WINNT
/*
** Mark one receive-wait operation as finished: record the outcome,
** move the descriptor onto the group's io_ready list, wake one
** waiter, and clear its hash slot. Caller must hold group->ml.
*/
static void _MW_DoneInternal(
    PRWaitGroup *group, PRRecvWait **waiter, PRMWStatus outcome)
{
    /*
    ** Add this receive wait object to the list of finished I/O
    ** operations for this particular group. If there are other
    ** threads waiting on the group, notify one. If not, arrange
    ** for this thread to return.
    */

#if 0
    printf("Removing 0x%x->0x%x\n", *waiter, (*waiter)->fd);
#endif
    (*waiter)->outcome = outcome;
    PR_APPEND_LINK(&((*waiter)->internal), &group->io_ready);
    PR_NotifyCondVar(group->io_complete);
    PR_ASSERT(0 != group->waiter->count);
    group->waiter->count -= 1;
    /* Free the hash slot; the descriptor now lives on io_ready. */
    *waiter = NULL;
} /* _MW_DoneInternal */
430 : #endif /* WINNT */
431 :
432 0 : static PRRecvWait **_MW_LookupInternal(PRWaitGroup *group, PRFileDesc *fd)
433 : {
434 : /*
435 : ** Find the receive wait object corresponding to the file descriptor.
436 : ** Only search the wait group specified.
437 : */
438 : PRRecvWait **desc;
439 0 : PRIntn rehash = _MW_REHASH_MAX;
440 0 : _PRWaiterHash *hash = group->waiter;
441 0 : PRUintn hidx = _MW_HASH(fd, hash->length);
442 0 : PRUintn hoffset = 0;
443 :
444 0 : while (rehash-- > 0)
445 : {
446 0 : desc = (&hash->recv_wait) + hidx;
447 0 : if ((*desc != NULL) && ((*desc)->fd == fd)) return desc;
448 0 : if (0 == hoffset)
449 : {
450 0 : hoffset = _MW_HASH2(fd, hash->length);
451 0 : PR_ASSERT(0 != hoffset);
452 : }
453 0 : hidx = (hidx + hoffset) % (hash->length);
454 : }
455 0 : return NULL;
456 : } /* _MW_LookupInternal */
457 :
458 : #ifndef WINNT
/*
** The polling loop for a wait group. The calling thread has been
** "borrowed" as the group's poller: it builds a PRPollDesc array from
** the waiter hash, calls PR_Poll(), routes completions and timeouts
** onto the group's io_ready list, and repeats — until it is the last
** waiting thread and some i/o is ready, at which point it must return
** to service that i/o itself (waking a successor if one exists).
** Entered and exited with group->ml held; the lock is dropped around
** allocation and around PR_Poll().
*/
static PRStatus _MW_PollInternal(PRWaitGroup *group)
{
    PRRecvWait **waiter;
    PRStatus rv = PR_FAILURE;
    PRInt32 count, count_ready;
    PRIntervalTime polling_interval;

    group->poller = PR_GetCurrentThread();

    while (PR_TRUE)
    {
        PRIntervalTime now, since_last_poll;
        PRPollDesc *poll_list;

        /* Sleep until there is at least one descriptor to poll. */
        while (0 == group->waiter->count)
        {
            PRStatus st;
            st = PR_WaitCondVar(group->new_business, PR_INTERVAL_NO_TIMEOUT);
            if (_prmw_running != group->state)
            {
                PR_SetError(PR_INVALID_STATE_ERROR, 0);
                goto aborted;
            }
            if (_MW_ABORTED(st)) goto aborted;
        }

        /*
        ** There's something to do. See if our existing polling list
        ** is large enough for what we have to do?
        */

        while (group->polling_count < group->waiter->count)
        {
            PRUint32 old_count = group->waiter->count;
            PRUint32 new_count = PR_ROUNDUP(old_count, _PR_POLL_COUNT_FUDGE);
            PRSize new_size = sizeof(PRPollDesc) * new_count;
            PRPollDesc *old_polling_list = group->polling_list;

            /* Allocate outside the lock; re-validate state after. */
            PR_Unlock(group->ml);
            poll_list = (PRPollDesc*)PR_CALLOC(new_size);
            if (NULL == poll_list)
            {
                PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
                PR_Lock(group->ml);
                goto failed_alloc;
            }
            if (NULL != old_polling_list)
                PR_DELETE(old_polling_list);
            PR_Lock(group->ml);
            if (_prmw_running != group->state)
            {
                /*
                ** NOTE(review): on this abort path the new poll_list
                ** leaks and group->polling_list still refers to the
                ** just-freed old list; presumably harmless because
                ** the group is being torn down — confirm.
                */
                PR_SetError(PR_INVALID_STATE_ERROR, 0);
                goto aborted;
            }
            group->polling_list = poll_list;
            group->polling_count = new_count;
        }

        now = PR_IntervalNow();
        polling_interval = max_polling_interval;
        since_last_poll = now - group->last_poll;

        /*
        ** Walk the waiter hash: expire entries whose timeout has
        ** elapsed, add the rest to the polling list, and clamp the
        ** poll timeout to the smallest remaining per-waiter timeout.
        */
        waiter = &group->waiter->recv_wait;
        poll_list = group->polling_list;
        for (count = 0; count < group->waiter->count; ++waiter)
        {
            PR_ASSERT(waiter < &group->waiter->recv_wait
                + group->waiter->length);
            if (NULL != *waiter)  /* a live one! */
            {
                if ((PR_INTERVAL_NO_TIMEOUT != (*waiter)->timeout)
                && (since_last_poll >= (*waiter)->timeout))
                    _MW_DoneInternal(group, waiter, PR_MW_TIMEOUT);
                else
                {
                    if (PR_INTERVAL_NO_TIMEOUT != (*waiter)->timeout)
                    {
                        /* Charge the time since the last poll against
                         * this waiter's remaining timeout. */
                        (*waiter)->timeout -= since_last_poll;
                        if ((*waiter)->timeout < polling_interval)
                            polling_interval = (*waiter)->timeout;
                    }
                    PR_ASSERT(poll_list < group->polling_list
                        + group->polling_count);
                    poll_list->fd = (*waiter)->fd;
                    poll_list->in_flags = PR_POLL_READ;
                    poll_list->out_flags = 0;
#if 0
                    printf(
                        "Polling 0x%x[%d]: [fd: 0x%x, tmo: %u]\n",
                        poll_list, count, poll_list->fd, (*waiter)->timeout);
#endif
                    poll_list += 1;
                    count += 1;
                }
            }
        }

        PR_ASSERT(count == group->waiter->count);

        /*
        ** If there are no more threads waiting for completion,
        ** we need to return.
        */
        if ((!PR_CLIST_IS_EMPTY(&group->io_ready))
        && (1 == group->waiting_threads)) break;

        if (0 == count) continue;  /* wait for new business */

        group->last_poll = now;

        /* Poll with the group lock dropped. */
        PR_Unlock(group->ml);

        count_ready = PR_Poll(group->polling_list, count, polling_interval);

        PR_Lock(group->ml);

        if (_prmw_running != group->state)
        {
            PR_SetError(PR_INVALID_STATE_ERROR, 0);
            goto aborted;
        }
        if (-1 == count_ready)
        {
            goto failed_poll;  /* that's a shame */
        }
        else if (0 < count_ready)
        {
            /* Route each ready descriptor onto io_ready. */
            for (poll_list = group->polling_list; count > 0;
                poll_list++, count--)
            {
                PR_ASSERT(
                    poll_list < group->polling_list + group->polling_count);
                if (poll_list->out_flags != 0)
                {
                    waiter = _MW_LookupInternal(group, poll_list->fd);
                    /*
                    ** If 'waiter' is NULL, that means the wait receive
                    ** descriptor has been canceled.
                    */
                    if (NULL != waiter)
                        _MW_DoneInternal(group, waiter, PR_MW_SUCCESS);
                }
            }
        }
        /*
        ** If there are no more threads waiting for completion,
        ** we need to return.
        ** This thread was "borrowed" to do the polling, but it really
        ** belongs to the client.
        */
        if ((!PR_CLIST_IS_EMPTY(&group->io_ready))
        && (1 == group->waiting_threads)) break;
    }

    rv = PR_SUCCESS;

aborted:
failed_poll:
failed_alloc:
    group->poller = NULL;  /* we were that, not we ain't */
    if ((_prmw_running == group->state) && (group->waiting_threads > 1))
    {
        /* Wake up one thread to become the new poller. */
        PR_NotifyCondVar(group->io_complete);
    }
    return rv;  /* we return with the lock held */
} /* _MW_PollInternal */
626 : #endif /* !WINNT */
627 :
628 0 : static PRMWGroupState MW_TestForShutdownInternal(PRWaitGroup *group)
629 : {
630 0 : PRMWGroupState rv = group->state;
631 : /*
632 : ** Looking at the group's fields is safe because
633 : ** once the group's state is no longer running, it
634 : ** cannot revert and there is a safe check on entry
635 : ** to make sure no more threads are made to wait.
636 : */
637 0 : if ((_prmw_stopping == rv)
638 0 : && (0 == group->waiting_threads))
639 : {
640 0 : rv = group->state = _prmw_stopped;
641 0 : PR_NotifyCondVar(group->mw_manage);
642 : }
643 0 : return rv;
644 : } /* MW_TestForShutdownInternal */
645 :
646 : #ifndef WINNT
647 0 : static void _MW_InitialRecv(PRCList *io_ready)
648 : {
649 0 : PRRecvWait *desc = (PRRecvWait*)io_ready;
650 0 : if ((NULL == desc->buffer.start)
651 0 : || (0 == desc->buffer.length))
652 0 : desc->bytesRecv = 0;
653 : else
654 : {
655 0 : desc->bytesRecv = (desc->fd->methods->recv)(
656 : desc->fd, desc->buffer.start,
657 0 : desc->buffer.length, 0, desc->timeout);
658 0 : if (desc->bytesRecv < 0) /* SetError should already be there */
659 0 : desc->outcome = PR_MW_FAILURE;
660 : }
661 0 : } /* _MW_InitialRecv */
662 : #endif
663 :
664 : #ifdef WINNT
/*
** Timer callback (runs on the timer manager thread) fired when a
** multiwait receive's timeout expires on WINNT. Atomically claims
** the descriptor's outcome (PENDING -> TIMEOUT); if some other path
** already resolved it, does nothing. Otherwise aborts the pending
** overlapped read by closing the underlying socket.
*/
static void NT_TimeProc(void *arg)
{
    _MDOverlapped *overlapped = (_MDOverlapped *)arg;
    PRRecvWait *desc = overlapped->data.mw.desc;
    PRFileDesc *bottom;

    if (InterlockedCompareExchange((LONG *)&desc->outcome,
        (LONG)PR_MW_TIMEOUT, (LONG)PR_MW_PENDING) != (LONG)PR_MW_PENDING)
    {
        /* This wait recv descriptor has already completed. */
        return;
    }

    /* close the osfd to abort the outstanding async io request */
    /* $$$$
    ** Little late to be checking if NSPR's on the bottom of stack,
    ** but if we don't check, we can't assert that the private data
    ** is what we think it is.
    ** $$$$
    */
    bottom = PR_GetIdentitiesLayer(desc->fd, PR_NSPR_IO_LAYER);
    PR_ASSERT(NULL != bottom);
    if (NULL != bottom)  /* now what!?!?! */
    {
        bottom->secret->state = _PR_FILEDESC_CLOSED;
        if (closesocket(bottom->secret->md.osfd) == SOCKET_ERROR)
        {
            fprintf(stderr, "closesocket failed: %d\n", WSAGetLastError());
            PR_ASSERT(!"What shall I do?");
        }
    }
    return;
} /* NT_TimeProc */
698 :
699 : static PRStatus NT_HashRemove(PRWaitGroup *group, PRFileDesc *fd)
700 : {
701 : PRRecvWait **waiter;
702 :
703 : _PR_MD_LOCK(&group->mdlock);
704 : waiter = _MW_LookupInternal(group, fd);
705 : if (NULL != waiter)
706 : {
707 : group->waiter->count -= 1;
708 : *waiter = NULL;
709 : }
710 : _PR_MD_UNLOCK(&group->mdlock);
711 : return (NULL != waiter) ? PR_SUCCESS : PR_FAILURE;
712 : }
713 :
714 : PRStatus NT_HashRemoveInternal(PRWaitGroup *group, PRFileDesc *fd)
715 : {
716 : PRRecvWait **waiter;
717 :
718 : waiter = _MW_LookupInternal(group, fd);
719 : if (NULL != waiter)
720 : {
721 : group->waiter->count -= 1;
722 : *waiter = NULL;
723 : }
724 : return (NULL != waiter) ? PR_SUCCESS : PR_FAILURE;
725 : }
726 : #endif /* WINNT */
727 :
728 : /******************************************************************/
729 : /******************************************************************/
730 : /********************** The public API portion ********************/
731 : /******************************************************************/
732 : /******************************************************************/
/*
** Add a receive-wait descriptor to a wait group (the default group is
** created on demand when 'group' is NULL). On non-NT platforms the
** descriptor is simply hashed into the group and the poller notified.
** On WINNT an overlapped ReadFile is issued immediately, guarded by a
** CreateTimer() timeout when one was requested.
** Returns PR_SUCCESS/PR_FAILURE; fails if the group is not running.
*/
PR_IMPLEMENT(PRStatus) PR_AddWaitFileDesc(
    PRWaitGroup *group, PRRecvWait *desc)
{
    _PR_HashStory hrv;
    PRStatus rv = PR_FAILURE;
#ifdef WINNT
    _MDOverlapped *overlapped;
    HANDLE hFile;
    BOOL bResult;
    DWORD dwError;
    PRFileDesc *bottom;
#endif

    if (!_pr_initialized) _PR_ImplicitInitialization();
    if ((NULL == group) && (NULL == (group = MW_Init2())))
    {
        return rv;
    }

    PR_ASSERT(NULL != desc->fd);

    desc->outcome = PR_MW_PENDING;  /* nice, well known value */
    desc->bytesRecv = 0;  /* likewise, though this value is ambiguious */

    PR_Lock(group->ml);

    if (_prmw_running != group->state)
    {
        /* Not allowed to add after cancelling the group */
        desc->outcome = PR_MW_INTERRUPT;
        PR_SetError(PR_INVALID_STATE_ERROR, 0);
        PR_Unlock(group->ml);
        return rv;
    }

#ifdef WINNT
    _PR_MD_LOCK(&group->mdlock);
#endif

    /*
    ** If the waiter count is zero at this point, there's no telling
    ** how long we've been idle. Therefore, initialize the beginning
    ** of the timing interval. As long as the list doesn't go empty,
    ** it will maintain itself.
    */
    if (0 == group->waiter->count)
        group->last_poll = PR_IntervalNow();

    /* Insert, growing the hash table as many times as needed. */
    do
    {
        hrv = MW_AddHashInternal(desc, group->waiter);
        if (_prmw_rehash != hrv) break;
        hrv = MW_ExpandHashInternal(group);  /* gruesome */
        if (_prmw_success != hrv) break;
    } while (PR_TRUE);

#ifdef WINNT
    _PR_MD_UNLOCK(&group->mdlock);
#endif

    PR_NotifyCondVar(group->new_business);  /* tell the world */
    rv = (_prmw_success == hrv) ? PR_SUCCESS : PR_FAILURE;
    PR_Unlock(group->ml);

#ifdef WINNT
    overlapped = PR_NEWZAP(_MDOverlapped);
    if (NULL == overlapped)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        NT_HashRemove(group, desc->fd);
        return rv;
    }
    overlapped->ioModel = _MD_MultiWaitIO;
    overlapped->data.mw.desc = desc;
    overlapped->data.mw.group = group;
    if (desc->timeout != PR_INTERVAL_NO_TIMEOUT)
    {
        /* Arm a timer to abort the overlapped read at timeout. */
        overlapped->data.mw.timer = CreateTimer(
            desc->timeout,
            NT_TimeProc,
            overlapped);
        if (0 == overlapped->data.mw.timer)
        {
            NT_HashRemove(group, desc->fd);
            PR_DELETE(overlapped);
            /*
             * XXX It appears that a maximum of 16 timer events can
             * be outstanding. GetLastError() returns 0 when I try it.
             */
            PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, GetLastError());
            return PR_FAILURE;
        }
    }

    /* Reach to the bottom layer to get the OS fd */
    bottom = PR_GetIdentitiesLayer(desc->fd, PR_NSPR_IO_LAYER);
    PR_ASSERT(NULL != bottom);
    if (NULL == bottom)
    {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return PR_FAILURE;
    }
    hFile = (HANDLE)bottom->secret->md.osfd;
    if (!bottom->secret->md.io_model_committed)
    {
        /* First I/O on this fd: bind it to the completion port. */
        PRInt32 st;
        st = _md_Associate(hFile);
        PR_ASSERT(0 != st);
        bottom->secret->md.io_model_committed = PR_TRUE;
    }
    bResult = ReadFile(hFile,
        desc->buffer.start,
        (DWORD)desc->buffer.length,
        NULL,
        &overlapped->overlapped);
    if (FALSE == bResult && (dwError = GetLastError()) != ERROR_IO_PENDING)
    {
        /*
        ** NOTE(review): when the read fails and desc->timeout is
        ** PR_INTERVAL_NO_TIMEOUT, this branch is skipped entirely,
        ** so neither NT_HashRemove() nor PR_DELETE(overlapped) runs
        ** — the overlapped block and hash entry appear to leak.
        ** Confirm whether another path reclaims them.
        */
        if (desc->timeout != PR_INTERVAL_NO_TIMEOUT)
        {
            /* Only the thread that wins the outcome CAS may cancel
             * the timer; NT_TimeProc may have fired already. */
            if (InterlockedCompareExchange((LONG *)&desc->outcome,
                (LONG)PR_MW_FAILURE, (LONG)PR_MW_PENDING)
                == (LONG)PR_MW_PENDING)
            {
                CancelTimer(overlapped->data.mw.timer);
            }
            NT_HashRemove(group, desc->fd);
            PR_DELETE(overlapped);
        }
        _PR_MD_MAP_READ_ERROR(dwError);
        rv = PR_FAILURE;
    }
#endif

    return rv;
} /* PR_AddWaitFileDesc */
868 :
/*
** Block until some receive-wait descriptor in the group completes and
** return it (NULL on failure/interrupt, with the error set). On WINNT
** the thread parks on the group's wait list until the completion port
** delivers ready i/o. Elsewhere, one waiting thread is recruited as
** the group's poller (_MW_PollInternal) while the others sleep on
** io_complete; a waiter woken from io_complete may have to become the
** next poller, service ready i/o, or propagate an abort — see the
** four-case comment below. The initial recv for a successful outcome
** is performed here (non-NT) via _MW_InitialRecv.
*/
PR_IMPLEMENT(PRRecvWait*) PR_WaitRecvReady(PRWaitGroup *group)
{
    PRCList *io_ready = NULL;
#ifdef WINNT
    PRThread *me = _PR_MD_CURRENT_THREAD();
    _MDOverlapped *overlapped;
#endif

    if (!_pr_initialized) _PR_ImplicitInitialization();
    if ((NULL == group) && (NULL == (group = MW_Init2()))) goto failed_init;

    PR_Lock(group->ml);

    if (_prmw_running != group->state)
    {
        PR_SetError(PR_INVALID_STATE_ERROR, 0);
        goto invalid_state;
    }

    group->waiting_threads += 1;  /* the polling thread is counted */

#ifdef WINNT
    _PR_MD_LOCK(&group->mdlock);
    while (PR_CLIST_IS_EMPTY(&group->io_ready))
    {
        /* Park this thread until the completion port posts ready i/o. */
        _PR_THREAD_LOCK(me);
        me->state = _PR_IO_WAIT;
        PR_APPEND_LINK(&me->waitQLinks, &group->wait_list);
        if (!_PR_IS_NATIVE_THREAD(me))
        {
            _PR_SLEEPQ_LOCK(me->cpu);
            _PR_ADD_SLEEPQ(me, PR_INTERVAL_NO_TIMEOUT);
            _PR_SLEEPQ_UNLOCK(me->cpu);
        }
        _PR_THREAD_UNLOCK(me);
        _PR_MD_UNLOCK(&group->mdlock);
        PR_Unlock(group->ml);
        _PR_MD_WAIT(me, PR_INTERVAL_NO_TIMEOUT);
        me->state = _PR_RUNNING;
        PR_Lock(group->ml);
        _PR_MD_LOCK(&group->mdlock);
        if (_PR_PENDING_INTERRUPT(me)) {
            /* Interrupted: leave the wait list and report it. */
            PR_REMOVE_LINK(&me->waitQLinks);
            _PR_MD_UNLOCK(&group->mdlock);
            me->flags &= ~_PR_INTERRUPT;
            me->io_suspended = PR_FALSE;
            PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
            goto aborted;
        }
    }
    /* Claim the first completed overlapped operation. */
    io_ready = PR_LIST_HEAD(&group->io_ready);
    PR_ASSERT(io_ready != NULL);
    PR_REMOVE_LINK(io_ready);
    _PR_MD_UNLOCK(&group->mdlock);
    overlapped = (_MDOverlapped *)
        ((char *)io_ready - offsetof(_MDOverlapped, data));
    io_ready = &overlapped->data.mw.desc->internal;
#else
    do
    {
        /*
        ** If the I/O ready list isn't empty, have this thread
        ** return with the first receive wait object that's available.
        */
        if (PR_CLIST_IS_EMPTY(&group->io_ready))
        {
            /*
            ** Is there a polling thread yet? If not, grab this thread
            ** and use it.
            */
            if (NULL == group->poller)
            {
                /*
                ** This thread will stay do polling until it becomes the only one
                ** left to service a completion. Then it will return and there will
                ** be none left to actually poll or to run completions.
                **
                ** The polling function should only return w/ failure or
                ** with some I/O ready.
                */
                if (PR_FAILURE == _MW_PollInternal(group)) goto failed_poll;
            }
            else
            {
                /*
                ** There are four reasons a thread can be awakened from
                ** a wait on the io_complete condition variable.
                ** 1. Some I/O has completed, i.e., the io_ready list
                **    is nonempty.
                ** 2. The wait group is canceled.
                ** 3. The thread is interrupted.
                ** 4. The current polling thread has to leave and needs
                **    a replacement.
                ** The logic to find a new polling thread is made more
                ** complicated by all the other possible events.
                ** I tried my best to write the logic clearly, but
                ** it is still full of if's with continue and goto.
                */
                PRStatus st;
                do
                {
                    st = PR_WaitCondVar(group->io_complete, PR_INTERVAL_NO_TIMEOUT);
                    if (_prmw_running != group->state)
                    {
                        PR_SetError(PR_INVALID_STATE_ERROR, 0);
                        goto aborted;
                    }
                    if (_MW_ABORTED(st) || (NULL == group->poller)) break;
                } while (PR_CLIST_IS_EMPTY(&group->io_ready));

                /*
                ** The thread is interrupted and has to leave. It might
                ** have also been awakened to process ready i/o or be the
                ** new poller. To be safe, if either condition is true,
                ** we awaken another thread to take its place.
                */
                if (_MW_ABORTED(st))
                {
                    if ((NULL == group->poller
                    || !PR_CLIST_IS_EMPTY(&group->io_ready))
                    && group->waiting_threads > 1)
                        PR_NotifyCondVar(group->io_complete);
                    goto aborted;
                }

                /*
                ** A new poller is needed, but can I be the new poller?
                ** If there is no i/o ready, sure. But if there is any
                ** i/o ready, it has a higher priority. I want to
                ** process the ready i/o first and wake up another
                ** thread to be the new poller.
                */
                if (NULL == group->poller)
                {
                    if (PR_CLIST_IS_EMPTY(&group->io_ready))
                        continue;
                    if (group->waiting_threads > 1)
                        PR_NotifyCondVar(group->io_complete);
                }
            }
            PR_ASSERT(!PR_CLIST_IS_EMPTY(&group->io_ready));
        }
        /* Take the first ready descriptor and tell the producer side. */
        io_ready = PR_LIST_HEAD(&group->io_ready);
        PR_NotifyCondVar(group->io_taken);
        PR_ASSERT(io_ready != NULL);
        PR_REMOVE_LINK(io_ready);
    } while (NULL == io_ready);

failed_poll:

#endif

aborted:

    group->waiting_threads -= 1;
invalid_state:
    /* May complete a pending PR_DestroyWaitGroup if we were last out. */
    (void)MW_TestForShutdownInternal(group);
    PR_Unlock(group->ml);

failed_init:
    if (NULL != io_ready)
    {
        /* If the operation failed, record the reason why */
        switch (((PRRecvWait*)io_ready)->outcome)
        {
            case PR_MW_PENDING:
                PR_ASSERT(0);
                break;
            case PR_MW_SUCCESS:
#ifndef WINNT
                /* Polling only told us the fd is readable; do the
                 * actual recv now, outside the group lock. */
                _MW_InitialRecv(io_ready);
#endif
                break;
#ifdef WINNT
            case PR_MW_FAILURE:
                _PR_MD_MAP_READ_ERROR(overlapped->data.mw.error);
                break;
#endif
            case PR_MW_TIMEOUT:
                PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
                break;
            case PR_MW_INTERRUPT:
                PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
                break;
            default: break;
        }
#ifdef WINNT
        if (NULL != overlapped->data.mw.timer)
        {
            /* A timer exists iff a finite timeout was requested. */
            PR_ASSERT(PR_INTERVAL_NO_TIMEOUT
                != overlapped->data.mw.desc->timeout);
            CancelTimer(overlapped->data.mw.timer);
        }
        else
        {
            PR_ASSERT(PR_INTERVAL_NO_TIMEOUT
                == overlapped->data.mw.desc->timeout);
        }
        PR_DELETE(overlapped);
#endif
    }
    return (PRRecvWait*)io_ready;
} /* PR_WaitRecvReady */
1072 :
/*
** Cancel the outstanding wait on a single descriptor.  The descriptor is
** marked done with a PR_MW_INTERRUPT outcome so that some thread blocked
** in PR_WaitRecvReady() will collect it.  Fails if the group is not
** running or (non-WINNT) the descriptor is neither in the wait table nor
** already on the completed list.
*/
PR_IMPLEMENT(PRStatus) PR_CancelWaitFileDesc(PRWaitGroup *group, PRRecvWait *desc)
{
#if !defined(WINNT)
    PRRecvWait **recv_wait;
#endif
    PRStatus rv = PR_SUCCESS;
    /* NULL selects the module's default (lazily created) group */
    if (NULL == group) group = mw_state->group;
    PR_ASSERT(NULL != group);
    if (NULL == group)
    {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return PR_FAILURE;
    }

    PR_Lock(group->ml);

    /* Cancellation only makes sense while the group is still running */
    if (_prmw_running != group->state)
    {
        PR_SetError(PR_INVALID_STATE_ERROR, 0);
        rv = PR_FAILURE;
        goto unlock;
    }

#ifdef WINNT
    /*
    ** Atomically claim the pending operation as interrupted.  If we won
    ** the race with i/o completion, force-close the underlying socket so
    ** the overlapped operation completes and delivers the descriptor.
    */
    if (InterlockedCompareExchange((LONG *)&desc->outcome,
        (LONG)PR_MW_INTERRUPT, (LONG)PR_MW_PENDING) == (LONG)PR_MW_PENDING)
    {
        PRFileDesc *bottom = PR_GetIdentitiesLayer(desc->fd, PR_NSPR_IO_LAYER);
        PR_ASSERT(NULL != bottom);
        if (NULL == bottom)
        {
            /* NOTE(review): rv is left at PR_SUCCESS on this error path,
            ** unlike the failure paths below — looks like it should be
            ** PR_FAILURE; confirm against callers before changing. */
            PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
            goto unlock;
        }
        bottom->secret->state = _PR_FILEDESC_CLOSED;
#if 0
        fprintf(stderr, "cancel wait recv: closing socket\n");
#endif
        if (closesocket(bottom->secret->md.osfd) == SOCKET_ERROR)
        {
            fprintf(stderr, "closesocket failed: %d\n", WSAGetLastError());
            exit(1);
        }
    }
#else
    if (NULL != (recv_wait = _MW_LookupInternal(group, desc->fd)))
    {
        /* it was in the wait table — complete it with an interrupt */
        _MW_DoneInternal(group, recv_wait, PR_MW_INTERRUPT);
        goto unlock;
    }
    if (!PR_CLIST_IS_EMPTY(&group->io_ready))
    {
        /* is it already complete?  If so, cancelling is a silent no-op */
        PRCList *head = PR_LIST_HEAD(&group->io_ready);
        do
        {
            PRRecvWait *done = (PRRecvWait*)head;
            if (done == desc) goto unlock;
            head = PR_NEXT_LINK(head);
        } while (head != &group->io_ready);
    }
    /* not waiting and not completed — the caller handed us a stranger */
    PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
    rv = PR_FAILURE;

#endif
unlock:
    PR_Unlock(group->ml);
    return rv;
}  /* PR_CancelWaitFileDesc */
1143 :
/*
** Shut a wait group down: stop new business, interrupt every pending
** descriptor, drain the waiting threads, then hand completed descriptors
** back to the caller one per call (NULL with PR_GROUP_EMPTY_ERROR when
** the group is drained).  Callers are expected to invoke this repeatedly
** until it returns NULL.
*/
PR_IMPLEMENT(PRRecvWait*) PR_CancelWaitGroup(PRWaitGroup *group)
{
    PRRecvWait **desc;
    PRRecvWait *recv_wait = NULL;
#ifdef WINNT
    _MDOverlapped *overlapped;
    PRRecvWait **end;
    PRThread *me = _PR_MD_CURRENT_THREAD();
#endif

    /* NULL selects the module's default (lazily created) group */
    if (NULL == group) group = mw_state->group;
    PR_ASSERT(NULL != group);
    if (NULL == group)
    {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return NULL;
    }

    PR_Lock(group->ml);
    if (_prmw_stopped != group->state)
    {
        /* Drive the state machine: running -> stopping -> stopped */
        if (_prmw_running == group->state)
            group->state = _prmw_stopping;  /* so nothing new comes in */
        if (0 == group->waiting_threads)  /* is there anybody else? */
            group->state = _prmw_stopped;  /* we can stop right now */
        else
        {
            /* wake every blocked waiter so they notice the state change */
            PR_NotifyAllCondVar(group->new_business);
            PR_NotifyAllCondVar(group->io_complete);
        }
        /* the last waiter out signals mw_manage when fully stopped */
        while (_prmw_stopped != group->state)
            (void)PR_WaitCondVar(group->mw_manage, PR_INTERVAL_NO_TIMEOUT);
    }

#ifdef WINNT
    _PR_MD_LOCK(&group->mdlock);
#endif
    /* make all the existing descriptors look done/interrupted */
#ifdef WINNT
    /* Walk the whole hash vector; CAS each still-pending descriptor to
    ** interrupted and force-close its socket so the overlapped i/o
    ** completes (same technique as PR_CancelWaitFileDesc). */
    end = &group->waiter->recv_wait + group->waiter->length;
    for (desc = &group->waiter->recv_wait; desc < end; ++desc)
    {
        if (NULL != *desc)
        {
            if (InterlockedCompareExchange((LONG *)&(*desc)->outcome,
                (LONG)PR_MW_INTERRUPT, (LONG)PR_MW_PENDING)
                == (LONG)PR_MW_PENDING)
            {
                PRFileDesc *bottom = PR_GetIdentitiesLayer(
                    (*desc)->fd, PR_NSPR_IO_LAYER);
                PR_ASSERT(NULL != bottom);
                if (NULL == bottom)
                {
                    PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
                    goto invalid_arg;
                }
                bottom->secret->state = _PR_FILEDESC_CLOSED;
#if 0
                fprintf(stderr, "cancel wait group: closing socket\n");
#endif
                if (closesocket(bottom->secret->md.osfd) == SOCKET_ERROR)
                {
                    fprintf(stderr, "closesocket failed: %d\n",
                        WSAGetLastError());
                    exit(1);
                }
            }
        }
    }
    /* Sleep on the group's wait_list until the completion port has
    ** delivered every interrupted operation (waiter->count reaches 0).
    ** Both locks are dropped around the descheduled wait and reacquired
    ** in the same order (ml before mdlock) afterwards. */
    while (group->waiter->count > 0)
    {
        _PR_THREAD_LOCK(me);
        me->state = _PR_IO_WAIT;
        PR_APPEND_LINK(&me->waitQLinks, &group->wait_list);
        if (!_PR_IS_NATIVE_THREAD(me))
        {
            _PR_SLEEPQ_LOCK(me->cpu);
            _PR_ADD_SLEEPQ(me, PR_INTERVAL_NO_TIMEOUT);
            _PR_SLEEPQ_UNLOCK(me->cpu);
        }
        _PR_THREAD_UNLOCK(me);
        _PR_MD_UNLOCK(&group->mdlock);
        PR_Unlock(group->ml);
        _PR_MD_WAIT(me, PR_INTERVAL_NO_TIMEOUT);
        me->state = _PR_RUNNING;
        PR_Lock(group->ml);
        _PR_MD_LOCK(&group->mdlock);
    }
#else
    /* Complete every registered descriptor with an interrupt outcome;
    ** the loop stops once the wait table's live count hits zero. */
    for (desc = &group->waiter->recv_wait; group->waiter->count > 0; ++desc)
    {
        PR_ASSERT(desc < &group->waiter->recv_wait + group->waiter->length);
        if (NULL != *desc)
            _MW_DoneInternal(group, desc, PR_MW_INTERRUPT);
    }
#endif

    /* take first element of finished list and return it or NULL */
    if (PR_CLIST_IS_EMPTY(&group->io_ready))
        PR_SetError(PR_GROUP_EMPTY_ERROR, 0);
    else
    {
        PRCList *head = PR_LIST_HEAD(&group->io_ready);
        PR_REMOVE_AND_INIT_LINK(head);
#ifdef WINNT
        /* On NT the ready list links overlapped wrappers, not the
        ** descriptors themselves; recover the descriptor, cancel any
        ** outstanding timeout timer and free the wrapper. */
        overlapped = (_MDOverlapped *)
            ((char *)head - offsetof(_MDOverlapped, data));
        head = &overlapped->data.mw.desc->internal;
        if (NULL != overlapped->data.mw.timer)
        {
            PR_ASSERT(PR_INTERVAL_NO_TIMEOUT
                != overlapped->data.mw.desc->timeout);
            CancelTimer(overlapped->data.mw.timer);
        }
        else
        {
            PR_ASSERT(PR_INTERVAL_NO_TIMEOUT
                == overlapped->data.mw.desc->timeout);
        }
        PR_DELETE(overlapped);
#endif
        recv_wait = (PRRecvWait*)head;
    }
#ifdef WINNT
invalid_arg:
    _PR_MD_UNLOCK(&group->mdlock);
#endif
    PR_Unlock(group->ml);

    return recv_wait;
}  /* PR_CancelWaitGroup */
1275 :
1276 0 : PR_IMPLEMENT(PRWaitGroup*) PR_CreateWaitGroup(PRInt32 size /* ignored */)
1277 : {
1278 : PRWaitGroup *wg;
1279 :
1280 0 : if (NULL == (wg = PR_NEWZAP(PRWaitGroup)))
1281 : {
1282 0 : PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
1283 0 : goto failed;
1284 : }
1285 : /* the wait group itself */
1286 0 : wg->ml = PR_NewLock();
1287 0 : if (NULL == wg->ml) goto failed_lock;
1288 0 : wg->io_taken = PR_NewCondVar(wg->ml);
1289 0 : if (NULL == wg->io_taken) goto failed_cvar0;
1290 0 : wg->io_complete = PR_NewCondVar(wg->ml);
1291 0 : if (NULL == wg->io_complete) goto failed_cvar1;
1292 0 : wg->new_business = PR_NewCondVar(wg->ml);
1293 0 : if (NULL == wg->new_business) goto failed_cvar2;
1294 0 : wg->mw_manage = PR_NewCondVar(wg->ml);
1295 0 : if (NULL == wg->mw_manage) goto failed_cvar3;
1296 :
1297 0 : PR_INIT_CLIST(&wg->group_link);
1298 0 : PR_INIT_CLIST(&wg->io_ready);
1299 :
1300 : /* the waiters sequence */
1301 0 : wg->waiter = (_PRWaiterHash*)PR_CALLOC(
1302 : sizeof(_PRWaiterHash) +
1303 : (_PR_DEFAULT_HASH_LENGTH * sizeof(PRRecvWait*)));
1304 0 : if (NULL == wg->waiter)
1305 : {
1306 0 : PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
1307 0 : goto failed_waiter;
1308 : }
1309 0 : wg->waiter->count = 0;
1310 0 : wg->waiter->length = _PR_DEFAULT_HASH_LENGTH;
1311 :
1312 : #ifdef WINNT
1313 : _PR_MD_NEW_LOCK(&wg->mdlock);
1314 : PR_INIT_CLIST(&wg->wait_list);
1315 : #endif /* WINNT */
1316 :
1317 0 : PR_Lock(mw_lock);
1318 0 : PR_APPEND_LINK(&wg->group_link, &mw_state->group_list);
1319 0 : PR_Unlock(mw_lock);
1320 0 : return wg;
1321 :
1322 : failed_waiter:
1323 0 : PR_DestroyCondVar(wg->mw_manage);
1324 : failed_cvar3:
1325 0 : PR_DestroyCondVar(wg->new_business);
1326 : failed_cvar2:
1327 0 : PR_DestroyCondVar(wg->io_complete);
1328 : failed_cvar1:
1329 0 : PR_DestroyCondVar(wg->io_taken);
1330 : failed_cvar0:
1331 0 : PR_DestroyLock(wg->ml);
1332 : failed_lock:
1333 0 : PR_DELETE(wg);
1334 0 : wg = NULL;
1335 :
1336 : failed:
1337 0 : return wg;
1338 : } /* MW_CreateWaitGroup */
1339 :
1340 0 : PR_IMPLEMENT(PRStatus) PR_DestroyWaitGroup(PRWaitGroup *group)
1341 : {
1342 0 : PRStatus rv = PR_SUCCESS;
1343 0 : if (NULL == group) group = mw_state->group;
1344 0 : PR_ASSERT(NULL != group);
1345 0 : if (NULL != group)
1346 : {
1347 0 : PR_Lock(group->ml);
1348 0 : if ((group->waiting_threads == 0)
1349 0 : && (group->waiter->count == 0)
1350 0 : && PR_CLIST_IS_EMPTY(&group->io_ready))
1351 : {
1352 0 : group->state = _prmw_stopped;
1353 : }
1354 : else
1355 : {
1356 0 : PR_SetError(PR_INVALID_STATE_ERROR, 0);
1357 0 : rv = PR_FAILURE;
1358 : }
1359 0 : PR_Unlock(group->ml);
1360 0 : if (PR_FAILURE == rv) return rv;
1361 :
1362 0 : PR_Lock(mw_lock);
1363 0 : PR_REMOVE_LINK(&group->group_link);
1364 0 : PR_Unlock(mw_lock);
1365 :
1366 : #ifdef WINNT
1367 : /*
1368 : * XXX make sure wait_list is empty and waiter is empty.
1369 : * These must be checked while holding mdlock.
1370 : */
1371 : _PR_MD_FREE_LOCK(&group->mdlock);
1372 : #endif
1373 :
1374 0 : PR_DELETE(group->waiter);
1375 0 : PR_DELETE(group->polling_list);
1376 0 : PR_DestroyCondVar(group->mw_manage);
1377 0 : PR_DestroyCondVar(group->new_business);
1378 0 : PR_DestroyCondVar(group->io_complete);
1379 0 : PR_DestroyCondVar(group->io_taken);
1380 0 : PR_DestroyLock(group->ml);
1381 0 : if (group == mw_state->group) mw_state->group = NULL;
1382 0 : PR_DELETE(group);
1383 : }
1384 : else
1385 : {
1386 : /* The default wait group is not created yet. */
1387 0 : PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
1388 0 : rv = PR_FAILURE;
1389 : }
1390 0 : return rv;
1391 : } /* PR_DestroyWaitGroup */
1392 :
1393 : /**********************************************************************
1394 : ***********************************************************************
1395 : ******************** Wait group enumerations **************************
1396 : ***********************************************************************
1397 : **********************************************************************/
1398 :
1399 0 : PR_IMPLEMENT(PRMWaitEnumerator*) PR_CreateMWaitEnumerator(PRWaitGroup *group)
1400 : {
1401 0 : PRMWaitEnumerator *enumerator = PR_NEWZAP(PRMWaitEnumerator);
1402 0 : if (NULL == enumerator) PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
1403 : else
1404 : {
1405 0 : enumerator->group = group;
1406 0 : enumerator->seal = _PR_ENUM_SEALED;
1407 : }
1408 0 : return enumerator;
1409 : } /* PR_CreateMWaitEnumerator */
1410 :
1411 0 : PR_IMPLEMENT(PRStatus) PR_DestroyMWaitEnumerator(PRMWaitEnumerator* enumerator)
1412 : {
1413 0 : PR_ASSERT(NULL != enumerator);
1414 0 : PR_ASSERT(_PR_ENUM_SEALED == enumerator->seal);
1415 0 : if ((NULL == enumerator) || (_PR_ENUM_SEALED != enumerator->seal))
1416 : {
1417 0 : PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
1418 0 : return PR_FAILURE;
1419 : }
1420 0 : enumerator->seal = _PR_ENUM_UNSEALED;
1421 0 : PR_Free(enumerator);
1422 0 : return PR_SUCCESS;
1423 : } /* PR_DestroyMWaitEnumerator */
1424 :
/*
** Step through the group's waiter hash, returning the next non-NULL
** registered descriptor, or NULL when the table is exhausted.  Pass
** previous == NULL to (re)start; thereafter pass the last value
** returned.  The enumeration is restarted transparently if the group's
** p_timestamp shows the table changed since the last step.  Must be
** driven by a single thread (checked via enumerator->thread).
*/
PR_IMPLEMENT(PRRecvWait*) PR_EnumerateWaitGroup(
    PRMWaitEnumerator *enumerator, const PRRecvWait *previous)
{
    PRRecvWait *result = NULL;

    /* entry point sanity checking */
    PR_ASSERT(NULL != enumerator);
    PR_ASSERT(_PR_ENUM_SEALED == enumerator->seal);
    if ((NULL == enumerator)
    || (_PR_ENUM_SEALED != enumerator->seal)) goto bad_argument;

    /* beginning of enumeration */
    if (NULL == previous)
    {
        /* late-bind a NULL group to the module's default group */
        if (NULL == enumerator->group)
        {
            enumerator->group = mw_state->group;
            if (NULL == enumerator->group)
            {
                PR_SetError(PR_GROUP_EMPTY_ERROR, 0);
                return NULL;
            }
        }
        /* snapshot the cursor, the table's timestamp and our identity */
        enumerator->waiter = &enumerator->group->waiter->recv_wait;
        enumerator->p_timestamp = enumerator->group->p_timestamp;
        enumerator->thread = PR_GetCurrentThread();
        enumerator->index = 0;
    }
    /* continuing an enumeration */
    else
    {
        PRThread *me = PR_GetCurrentThread();
        PR_ASSERT(me == enumerator->thread);
        if (me != enumerator->thread) goto bad_argument;

        /* need to restart the enumeration */
        if (enumerator->p_timestamp != enumerator->group->p_timestamp)
            return PR_EnumerateWaitGroup(enumerator, NULL);
    }

    /* actually progress the enumeration */
#if defined(WINNT)
    _PR_MD_LOCK(&enumerator->group->mdlock);
#else
    PR_Lock(enumerator->group->ml);
#endif
    /* both post-increments persist in the enumerator between calls:
    ** index counts slots visited, waiter is the roving slot cursor */
    while (enumerator->index++ < enumerator->group->waiter->length)
    {
        if (NULL != (result = *(enumerator->waiter)++)) break;
    }
#if defined(WINNT)
    _PR_MD_UNLOCK(&enumerator->group->mdlock);
#else
    PR_Unlock(enumerator->group->ml);
#endif

    return result;  /* what we live for */

bad_argument:
    PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
    return NULL;  /* probably ambiguous */
}  /* PR_EnumerateWaitGroup */
1487 :
1488 : /* prmwait.c */
|