X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=kernel%2Fwait.c;h=444ddbfaefc490839e2cf913cb688c7ebb9f0df9;hb=ba7cc09c9c9e29a57045dc5bbf843ac1cfad3283;hp=5985d866531f29b4165f81603cd30487acd111bd;hpb=b4bc7b53ccfa0cb793591ba11af49db8f1bc5a4d;p=powerpc.git

diff --git a/kernel/wait.c b/kernel/wait.c
index 5985d86653..444ddbfaef 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -10,6 +10,14 @@
 #include <linux/wait.h>
 #include <linux/hash.h>
 
+void init_waitqueue_head(wait_queue_head_t *q)
+{
+	spin_lock_init(&q->lock);
+	INIT_LIST_HEAD(&q->task_list);
+}
+
+EXPORT_SYMBOL(init_waitqueue_head);
+
 void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	unsigned long flags;
@@ -53,7 +61,7 @@ EXPORT_SYMBOL(remove_wait_queue);
  * The spin_unlock() itself is semi-permeable and only protects
  * one way (it only protects stuff inside the critical region and
  * stops them from bleeding out - it would still allow subsequent
- * loads to move into the the critical region).
+ * loads to move into the critical region).
  */
 void fastcall prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
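
For context (not part of the patch): the first hunk adds an out-of-line definition of init_waitqueue_head() to kernel/wait.c and exports it, so code that initialises a wait queue head at runtime links against a real symbol instead of an inline helper. Below is a minimal, hypothetical sketch of such a caller; the demo_* names and the module boilerplate are illustrative assumptions, not taken from this tree.

/*
 * Illustrative caller (not part of the patch above): a module that
 * initialises an embedded wait_queue_head_t at runtime via
 * init_waitqueue_head(), the symbol the patch exports.
 * All demo_* names are hypothetical.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>

static wait_queue_head_t demo_wq;
static int demo_ready;

/* Sleep until demo_ready becomes non-zero (or a signal arrives). */
static int __maybe_unused demo_wait(void)
{
	return wait_event_interruptible(demo_wq, demo_ready != 0);
}

/* Set the condition, then wake any sleepers on the queue. */
static void __maybe_unused demo_signal(void)
{
	demo_ready = 1;
	wake_up(&demo_wq);
}

static int __init demo_init(void)
{
	/* Runtime initialisation of the wait queue head. */
	init_waitqueue_head(&demo_wq);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

In a real driver, demo_wait() would sit in a read or ioctl path and demo_signal() in an interrupt handler or completion path; statically allocated queues would use DECLARE_WAIT_QUEUE_HEAD() instead.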