X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=fs%2Feventpoll.c;h=34f68f3a069a1b7d4924dc8be718927f9e6c791d;hb=dbee2199c37336e89060fbe9abdfd1ca8454372a;hp=0b73cd45a06d6a05a2b37231a39a51cf1b8a8e4e;hpb=5b58e21a27028a9f0399449d8bc8494fd9d9ff70;p=powerpc.git

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 0b73cd45a0..34f68f3a06 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -325,15 +325,14 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
 	int wake_nests = 0;
 	unsigned long flags;
 	struct task_struct *this_task = current;
-	struct list_head *lsthead = &psw->wake_task_list, *lnk;
+	struct list_head *lsthead = &psw->wake_task_list;
 	struct wake_task_node *tncur;
 	struct wake_task_node tnode;
 
 	spin_lock_irqsave(&psw->lock, flags);
 
 	/* Try to see if the current task is already inside this wakeup call */
-	list_for_each(lnk, lsthead) {
-		tncur = list_entry(lnk, struct wake_task_node, llink);
+	list_for_each_entry(tncur, lsthead, llink) {
 
 		if (tncur->wq == wq ||
 		    (tncur->task == this_task && ++wake_nests > EP_MAX_POLLWAKE_NESTS)) {
@@ -463,7 +462,7 @@ static void ep_free(struct eventpoll *ep)
 	 * holding "epmutex" we can be sure that no file cleanup code will hit
 	 * us during this operation. So we can avoid the lock on "ep->lock".
 	 */
-	while ((rbp = rb_first(&ep->rbr)) != 0) {
+	while ((rbp = rb_first(&ep->rbr)) != NULL) {
 		epi = rb_entry(rbp, struct epitem, rbn);
 		ep_remove(ep, epi);
 	}
@@ -1324,12 +1323,12 @@ static int __init eventpoll_init(void)
 	/* Allocates slab cache used to allocate "struct epitem" items */
 	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
 			0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC,
-			NULL, NULL);
+			NULL);
 
 	/* Allocates slab cache used to allocate "struct eppoll_entry" */
 	pwq_cache = kmem_cache_create("eventpoll_pwq",
 			sizeof(struct eppoll_entry), 0,
-			EPI_SLAB_DEBUG|SLAB_PANIC, NULL, NULL);
+			EPI_SLAB_DEBUG|SLAB_PANIC, NULL);
 
 	return 0;
 }
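
For context on the hunks above: the first replaces an open-coded list_for_each()/list_entry() pair with list_for_each_entry(), which folds the container_of() conversion into the loop macro and makes the separate struct list_head cursor (*lnk) unnecessary; the second compares rb_first()'s pointer result against NULL rather than 0; the third appears to track the slab API change that dropped the destructor argument from kmem_cache_create(). Below is a minimal userspace sketch of the typed-iteration idiom, using simplified stand-ins for the kernel's <linux/list.h> macros; it is illustrative only, not the kernel headers, and unlike the real macro (which infers the entry type with typeof()) the type is passed explicitly here to stay within standard C.

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified intrusive list node, modeled on the kernel's struct list_head. */
    struct list_head {
            struct list_head *next, *prev;
    };

    /* Recover the containing structure from a pointer to its embedded node. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Open-coded iteration: walk raw nodes, convert each one by hand (the old pattern). */
    #define list_for_each(pos, head) \
            for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

    /* Typed iteration: the container_of() step happens inside the macro (the new pattern). */
    #define list_for_each_entry(pos, head, member, type) \
            for ((pos) = container_of((head)->next, type, member); \
                 &(pos)->member != (head); \
                 (pos) = container_of((pos)->member.next, type, member))

    /* Stand-in for the epoll structure that embeds the list node. */
    struct wake_task_node {
            int task_id;
            struct list_head llink;
    };

    int main(void)
    {
            struct list_head head;
            struct wake_task_node a = { .task_id = 1 };
            struct wake_task_node *tncur;

            /* Build a one-element circular list: head <-> a.llink */
            head.next = head.prev = &a.llink;
            a.llink.next = a.llink.prev = &head;

            /* No separate struct list_head *lnk cursor is needed. */
            list_for_each_entry(tncur, &head, llink, struct wake_task_node)
                    printf("task %d\n", tncur->task_id);
            return 0;
    }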