X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=fs%2Faio.c;h=aec2b1916d1b27e4bc9411212359801b8ba70386;hb=c22db9412736204b25aeba19d18e5ea922f7d632;hp=edfca5b7553581c65cddc50ac14e18f84ec2826c;hpb=4c2cb58c552a34744979a99ccf01762d5eb7e288;p=powerpc.git

diff --git a/fs/aio.c b/fs/aio.c
index edfca5b755..aec2b1916d 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -29,7 +29,6 @@
 #include <linux/highmem.h>
 #include <linux/workqueue.h>
 #include <linux/security.h>
-#include <linux/rcuref.h>
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
@@ -42,8 +41,9 @@
 #endif
 
 /*------ sysctl variables----*/
-atomic_t aio_nr = ATOMIC_INIT(0);	/* current system wide number of aio requests */
-unsigned aio_max_nr = 0x10000;	/* system wide maximum number of aio requests */
+static DEFINE_SPINLOCK(aio_nr_lock);
+unsigned long aio_nr;		/* current system wide number of aio requests */
+unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
 /*----end sysctl variables---*/
 
 static kmem_cache_t	*kiocb_cachep;
@@ -208,7 +208,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (nr_events > aio_max_nr)
+	if ((unsigned long)nr_events > aio_max_nr)
 		return ERR_PTR(-EAGAIN);
 
 	ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL);
@@ -233,8 +233,14 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 		goto out_freectx;
 
 	/* limit the number of system wide aios */
-	atomic_add(ctx->max_reqs, &aio_nr);	/* undone by __put_ioctx */
-	if (unlikely(atomic_read(&aio_nr) > aio_max_nr))
+	spin_lock(&aio_nr_lock);
+	if (aio_nr + ctx->max_reqs > aio_max_nr ||
+	    aio_nr + ctx->max_reqs < aio_nr)
+		ctx->max_reqs = 0;
+	else
+		aio_nr += ctx->max_reqs;
+	spin_unlock(&aio_nr_lock);
+	if (ctx->max_reqs == 0)
 		goto out_cleanup;
 
 	/* now link into global list. kludge. FIXME */
@@ -248,8 +254,6 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	return ctx;
 
 out_cleanup:
-	atomic_sub(ctx->max_reqs, &aio_nr);
-	ctx->max_reqs = 0;	/* prevent __put_ioctx from sub'ing aio_nr */
 	__put_ioctx(ctx);
 	return ERR_PTR(-EAGAIN);
 
@@ -374,7 +378,12 @@ void fastcall __put_ioctx(struct kioctx *ctx)
 	pr_debug("__put_ioctx: freeing %p\n", ctx);
 	kmem_cache_free(kioctx_cachep, ctx);
 
-	atomic_sub(nr_events, &aio_nr);
+	if (nr_events) {
+		spin_lock(&aio_nr_lock);
+		BUG_ON(aio_nr - nr_events > aio_nr);
+		aio_nr -= nr_events;
+		spin_unlock(&aio_nr_lock);
+	}
 }
 
 /* aio_get_req
@@ -447,6 +456,8 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 
 static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 {
+	assert_spin_locked(&ctx->ctx_lock);
+
 	if (req->ki_dtor)
 		req->ki_dtor(req);
 	kmem_cache_free(kiocb_cachep, req);
@@ -488,6 +499,8 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	dprintk(KERN_DEBUG "aio_put(%p): f_count=%d\n",
 		req, atomic_read(&req->ki_filp->f_count));
 
+	assert_spin_locked(&ctx->ctx_lock);
+
 	req->ki_users --;
 	if (unlikely(req->ki_users < 0))
 		BUG();
@@ -500,7 +513,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	/* Must be done under the lock to serialise against cancellation.
 	 * Call this aio_fput as it duplicates fput via the fput_work.
 	 */
-	if (unlikely(rcuref_dec_and_test(&req->ki_filp->f_count))) {
+	if (unlikely(atomic_dec_and_test(&req->ki_filp->f_count))) {
 		get_ioctx(ctx);
 		spin_lock(&fput_lock);
 		list_add(&req->ki_list, &fput_head);
@@ -609,14 +622,13 @@ static void unuse_mm(struct mm_struct *mm)
  * the kiocb (to tell the caller to activate the work
  * queue to process it), or 0, if it found that it was
  * already queued.
- *
- * Should be called with the spin lock iocb->ki_ctx->ctx_lock
- * held
  */
 static inline int __queue_kicked_iocb(struct kiocb *iocb)
 {
 	struct kioctx *ctx = iocb->ki_ctx;
 
+	assert_spin_locked(&ctx->ctx_lock);
+
 	if (list_empty(&iocb->ki_run_list)) {
 		list_add_tail(&iocb->ki_run_list,
 			&ctx->run_list);
@@ -761,13 +773,15 @@ out:
  * Process all pending retries queued on the ioctx
  * run list.
  * Assumes it is operating within the aio issuer's mm
- * context. Expects to be called with ctx->ctx_lock held
+ * context.
  */
 static int __aio_run_iocbs(struct kioctx *ctx)
 {
 	struct kiocb *iocb;
 	LIST_HEAD(run_list);
 
+	assert_spin_locked(&ctx->ctx_lock);
+
 	list_splice_init(&ctx->run_list, &run_list);
 	while (!list_empty(&run_list)) {
 		iocb = list_entry(run_list.next, struct kiocb,
@@ -927,28 +941,19 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
 	unsigned long	tail;
 	int		ret;
 
-	/* Special case handling for sync iocbs: events go directly
-	 * into the iocb for fast handling.  Note that this will not
-	 * work if we allow sync kiocbs to be cancelled. in which
-	 * case the usage count checks will have to move under ctx_lock
-	 * for all cases.
+	/*
+	 * Special case handling for sync iocbs:
+	 *  - events go directly into the iocb for fast handling
+	 *  - the sync task with the iocb in its stack holds the single iocb
+	 *    ref, no other paths have a way to get another ref
+	 *  - the sync task helpfully left a reference to itself in the iocb
 	 */
 	if (is_sync_kiocb(iocb)) {
-		int ret;
-
+		BUG_ON(iocb->ki_users != 1);
 		iocb->ki_user_data = res;
-		if (iocb->ki_users == 1) {
-			iocb->ki_users = 0;
-			ret = 1;
-		} else {
-			spin_lock_irq(&ctx->ctx_lock);
-			iocb->ki_users--;
-			ret = (0 == iocb->ki_users);
-			spin_unlock_irq(&ctx->ctx_lock);
-		}
-		/* sync iocbs put the task here for us */
+		iocb->ki_users = 0;
 		wake_up_process(iocb->ki_obj.tsk);
-		return ret;
+		return 1;
 	}
 
 	info = &ctx->ring_info;
@@ -1258,8 +1263,9 @@ asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
 		goto out;
 
 	ret = -EINVAL;
-	if (unlikely(ctx || (int)nr_events <= 0)) {
-		pr_debug("EINVAL: io_setup: ctx or nr_events > max\n");
+	if (unlikely(ctx || nr_events == 0)) {
+		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
+		         ctx, nr_events);
 		goto out;
 	}
 
@@ -1602,12 +1608,14 @@ asmlinkage long sys_io_submit(aio_context_t ctx_id, long nr,
 
 /* lookup_kiocb
  *	Finds a given iocb for cancellation.
- *	MUST be called with ctx->ctx_lock held.
  */
 static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
 				  u32 key)
 {
 	struct list_head *pos;
+
+	assert_spin_locked(&ctx->ctx_lock);
+
 	/* TODO: use a hash or array, this sucks. */
 	list_for_each(pos, &ctx->active_reqs) {
 		struct kiocb *kiocb = list_kiocb(pos);
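
The aio_nr hunks above replace a racy atomic_add()/atomic_read() pair (two tasks could each pass the limit check before either backed its addition out, or overflow the counter) with an all-or-nothing reservation under aio_nr_lock: either the full ctx->max_reqs is added to aio_nr, or max_reqs is zeroed so the teardown path knows there is nothing to subtract. A minimal userspace sketch of the same pattern, using a pthread mutex in place of the spinlock; the names reserve_reqs, release_reqs, cur_nr, max_nr are illustrative, not from the kernel source:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t nr_lock = PTHREAD_MUTEX_INITIALIZER; /* like aio_nr_lock */
static unsigned long cur_nr;		/* like aio_nr */
static unsigned long max_nr = 0x10000;	/* like aio_max_nr */

/* Reserve 'want' slots, all or nothing.  Returns the number actually
 * reserved.  The wraparound test (cur_nr + want < cur_nr) mirrors the
 * kernel's "aio_nr + ctx->max_reqs < aio_nr" overflow check. */
static unsigned long reserve_reqs(unsigned long want)
{
	unsigned long granted = 0;

	pthread_mutex_lock(&nr_lock);
	if (cur_nr + want <= max_nr && cur_nr + want >= cur_nr) {
		cur_nr += want;
		granted = want;
	}
	pthread_mutex_unlock(&nr_lock);
	return granted;
}

/* Undo a reservation.  A zero-sized one is a no-op, just as the new
 * __put_ioctx skips the subtraction when nr_events is 0. */
static void release_reqs(unsigned long granted)
{
	if (!granted)
		return;
	pthread_mutex_lock(&nr_lock);
	cur_nr -= granted;
	pthread_mutex_unlock(&nr_lock);
}

int main(void)
{
	unsigned long got = reserve_reqs(128);

	printf("reserved %lu, now in flight: %lu\n", got, cur_nr);
	release_reqs(got);
	return 0;
}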
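The aio_complete() hunk depends on a sync kiocb's single reference being held by the task sleeping on it, so the completer can publish the result and wake the sleeper without taking ctx_lock; in the kernel the plain stores are ordered against the waiter's re-check of ki_users by wake_up_process(). A userspace sketch of that handshake, with all names illustrative (a pthread condition variable needs its mutex where the kernel path needs no lock):

#include <pthread.h>
#include <stdio.h>

struct sync_iocb {
	pthread_mutex_t	mtx;
	pthread_cond_t	done;
	int		users;		/* 1 while in flight, like ki_users */
	long		user_data;	/* the result, like ki_user_data */
};

/* Completer side: the moral equivalent of the simplified aio_complete(). */
static void complete_sync(struct sync_iocb *io, long res)
{
	pthread_mutex_lock(&io->mtx);
	io->user_data = res;		/* iocb->ki_user_data = res; */
	io->users = 0;			/* iocb->ki_users = 0;       */
	pthread_cond_signal(&io->done);	/* wake_up_process(...);     */
	pthread_mutex_unlock(&io->mtx);
}

/* Waiter side: sleeps until the completer drops users to zero, then
 * picks the result out of the iocb it owns. */
static long wait_sync(struct sync_iocb *io)
{
	pthread_mutex_lock(&io->mtx);
	while (io->users)
		pthread_cond_wait(&io->done, &io->mtx);
	pthread_mutex_unlock(&io->mtx);
	return io->user_data;
}

static void *completer(void *arg)
{
	complete_sync(arg, 42);
	return NULL;
}

int main(void)
{
	struct sync_iocb io = {
		.mtx = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
		.users = 1,
	};
	pthread_t t;

	pthread_create(&t, NULL, completer, &io);
	printf("result %ld\n", wait_sync(&io));
	pthread_join(t, NULL);
	return 0;
}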