 * Keep track of the general-purpose IO-buffer structures used to track
 * abstract kernel-space io buffers.
9 #include <linux/iobuf.h>
10 #include <linux/slab.h>
11 #include <linux/vmalloc.h>
14 static kmem_cache_t *kiobuf_cachep;
16 void end_kio_request(struct kiobuf *kiobuf, int uptodate)
18 if ((!uptodate) && !kiobuf->errno)
21 if (atomic_dec_and_test(&kiobuf->io_count)) {
23 kiobuf->end_io(kiobuf);
24 wake_up(&kiobuf->wait_queue);
28 static int kiobuf_init(struct kiobuf *iobuf)
30 init_waitqueue_head(&iobuf->wait_queue);
36 atomic_set(&iobuf->io_count, 0);
38 return expand_kiobuf(iobuf, KIO_STATIC_PAGES);
41 int alloc_kiobuf_bhs(struct kiobuf * kiobuf)
46 kmalloc(sizeof(*kiobuf->blocks) * KIO_MAX_SECTORS, GFP_KERNEL);
47 if (unlikely(!kiobuf->blocks))
50 kmalloc(sizeof(*kiobuf->bh) * KIO_MAX_SECTORS, GFP_KERNEL);
51 if (unlikely(!kiobuf->bh))
54 for (i = 0; i < KIO_MAX_SECTORS; i++) {
55 kiobuf->bh[i] = kmem_cache_alloc(bh_cachep, GFP_KERNEL);
56 if (unlikely(!kiobuf->bh[i]))
64 kmem_cache_free(bh_cachep, kiobuf->bh[i]);
67 memset(kiobuf->bh, 0, sizeof(*kiobuf->bh) * KIO_MAX_SECTORS);
70 free_kiobuf_bhs(kiobuf);
74 void free_kiobuf_bhs(struct kiobuf * kiobuf)
79 for (i = 0; i < KIO_MAX_SECTORS; i++)
81 kmem_cache_free(bh_cachep, kiobuf->bh[i]);
87 kfree(kiobuf->blocks);
88 kiobuf->blocks = NULL;
92 int alloc_kiovec(int nr, struct kiobuf **bufp)
97 for (i = 0; i < nr; i++) {
98 iobuf = kmem_cache_alloc(kiobuf_cachep, GFP_KERNEL);
101 if (unlikely(kiobuf_init(iobuf)))
103 if (unlikely(alloc_kiobuf_bhs(iobuf)))
111 kmem_cache_free(kiobuf_cachep, iobuf);
113 free_kiovec(i, bufp);
117 void free_kiovec(int nr, struct kiobuf **bufp)
120 struct kiobuf *iobuf;
122 for (i = 0; i < nr; i++) {
125 unlock_kiovec(1, &iobuf);
126 kfree(iobuf->maplist);
127 free_kiobuf_bhs(iobuf);
128 kmem_cache_free(kiobuf_cachep, bufp[i]);
132 int expand_kiobuf(struct kiobuf *iobuf, int wanted)
134 struct page ** maplist;
136 if (iobuf->array_len >= wanted)
139 maplist = kmalloc(wanted * sizeof(struct page **), GFP_KERNEL);
140 if (unlikely(!maplist))
143 /* Did it grow while we waited? */
144 if (unlikely(iobuf->array_len >= wanted)) {
149 if (iobuf->array_len) {
150 memcpy(maplist, iobuf->maplist, iobuf->array_len * sizeof(*maplist));
151 kfree(iobuf->maplist);
154 iobuf->maplist = maplist;
155 iobuf->array_len = wanted;
159 void kiobuf_wait_for_io(struct kiobuf *kiobuf)
161 struct task_struct *tsk = current;
162 DECLARE_WAITQUEUE(wait, tsk);
164 if (atomic_read(&kiobuf->io_count) == 0)
167 add_wait_queue(&kiobuf->wait_queue, &wait);
169 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
170 if (atomic_read(&kiobuf->io_count) != 0) {
171 run_task_queue(&tq_disk);
173 if (atomic_read(&kiobuf->io_count) != 0)
176 tsk->state = TASK_RUNNING;
177 remove_wait_queue(&kiobuf->wait_queue, &wait);
180 void __init iobuf_cache_init(void)
182 kiobuf_cachep = kmem_cache_create("kiobuf", sizeof(struct kiobuf),
183 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
185 panic("Cannot create kiobuf SLAB cache");