fs/gfs2/ops_address.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "jdata.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "page.h"
#include "quota.h"
#include "trans.h"

/**
 * get_block - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file
 *
 * Returns: errno
 */

static int get_block(struct inode *inode, sector_t lblock,
                     struct buffer_head *bh_result, int create)
{
        struct gfs2_inode *ip = get_v2ip(inode);
        int new = create;
        uint64_t dblock;
        int error;

        error = gfs2_block_map(ip, lblock, &new, &dblock, NULL);
        if (error)
                return error;

        if (!dblock)
                return 0;

        map_bh(bh_result, inode->i_sb, dblock);
        if (new)
                set_buffer_new(bh_result);

        return 0;
}

/**
 * get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file
 *
 * Returns: errno
 */

static int get_block_noalloc(struct inode *inode, sector_t lblock,
                             struct buffer_head *bh_result, int create)
{
        struct gfs2_inode *ip = get_v2ip(inode);
        int new = 0;
        uint64_t dblock;
        int error;

        error = gfs2_block_map(ip, lblock, &new, &dblock, NULL);
        if (error)
                return error;

        if (dblock)
                map_bh(bh_result, inode->i_sb, dblock);
        else if (gfs2_assert_withdraw(ip->i_sbd, !create))
                error = -EIO;

        return error;
}

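/**
 * get_blocks - Fills in buffer heads for a range of blocks
 * @inode: The inode
 * @lblock: The first block number to look up
 * @max_blocks: The maximum number of blocks to map
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add blocks to the file
 *
 * Like get_block(), but also returns the length of the mapped extent
 * (capped at @max_blocks) in bh_result->b_size.
 *
 * Returns: errno
 */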
static int get_blocks(struct inode *inode, sector_t lblock,
                      unsigned long max_blocks, struct buffer_head *bh_result,
                      int create)
{
        struct gfs2_inode *ip = get_v2ip(inode);
        int new = create;
        uint64_t dblock;
        uint32_t extlen;
        int error;

        error = gfs2_block_map(ip, lblock, &new, &dblock, &extlen);
        if (error)
                return error;

        if (!dblock)
                return 0;

        map_bh(bh_result, inode->i_sb, dblock);
        if (new)
                set_buffer_new(bh_result);

        if (extlen > max_blocks)
                extlen = max_blocks;
        bh_result->b_size = extlen << inode->i_blkbits;

        return 0;
}

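/**
 * get_blocks_noalloc - Fills in buffer heads for a range of existing blocks
 * @inode: The inode
 * @lblock: The first block number to look up
 * @max_blocks: The maximum number of blocks to map
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if the caller expected to be able to allocate
 *
 * Like get_blocks(), but never allocates new blocks.  Hitting a hole
 * while @create is set is treated as a filesystem inconsistency: the
 * filesystem is withdrawn and -EIO is returned.
 *
 * Returns: errno
 */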
static int get_blocks_noalloc(struct inode *inode, sector_t lblock,
                              unsigned long max_blocks,
                              struct buffer_head *bh_result, int create)
{
        struct gfs2_inode *ip = get_v2ip(inode);
        int new = 0;
        uint64_t dblock;
        uint32_t extlen;
        int error;

        error = gfs2_block_map(ip, lblock, &new, &dblock, &extlen);
        if (error)
                return error;

        if (dblock) {
                map_bh(bh_result, inode->i_sb, dblock);
                if (extlen > max_blocks)
                        extlen = max_blocks;
                bh_result->b_size = extlen << inode->i_blkbits;
        } else if (gfs2_assert_withdraw(ip->i_sbd, !create))
                error = -EIO;

        return error;
}

/**
 * gfs2_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 * Use Linux VFS block_write_full_page() to write one page,
 *   using GFS2's get_block_noalloc to find which blocks to write.
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        struct gfs2_inode *ip = get_v2ip(page->mapping->host);
        struct gfs2_sbd *sdp = ip->i_sbd;
        int error;

        atomic_inc(&sdp->sd_ops_address);

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
                unlock_page(page);
                return -EIO;
        }
        if (get_transaction) {
                redirty_page_for_writepage(wbc, page);
                unlock_page(page);
                return 0;
        }

        error = block_write_full_page(page, get_block_noalloc, wbc);

        gfs2_meta_cache_flush(ip);

        return error;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        void *kaddr;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        kaddr = kmap(page);
        memcpy((char *)kaddr,
               dibh->b_data + sizeof(struct gfs2_dinode),
               ip->i_di.di_size);
        memset((char *)kaddr + ip->i_di.di_size,
               0,
               PAGE_CACHE_SIZE - ip->i_di.di_size);
        kunmap(page);

        brelse(dibh);

        SetPageUptodate(page);

        return 0;
}

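/**
 * zero_readpage - Fill in a Linux page with zeros
 * @page: The page to zero
 *
 * Used for pages of a stuffed file past the first, which contain
 * nothing but zeros.
 *
 * Returns: errno
 */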
static int zero_readpage(struct page *page)
{
        void *kaddr;

        kaddr = kmap(page);
        memset(kaddr, 0, PAGE_CACHE_SIZE);
        kunmap(page);

        SetPageUptodate(page);
        unlock_page(page);

        return 0;
}

/**
 * jdata_readpage - readpage that goes through gfs2_jdata_read_mem()
 * @ip: The GFS2 inode
 * @page: The page to read
 *
 * Returns: errno
 */

static int jdata_readpage(struct gfs2_inode *ip, struct page *page)
{
        void *kaddr;
        int ret;

        kaddr = kmap(page);

        ret = gfs2_jdata_read_mem(ip, kaddr,
                                  (uint64_t)page->index << PAGE_CACHE_SHIFT,
                                  PAGE_CACHE_SIZE);
        if (ret >= 0) {
                if (ret < PAGE_CACHE_SIZE)
                        memset(kaddr + ret, 0, PAGE_CACHE_SIZE - ret);
                SetPageUptodate(page);
                ret = 0;
        }

        kunmap(page);

        unlock_page(page);

        return ret;
}

/**
 * gfs2_readpage - readpage with locking
 * @file: The file to read a page for
 * @page: The page to read
 *
 * Returns: errno
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
        struct gfs2_inode *ip = get_v2ip(page->mapping->host);
        struct gfs2_sbd *sdp = ip->i_sbd;
        int error;

        atomic_inc(&sdp->sd_ops_address);

        if (gfs2_assert_warn(sdp, gfs2_glock_is_locked_by_me(ip->i_gl))) {
                unlock_page(page);
                return -EOPNOTSUPP;
        }

        if (!gfs2_is_jdata(ip)) {
                if (gfs2_is_stuffed(ip)) {
                        if (!page->index) {
                                error = stuffed_readpage(ip, page);
                                unlock_page(page);
                        } else
                                error = zero_readpage(page);
                } else
                        error = block_read_full_page(page, get_block);
        } else
                error = jdata_readpage(ip, page);

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = -EIO;

        return error;
}

/**
 * gfs2_prepare_write - Prepare to write a page to a file
 * @file: The file to write to
 * @page: The page which is to be prepared for writing
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */

static int gfs2_prepare_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
{
        struct gfs2_inode *ip = get_v2ip(page->mapping->host);
        struct gfs2_sbd *sdp = ip->i_sbd;
        int error = 0;

        atomic_inc(&sdp->sd_ops_address);

        if (gfs2_assert_warn(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)))
                return -EOPNOTSUPP;

        if (gfs2_is_stuffed(ip)) {
                uint64_t file_size;
                file_size = ((uint64_t)page->index << PAGE_CACHE_SHIFT) + to;

                if (file_size > sdp->sd_sb.sb_bsize -
                                sizeof(struct gfs2_dinode)) {
                        error = gfs2_unstuff_dinode(ip, gfs2_unstuffer_page,
                                                    page);
                        if (!error)
                                error = block_prepare_write(page, from, to,
                                                            get_block);
                } else if (!PageUptodate(page))
                        error = stuffed_readpage(ip, page);
        } else
                error = block_prepare_write(page, from, to, get_block);

        return error;
}

/**
 * gfs2_commit_write - Commit write to a file
 * @file: The file to write to
 * @page: The page containing the data
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */

static int gfs2_commit_write(struct file *file, struct page *page,
                             unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = get_v2ip(inode);
        struct gfs2_sbd *sdp = ip->i_sbd;
        int error;

        atomic_inc(&sdp->sd_ops_address);

        if (gfs2_is_stuffed(ip)) {
                struct buffer_head *dibh;
                uint64_t file_size;
                void *kaddr;

                file_size = ((uint64_t)page->index << PAGE_CACHE_SHIFT) + to;

                error = gfs2_meta_inode_buffer(ip, &dibh);
                if (error)
                        goto fail;

                gfs2_trans_add_bh(ip->i_gl, dibh);

                kaddr = kmap(page);
                memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
                       (char *)kaddr + from,
                       to - from);
                kunmap(page);

                brelse(dibh);

                SetPageUptodate(page);

                if (inode->i_size < file_size)
                        i_size_write(inode, file_size);
        } else {
                if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED)
                        gfs2_page_add_databufs(sdp, page, from, to);
                error = generic_commit_write(file, page, from, to);
                if (error)
                        goto fail;
        }

        return 0;

 fail:
        ClearPageUptodate(page);

        return error;
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = get_v2ip(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        atomic_inc(&ip->i_sbd->sd_ops_address);

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = generic_block_bmap(mapping, lblock, get_block);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}

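/**
 * discard_buffer - Drop a buffer that is being invalidated
 * @sdp: The superblock
 * @bh: The buffer head to discard
 *
 * Detaches the buffer from any gfs2_databuf that references it, then
 * clears its dirty/mapped state so it is no longer associated with
 * an on-disk block.
 */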
static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_databuf *db;

        gfs2_log_lock(sdp);
        db = get_v2db(bh);
        if (db) {
                db->db_bh = NULL;
                set_v2db(bh, NULL);
                gfs2_log_unlock(sdp);
                brelse(bh);
        } else
                gfs2_log_unlock(sdp);

        lock_buffer(bh);
        clear_buffer_dirty(bh);
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
        unlock_buffer(bh);
}

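/**
 * gfs2_invalidatepage - Invalidate part or all of a page
 * @page: The page being invalidated
 * @offset: Offset into the page of the start of the invalidated range
 *
 * Discards every buffer in the page that starts at or beyond @offset.
 * When the whole page is invalidated (@offset == 0), the page is also
 * released if possible.
 *
 * Returns: 1 unless try_to_release_page() fails for a fully
 *          invalidated page
 */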
static int gfs2_invalidatepage(struct page *page, unsigned long offset)
{
        struct gfs2_sbd *sdp = get_v2sdp(page->mapping->host->i_sb);
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;
        int ret = 1;

        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                return 1;

        bh = head = page_buffers(page);
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                if (offset <= curr_off)
                        discard_buffer(sdp, bh);

                curr_off = next_off;
                bh = next;
        } while (bh != head);

        if (!offset)
                ret = try_to_release_page(page, 0);

        return ret;
}

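/**
 * gfs2_direct_IO - Do direct I/O to or from a file
 * @rw: READ or WRITE
 * @iocb: The I/O control block
 * @iov: The I/O vector
 * @offset: The file offset
 * @nr_segs: The number of segments in @iov
 *
 * Hands the request to blockdev_direct_IO(), using get_blocks_noalloc
 * for writes that are not part of a transaction so that no new blocks
 * are allocated.
 *
 * Returns: the number of bytes transferred or errno
 */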
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                              loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct gfs2_inode *ip = get_v2ip(inode);
        struct gfs2_sbd *sdp = ip->i_sbd;
        get_blocks_t *gb = get_blocks;

        atomic_inc(&sdp->sd_ops_address);

        if (gfs2_assert_warn(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)) ||
            gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip)))
                return -EINVAL;

        if (rw == WRITE && !get_transaction)
                gb = get_blocks_noalloc;

        return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                  offset, nr_segs, gb, NULL);
}

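/*
 * Address space operations for GFS2 files.
 */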
struct address_space_operations gfs2_file_aops = {
        .writepage = gfs2_writepage,
        .readpage = gfs2_readpage,
        .sync_page = block_sync_page,
        .prepare_write = gfs2_prepare_write,
        .commit_write = gfs2_commit_write,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .direct_IO = gfs2_direct_IO,
};
