/*
 *  linux/fs/sysv/itree.c
 *
 *  Handling of indirect blocks' trees.
 */

#include <linux/sysv_fs.h>
#include <linux/locks.h>
#include <linux/smp_lock.h>

enum {DIRECT = 10, DEPTH = 4};	/* Have triple indirect */

static inline void dirty_indirect(struct buffer_head *bh, struct inode *inode)
{
	mark_buffer_dirty_inode(bh, inode);
	if (IS_SYNC(inode)) {
		ll_rw_block (WRITE, 1, &bh);
		wait_on_buffer (bh);
	}
}

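/*
 * Map a logical file block to a path of array indices through the
 * block tree: one index for a direct block, two for a single
 * indirect, three for a double indirect, four for a triple indirect.
 * For example, with 256 pointers per indirect block, block 300 maps
 * to {DIRECT+1, 0, 34}: 300 - 10 - 256 = 34, i.e. entry 34 of the
 * first block under the double indirect.  Returns the depth of the
 * path, or 0 if the block number is out of range.
 */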
static int block_to_path(struct inode *inode, long block, int offsets[DEPTH])
{
	struct super_block *sb = inode->i_sb;
	int ptrs_bits = sb->sv_ind_per_block_bits;
	unsigned long	indirect_blocks = sb->sv_ind_per_block,
			double_blocks = sb->sv_ind_per_block_2;
	int n = 0;

	if (block < 0) {
		printk("sysv_block_map: block < 0\n");
	} else if (block < DIRECT) {
		offsets[n++] = block;
	} else if ((block -= DIRECT) < indirect_blocks) {
		offsets[n++] = DIRECT;
		offsets[n++] = block;
	} else if ((block -= indirect_blocks) < double_blocks) {
		offsets[n++] = DIRECT+1;
		offsets[n++] = block >> ptrs_bits;
		offsets[n++] = block & (indirect_blocks - 1);
	} else if (((block -= double_blocks) >> (ptrs_bits * 2)) < indirect_blocks) {
		offsets[n++] = DIRECT+2;
		offsets[n++] = block >> (ptrs_bits * 2);
		offsets[n++] = (block >> ptrs_bits) & (indirect_blocks - 1);
		offsets[n++] = block & (indirect_blocks - 1);
	}
	return n;
}

static inline int block_to_cpu(struct super_block *sb, u32 nr)
{
	return sb->sv_block_base + fs32_to_cpu(sb, nr);
}

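/*
 * The lookup and truncate paths track their position in the block
 * tree with a chain of Indirect entries: p is the address of a block
 * pointer, key is the value that pointer held when it was read, and
 * bh is the indirect block the pointer lives in (NULL when the
 * pointer sits in the inode itself).
 */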
typedef struct {
	u32	*p;
	u32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, u32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

static inline u32 *block_end(struct buffer_head *bh)
{
	return (u32*)((char*)bh->b_data + bh->b_size);
}

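/*
 * Read the chain of indirect blocks leading to the requested block.
 * Returns NULL if every link was found, otherwise a pointer to the
 * last complete link; on failure *err is set to -EIO (read error) or
 * -EAGAIN (the chain changed while we slept in sb_bread()).
 */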
static Indirect *get_branch(struct inode *inode, int depth, int offsets[],
			    Indirect chain[], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	add_chain (chain, NULL, inode->u.sysv_i.i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		int block = block_to_cpu(sb, p->key);
		bh = sb_bread(sb, block);
		if (!bh)
			goto failure;
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (u32*)bh->b_data + *++offsets);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

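/*
 * Allocate the missing tail of a branch: the data block plus any
 * indirect blocks needed above it.  New indirect blocks are zeroed,
 * pointed at their children and written out, but the branch is not
 * yet connected to the tree.  Returns 0 on success, -ENOSPC if any
 * allocation fails (everything already allocated is released).
 */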
static int alloc_branch(struct inode *inode, int num, int *offsets,
			Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int n = 0;
	int i;

	branch[0].key = sysv_new_block(inode->i_sb);
	if (branch[0].key) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		int parent;
		/* Allocate the next block */
		branch[n].key = sysv_new_block(inode->i_sb);
		if (!branch[n].key)
			break;
		/*
		 * Get buffer_head for parent block, zero it out and set
		 * the pointer to new one, then send parent to disk.
		 */
		parent = block_to_cpu(inode->i_sb, branch[n-1].key);
		bh = sb_getblk(inode->i_sb, parent);
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].bh = bh;
		branch[n].p = (u32*) bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		mark_buffer_uptodate(bh, 1);
		unlock_buffer(bh);
		dirty_indirect(bh, inode);
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		sysv_free_block(inode->i_sb, branch[i].key);
	return -ENOSPC;
}

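/*
 * Connect a branch built by alloc_branch() to the tree: check that
 * the splice point is still vacant, store the new key there and dirty
 * whatever holds that pointer (indirect block or the inode itself).
 * On a race the fresh blocks are thrown away and -EAGAIN is returned.
 */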
static inline int splice_branch(struct inode *inode, Indirect chain[],
				Indirect *where, int num)
{
	int i;

	/* Verify that place we are splicing to is still there and vacant */
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	*where->p = where->key;
	inode->i_ctime = CURRENT_TIME;

	/* had we spliced it onto indirect block? */
	if (where->bh)
		dirty_indirect(where->bh, inode);

	if (IS_SYNC(inode))
		sysv_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	return 0;

changed:
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		sysv_free_block(inode->i_sb, where[i].key);
	return -EAGAIN;
}

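/*
 * The get_block callback handed to the generic buffer/page helpers
 * below: map logical block iblock of the inode to a device block,
 * allocating the missing part of the branch when create is set.
 */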
static int get_block(struct inode *inode, long iblock, struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	struct super_block *sb = inode->i_sb;
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, iblock, offsets);

	if (depth == 0)
		goto out;

	lock_kernel();
reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		bh_result->b_dev = sb->s_dev;
		bh_result->b_blocknr = block_to_cpu(sb, chain[depth-1].key);
		bh_result->b_state |= (1UL << BH_Mapped);
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		unlock_kernel();
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	bh_result->b_state |= (1UL << BH_New);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

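/* Truncate helper: true if all pointers in [p, q) are zero. */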
static inline int all_zeroes(u32 *p, u32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

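/*
 * Find the topmost node of the branch being cut off that is not
 * shared with the part of the file that survives the truncate.  The
 * detached top is returned through *top; the return value points at
 * the last chain entry that must be kept (and dirtied by the caller).
 */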
static Indirect *find_shared(struct inode *inode, int depth,
				int offsets[DEPTH], Indirect chain[DEPTH],
				u32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		goto no_top;
	for (p=partial; p>chain && all_zeroes((u32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

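/*
 * Free a contiguous run of data block pointers, clearing each pointer
 * as its block is returned to the free list.
 */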
static inline void free_data(struct inode *inode, u32 *p, u32 *q)
{
	for ( ; p < q ; p++) {
		u32 nr = *p;
		if (nr) {
			*p = 0;
			sysv_free_block(inode->i_sb, nr);
			mark_inode_dirty(inode);
		}
	}
}

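/*
 * Recursively free full subtrees: at depth > 0 every non-zero pointer
 * in [p, q) names an indirect block whose contents are freed first,
 * then the block itself; at depth 0 this is just free_data().
 */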
static void free_branches(struct inode *inode, u32 *p, u32 *q, int depth)
{
	struct buffer_head * bh;
	struct super_block *sb = inode->i_sb;

	if (depth--) {
		for ( ; p < q ; p++) {
			int block;
			u32 nr = *p;
			if (!nr)
				continue;
			*p = 0;
			block = block_to_cpu(sb, nr);
			bh = sb_bread(sb, block);
			if (!bh)
				continue;
			free_branches(inode, (u32*)bh->b_data,
					block_end(bh), depth);
			bforget(bh);
			sysv_free_block(sb, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}

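/*
 * Truncate the inode to i_size: zero the tail of the last remaining
 * block, free the data blocks past the new end, then prune the
 * indirect tree from the cut point downwards.
 */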
void sysv_truncate (struct inode * inode)
{
	u32 *i_data = inode->u.sysv_i.i_data;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	int nr = 0;
	int n;
	long iblock;
	unsigned blocksize;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return;

	blocksize = inode->i_sb->s_blocksize;
	iblock = (inode->i_size + blocksize-1)
					>> inode->i_sb->s_blocksize_bits;

	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (n == 0)
		return;

	if (n == 1) {
		free_data(inode, i_data+offsets[0], i_data + DIRECT);
		goto do_indirects;
	}

	partial = find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (already detached) */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			dirty_indirect(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		dirty_indirect(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees (== subtrees deeper than...) */
	while (n < DEPTH) {
		nr = i_data[DIRECT + n - 1];
		if (nr) {
			i_data[DIRECT + n - 1] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, n);
		}
		n++;
	}
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		sysv_sync_inode (inode);
	else
		mark_inode_dirty(inode);
}

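/*
 * Address space operations: thin wrappers that feed get_block() to
 * the generic block helpers.
 */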
static int sysv_writepage(struct page *page)
{
	return block_write_full_page(page,get_block);
}
static int sysv_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,get_block);
}
static int sysv_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return block_prepare_write(page,from,to,get_block);
}
static int sysv_bmap(struct address_space *mapping, long block)
{
	return generic_block_bmap(mapping,block,get_block);
}

struct address_space_operations sysv_aops = {
	readpage: sysv_readpage,
	writepage: sysv_writepage,
	sync_page: block_sync_page,
	prepare_write: sysv_prepare_write,
	commit_write: generic_commit_write,
	bmap: sysv_bmap
};