2 * linux/fs/hfsplus/bnode.c
5 * Brad Boyer (flar@allandria.com)
6 * (C) 2003 Ardis Technologies <roman@ardistech.com>
8 * Handle basic btree node operations
11 #include <linux/string.h>
12 #include <linux/slab.h>
13 #include <linux/pagemap.h>
15 #include <linux/swap.h>
16 #include <linux/version.h>
17 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
18 #include <linux/buffer_head.h>
21 #include "hfsplus_fs.h"
22 #include "hfsplus_raw.h"
26 int hfsplus_update_idx_rec(struct hfsplus_find_data *fd);
28 /* Get the given block associated with the tree owning node */
/*
 * Map logical block 'n' of the B-tree's backing inode to a physical
 * disk block and read it through the buffer cache.  A throwaway
 * buffer_head is used only to receive b_blocknr from
 * hfsplus_get_block(); the actual read goes through sb_bread().
 * NOTE(review): several original lines are missing from this view
 * (opening brace, the assignment of 'sb' -- presumably from
 * inode->i_sb -- and the error-path return); code kept byte-for-byte.
 */
29 struct buffer_head *hfsplus_getblk(struct inode *inode, unsigned long n)
31 struct super_block *sb;
32 struct buffer_head tmp_bh;
/* create=1: allocate the block if it is not yet mapped */
35 if (hfsplus_get_block(inode, n, &tmp_bh, 1)) {
36 printk("HFS+-fs: Failed to find block for B*Tree data\n");
39 return sb_bread(sb, tmp_bh.b_blocknr);
42 /* Copy a specified range of bytes from the raw data of a node */
/*
 * Copy 'len' bytes starting at node-relative offset 'off' into 'buf'.
 * The node's raw data may span several page-cache pages, so the copy
 * is done page by page: first the tail of the initial page, then whole
 * pages.  Each page is kmapped only for the duration of its memcpy.
 * NOTE(review): the loop header, the 'buf += l' / 'len' bookkeeping and
 * the early-out when the first chunk satisfies 'len' are on lines
 * missing from this view; code kept byte-for-byte.
 */
43 void hfsplus_bnode_readbytes(hfsplus_bnode *node, void *buf,
44 unsigned long off, unsigned long len)
/* translate node-relative offset to (page index, in-page offset) */
49 off += node->page_offset;
50 pagep = node->page + (off >> PAGE_CACHE_SHIFT);
51 off &= ~PAGE_CACHE_MASK;
/* first chunk: from 'off' to the end of the first page (or 'len') */
53 l = min(len, PAGE_CACHE_SIZE - off);
54 memcpy(buf, hfsplus_kmap(*pagep) + off, l);
55 hfsplus_kunmap(*pagep++);
/* subsequent chunks start at page boundaries */
59 l = min(len, PAGE_CACHE_SIZE);
60 memcpy(buf, hfsplus_kmap(*pagep), l);
61 hfsplus_kunmap(*pagep++);
/*
 * Read a big-endian u16 from the node at offset 'off' and return it in
 * CPU byte order.  Thin convenience wrapper over
 * hfsplus_bnode_readbytes().
 * NOTE(review): the declaration of 'data' (presumably a u16 local) is
 * on a line missing from this view.
 */
65 u16 hfsplus_bnode_read_u16(hfsplus_bnode *node, unsigned long off)
69 hfsplus_bnode_readbytes(node, &data, off, 2);
70 return be16_to_cpu(data);
/*
 * Copy one record key (including its 2-byte length field) from the
 * node at offset 'off' into 'key'.  Leaf records -- and index records
 * in trees with the variable-index-key-size attribute -- store the key
 * length in the first two bytes; otherwise every index key occupies
 * the tree's fixed max_key_len.  The '+ 2' accounts for the length
 * field itself in both cases.
 * NOTE(review): the assignment of 'tree' (presumably node->tree) is on
 * a line missing from this view.
 */
73 void hfsplus_bnode_read_key(hfsplus_bnode *node, void *key, unsigned long off)
76 unsigned long key_len;
79 if (node->kind == HFSPLUS_NODE_LEAF ||
80 tree->attributes & HFSPLUS_TREE_VAR_NDXKEY_SIZE)
81 key_len = hfsplus_bnode_read_u16(node, off) + 2;
83 key_len = tree->max_key_len + 2;
85 hfsplus_bnode_readbytes(node, key, off, key_len);
/*
 * Write 'len' bytes from 'buf' into the node at node-relative offset
 * 'off'.  Mirror image of hfsplus_bnode_readbytes(): the data may span
 * pages, each touched page is kmapped for its memcpy and marked dirty
 * so it will be written back.
 * NOTE(review): the loop header and 'buf'/'len' advancement are on
 * lines missing from this view; code kept byte-for-byte.
 */
88 void hfsplus_bnode_writebytes(hfsplus_bnode *node, void *buf,
89 unsigned long off, unsigned long len)
94 off += node->page_offset;
95 pagep = node->page + (off >> PAGE_CACHE_SHIFT);
96 off &= ~PAGE_CACHE_MASK;
/* partial first page */
98 l = min(len, PAGE_CACHE_SIZE - off);
99 memcpy(hfsplus_kmap(*pagep) + off, buf, l);
100 set_page_dirty(*pagep);
101 hfsplus_kunmap(*pagep++);
/* remaining full pages */
105 l = min(len, PAGE_CACHE_SIZE);
106 memcpy(hfsplus_kmap(*pagep), buf, l);
107 set_page_dirty(*pagep);
108 hfsplus_kunmap(*pagep++);
/*
 * Store 'data' into the node at offset 'off' in big-endian on-disk
 * order.  'data' is reused to hold the byteswapped value before the
 * raw 2-byte write.
 */
112 void hfsplus_bnode_write_u16(hfsplus_bnode *node, unsigned long off, u16 data)
114 data = cpu_to_be16(data);
116 hfsplus_bnode_writebytes(node, &data, off, 2);
/*
 * Copy 'len' bytes from offset 'src' in 'src_node' to offset 'dst' in
 * 'dst_node'.  Both offsets are translated to page/offset pairs; the
 * copy then proceeds page by page.  Destination pages are dirtied.
 * NOTE(review): the branch structure is incomplete in this view.  The
 * first page-by-page loop (lines 138-149) uses 'src' for BOTH mapped
 * pointers, which is only correct when the two offsets share the same
 * in-page alignment -- presumably a missing 'if (src == dst)' guards
 * it, with the misaligned general case handled by the src_ptr/dst_ptr
 * loop below (lines 152-175).  Confirm against the full source.
 */
119 void hfsplus_bnode_copybytes(hfsplus_bnode *dst_node, unsigned long dst,
120 hfsplus_bnode *src_node, unsigned long src, unsigned long len)
122 struct hfsplus_btree *tree;
123 struct page **src_page, **dst_page;
126 dprint(DBG_BNODE_MOD, "copybytes: %lu,%lu,%lu\n", dst, src, len);
129 tree = src_node->tree;
/* convert both node-relative offsets to (page, in-page offset) */
130 src += src_node->page_offset;
131 dst += dst_node->page_offset;
132 src_page = src_node->page + (src >> PAGE_CACHE_SHIFT);
133 src &= ~PAGE_CACHE_MASK;
134 dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT);
135 dst &= ~PAGE_CACHE_MASK;
/* aligned case: whole chunks share one in-page offset */
138 l = min(len, PAGE_CACHE_SIZE - src);
139 memcpy(hfsplus_kmap(*dst_page) + src, hfsplus_kmap(*src_page) + src, l);
140 hfsplus_kunmap(*src_page++);
141 set_page_dirty(*dst_page);
142 hfsplus_kunmap(*dst_page++);
145 l = min(len, PAGE_CACHE_SIZE);
146 memcpy(hfsplus_kmap(*dst_page), hfsplus_kmap(*src_page), l);
147 hfsplus_kunmap(*src_page++);
148 set_page_dirty(*dst_page);
149 hfsplus_kunmap(*dst_page++);
/* misaligned case: advance by whichever side hits its page end first */
152 void *src_ptr, *dst_ptr;
155 src_ptr = hfsplus_kmap(*src_page) + src;
156 dst_ptr = hfsplus_kmap(*dst_page) + dst;
157 if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
158 l = PAGE_CACHE_SIZE - src;
162 l = PAGE_CACHE_SIZE - dst;
/* l is further clamped to 'len' on lines missing from this view */
167 memcpy(dst_ptr, src_ptr, l);
168 hfsplus_kunmap(*src_page);
169 set_page_dirty(*dst_page);
170 hfsplus_kunmap(*dst_page);
175 } while ((len -= l));
/*
 * Move 'len' bytes within a single node from offset 'src' to offset
 * 'dst'; the ranges may overlap, so memmove() is used throughout.
 * Two directions exist: when the regions overlap with dst above src
 * the copy must run BACKWARD (high addresses first, lines 188-236),
 * otherwise forward (lines 239-282).  The selecting conditional is on
 * lines missing from this view.
 */
179 void hfsplus_bnode_movebytes(hfsplus_bnode *node, unsigned long dst,
180 unsigned long src, unsigned long len)
182 struct page **src_page, **dst_page;
185 dprint(DBG_BNODE_MOD, "movebytes: %lu,%lu,%lu\n", dst, src, len);
188 src += node->page_offset;
189 dst += node->page_offset;
/*
 * Backward pass: 'src'/'dst' point at the LAST byte of each range;
 * the '+ 1' turns the in-page offset into a byte count from the
 * start of the page.
 */
192 src_page = node->page + (src >> PAGE_CACHE_SHIFT);
193 src = (src & ~PAGE_CACHE_MASK) + 1;
195 dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
196 dst = (dst & ~PAGE_CACHE_MASK) + 1;
/* aligned backward case: move whole page heads, walking pages down */
200 memmove(hfsplus_kmap(*dst_page), hfsplus_kmap(*src_page), src);
201 hfsplus_kunmap(*src_page--);
202 set_page_dirty(*dst_page);
203 hfsplus_kunmap(*dst_page--);
205 src = PAGE_CACHE_SIZE;
/* final partial chunk at offset 'src' within the page */
208 memmove(hfsplus_kmap(*dst_page) + src, hfsplus_kmap(*src_page) + src, len);
209 hfsplus_kunmap(*src_page);
210 set_page_dirty(*dst_page);
211 hfsplus_kunmap(*dst_page);
/* misaligned backward case: chunk by whichever offset is smaller */
213 void *src_ptr, *dst_ptr;
216 src_ptr = hfsplus_kmap(*src_page) + src;
217 dst_ptr = hfsplus_kmap(*dst_page) + dst;
/* when a side is exhausted, wrap to the previous page (missing lines) */
220 src = PAGE_CACHE_SIZE;
225 dst = PAGE_CACHE_SIZE;
228 memmove(dst_ptr - l, src_ptr - l, l);
229 hfsplus_kunmap(*src_page);
230 set_page_dirty(*dst_page);
231 hfsplus_kunmap(*dst_page);
232 if (dst == PAGE_CACHE_SIZE)
236 } while ((len -= l));
/* Forward pass: same page/offset translation as copybytes() */
239 src_page = node->page + (src >> PAGE_CACHE_SHIFT);
240 src &= ~PAGE_CACHE_MASK;
241 dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
242 dst &= ~PAGE_CACHE_MASK;
/* aligned forward case (src/dst share in-page offset, see copybytes) */
245 l = min(len, PAGE_CACHE_SIZE - src);
246 memmove(hfsplus_kmap(*dst_page) + src, hfsplus_kmap(*src_page) + src, l);
247 hfsplus_kunmap(*src_page++);
248 set_page_dirty(*dst_page);
249 hfsplus_kunmap(*dst_page++);
252 l = min(len, PAGE_CACHE_SIZE);
253 memmove(hfsplus_kmap(*dst_page), hfsplus_kmap(*src_page), l);
254 hfsplus_kunmap(*src_page++);
255 set_page_dirty(*dst_page);
256 hfsplus_kunmap(*dst_page++);
/* misaligned forward case */
259 void *src_ptr, *dst_ptr;
262 src_ptr = hfsplus_kmap(*src_page) + src;
263 dst_ptr = hfsplus_kmap(*dst_page) + dst;
264 if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
265 l = PAGE_CACHE_SIZE - src;
269 l = PAGE_CACHE_SIZE - dst;
274 memmove(dst_ptr, src_ptr, l);
275 hfsplus_kunmap(*src_page);
276 set_page_dirty(*dst_page);
277 hfsplus_kunmap(*dst_page);
282 } while ((len -= l));
/*
 * Debug helper: dump a node's descriptor and its record-offset table
 * via dprint().  Walks the offsets stored at the end of the node (last
 * two bytes hold the offset of record 0, and so on downward).  For
 * index nodes it also prints each record's key length and child CNID.
 * Compiled-in debug output only; no functional effect.
 */
287 void hfsplus_bnode_dump(hfsplus_bnode *node)
289 hfsplus_btree_node_desc desc;
293 dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this);
294 hfsplus_bnode_readbytes(node, &desc, 0, sizeof(desc));
295 dprint(DBG_BNODE_MOD, "%d, %d, %d, %d, %d\n",
296 be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
297 desc.kind, desc.height, be16_to_cpu(desc.num_rec));
/* offset table grows downward from the node's last 2 bytes */
299 off = node->tree->node_size - 2;
300 for (i = be16_to_cpu(desc.num_rec); i >= 0; off -= 2, i--) {
301 key_off = hfsplus_bnode_read_u16(node, off);
302 dprint(DBG_BNODE_MOD, " %d", key_off);
303 if (i && node->kind == HFSPLUS_NODE_NDX) {
/* index record: 2-byte key length, key, then 4-byte child CNID */
306 tmp = hfsplus_bnode_read_u16(node, key_off);
307 dprint(DBG_BNODE_MOD, " (%d", tmp);
308 hfsplus_bnode_readbytes(node, &cnid, key_off + 2 + tmp, 4);
309 dprint(DBG_BNODE_MOD, ",%d)", be32_to_cpu(cnid));
312 dprint(DBG_BNODE_MOD, "\n");
/*
 * Grow the tree by one level: allocate a new node and make it the
 * root.  If the tree was empty the new root is a leaf (and becomes
 * both leaf_head and leaf_tail); otherwise it is an index node holding
 * a single record that points at the old root.  Writes the new node's
 * on-disk descriptor and offset table, then marks the tree's inode
 * dirty so header changes reach disk.
 * NOTE(review): the empty/non-empty conditional, error handling around
 * hfsplus_find_bnode(), and the function's return are on lines missing
 * from this view.
 */
315 int hfsplus_btree_add_level(hfsplus_btree *tree)
317 hfsplus_bnode *node, *new_node;
318 hfsplus_btree_node_desc node_desc;
324 node = hfsplus_find_bnode(tree, tree->root);
325 new_node = hfsplus_btree_alloc_node(tree);
326 if (IS_ERR(new_node))
327 return PTR_ERR(new_node);
329 tree->root = new_node->this;
/* empty tree: new root is the one and only leaf */
331 tree->leaf_head = tree->leaf_tail = new_node->this;
332 new_node->kind = HFSPLUS_NODE_LEAF;
333 new_node->num_recs = 0;
/* otherwise: index root with one record pointing to the old root */
335 new_node->kind = HFSPLUS_NODE_NDX;
336 new_node->num_recs = 1;
338 new_node->parent = 0;
341 new_node->height = ++tree->depth;
/* serialize the in-memory node state into the on-disk descriptor */
343 node_desc.next = cpu_to_be32(new_node->next);
344 node_desc.prev = cpu_to_be32(new_node->prev);
345 node_desc.kind = new_node->kind;
346 node_desc.height = new_node->height;
347 node_desc.num_rec = cpu_to_be16(new_node->num_recs);
348 node_desc.reserved = 0;
349 hfsplus_bnode_writebytes(new_node, &node_desc, 0, sizeof(node_desc));
/* record 0 starts right after the 14-byte node descriptor */
351 rec = tree->node_size - 2;
352 hfsplus_bnode_write_u16(new_node, rec, 14);
355 /* insert old root idx into new root */
356 node->parent = tree->root;
357 key_size = hfsplus_bnode_read_u16(node, 14) + 2;
358 // key_size if index node
359 hfsplus_bnode_copybytes(new_node, 14, node, 14, key_size);
360 cnid = cpu_to_be32(node->this);
361 hfsplus_bnode_writebytes(new_node, &cnid, 14 + key_size, 4);
/* free-space offset: end of the single key+cnid record */
364 hfsplus_bnode_write_u16(new_node, rec, 14 + key_size + 4);
366 hfsplus_put_bnode(node);
368 hfsplus_put_bnode(new_node);
369 mark_inode_dirty(tree->inode);
/*
 * Split the node referenced by 'fd' roughly in half.  A fresh node is
 * allocated and linked after the original; the records in the upper
 * half of the original are moved into it and both nodes' descriptors
 * (plus the following node's 'prev' link, or the tree's leaf_tail) are
 * updated.  If the record the caller was positioned on migrates to the
 * new node, fd->bnode/record/keyoffset/entryoffset are rebased onto
 * it.  Returns the new node with a reference held.
 * NOTE(review): the record-scanning loop headers, the error returns
 * after hfsplus_btree_alloc_node(), and the final return are on lines
 * missing from this view; code kept byte-for-byte.
 */
374 hfsplus_bnode *hfsplus_bnode_split(struct hfsplus_find_data *fd)
377 hfsplus_bnode *node, *new_node;
378 hfsplus_btree_node_desc node_desc;
379 int num_recs, new_rec_off, new_off, old_rec_off;
380 int data_start, data_end, size;
384 new_node = hfsplus_btree_alloc_node(tree);
385 if (IS_ERR(new_node))
387 hfsplus_get_bnode(node);
388 dprint(DBG_BNODE_MOD, "split_nodes: %d - %d - %d\n",
389 node->this, new_node->this, node->next);
/* link the new node into the sibling chain after 'node' */
390 new_node->next = node->next;
391 new_node->prev = node->this;
392 new_node->parent = node->parent;
393 new_node->kind = node->kind;
394 new_node->height = node->height;
/* scan the offset table to find the record crossing the midpoint */
396 size = tree->node_size / 2;
397 old_rec_off = tree->node_size - 4;
400 data_start = hfsplus_bnode_read_u16(node, old_rec_off);
401 if (data_start > size)
405 if (num_recs < node->num_recs)
/* nothing to move -- drop both references and bail (missing lines) */
408 hfsplus_put_bnode(node);
409 hfsplus_put_bnode(new_node);
/* rebase the caller's find_data if its record moves to the new node */
413 if (fd->record + 1 < num_recs) {
414 /* new record is in the lower half,
415 * so leave some more space there
419 data_start = hfsplus_bnode_read_u16(node, old_rec_off);
421 hfsplus_put_bnode(node);
422 hfsplus_get_bnode(new_node);
423 fd->bnode = new_node;
424 fd->record -= num_recs;
425 fd->keyoffset -= data_start;
426 fd->entryoffset -= data_start;
428 new_node->num_recs = node->num_recs - num_recs;
429 node->num_recs = num_recs;
/* rebuild the new node's offset table, rebased by 'size' */
431 new_rec_off = tree->node_size - 2;
433 size = data_start - new_off;
434 num_recs = new_node->num_recs;
435 data_end = data_start;
437 hfsplus_bnode_write_u16(new_node, new_rec_off, new_off);
440 data_end = hfsplus_bnode_read_u16(node, old_rec_off);
441 new_off = data_end - size;
444 hfsplus_bnode_write_u16(new_node, new_rec_off, new_off);
/* move the upper-half record data after the new node's descriptor */
445 hfsplus_bnode_copybytes(new_node, 14, node, data_start, data_end - data_start);
447 /* update new bnode header */
448 node_desc.next = cpu_to_be32(new_node->next);
449 node_desc.prev = cpu_to_be32(new_node->prev);
450 node_desc.kind = new_node->kind;
451 node_desc.height = new_node->height;
452 node_desc.num_rec = cpu_to_be16(new_node->num_recs);
453 node_desc.reserved = 0;
454 hfsplus_bnode_writebytes(new_node, &node_desc, 0, sizeof(node_desc));
456 /* update previous bnode header */
457 node->next = new_node->this;
458 hfsplus_bnode_readbytes(node, &node_desc, 0, sizeof(node_desc));
459 node_desc.next = cpu_to_be32(node->next);
460 node_desc.num_rec = cpu_to_be16(node->num_recs);
461 hfsplus_bnode_writebytes(node, &node_desc, 0, sizeof(node_desc));
463 /* update next bnode header */
464 if (new_node->next) {
465 hfsplus_bnode *next_node = hfsplus_find_bnode(tree, new_node->next);
466 next_node->prev = new_node->this;
467 hfsplus_bnode_readbytes(next_node, &node_desc, 0, sizeof(node_desc));
468 node_desc.prev = cpu_to_be32(next_node->prev);
469 hfsplus_bnode_writebytes(next_node, &node_desc, 0, sizeof(node_desc));
470 hfsplus_put_bnode(next_node);
471 } else if (node->this == tree->leaf_tail) {
472 /* if there is no next node, this might be the new tail */
473 tree->leaf_tail = new_node->this;
474 mark_inode_dirty(tree->inode);
477 hfsplus_bnode_dump(node);
478 hfsplus_bnode_dump(new_node);
479 hfsplus_put_bnode(node);
/*
 * Insert a new record (search key from fd->search_key plus 'entry' of
 * 'entry_len' bytes) after the position found in 'fd'.  If the tree is
 * empty a first level is created; if the target node lacks room it is
 * split first.  After inserting, the parent index record is updated
 * (or created, recursing up through hfsplus_update_idx_rec /
 * a further insert) so the new node stays reachable.
 * NOTE(review): many control-flow lines (the empty-tree branch, the
 * split bookkeeping, loop headers, and returns) are missing from this
 * view; code kept byte-for-byte.
 */
484 int hfsplus_bnode_insert_rec(struct hfsplus_find_data *fd, void *entry, int entry_len)
487 hfsplus_bnode *node, *new_node;
488 int size, key_len, rec;
489 int data_off, end_off;
490 int idx_rec_off, data_rec_off, end_rec_off;
/* empty tree: build the first (leaf) level before inserting */
496 hfsplus_btree_add_level(tree);
497 fd->bnode = hfsplus_find_bnode(tree, tree->leaf_head);
502 /* new record idx and complete record size */
503 rec = fd->record + 1;
504 key_len = be16_to_cpu(fd->search_key->key_len) + 2;
505 size = key_len + entry_len;
508 hfsplus_bnode_dump(node);
509 /* get last offset */
510 end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
511 end_off = hfsplus_bnode_read_u16(node, end_rec_off);
513 dprint(DBG_BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", rec, size, end_off, end_rec_off);
/* no room between record data and the offset table: split the node */
514 if (size > end_rec_off - end_off) {
516 panic("not enough room!\n");
517 new_node = hfsplus_bnode_split(fd);
518 if (IS_ERR(new_node))
519 return PTR_ERR(new_node);
522 if (node->kind == HFSPLUS_NODE_LEAF) {
524 mark_inode_dirty(tree->inode);
527 /* write new last offset */
528 hfsplus_bnode_write_u16(node, offsetof(hfsplus_btree_node_desc, num_rec), node->num_recs);
529 hfsplus_bnode_write_u16(node, end_rec_off, end_off + size);
531 data_rec_off = end_rec_off + 2;
532 idx_rec_off = tree->node_size - (rec + 1) * 2;
533 if (idx_rec_off == data_rec_off)
535 /* move all following entries */
537 data_off = hfsplus_bnode_read_u16(node, data_rec_off + 2);
538 hfsplus_bnode_write_u16(node, data_rec_off, data_off + size);
540 } while (data_rec_off < idx_rec_off);
/* shift record data up to open a 'size'-byte gap at data_off */
543 hfsplus_bnode_movebytes(node, data_off + size, data_off,
/* write the new key + entry into the gap */
547 hfsplus_bnode_writebytes(node, fd->search_key, data_off, key_len);
548 hfsplus_bnode_writebytes(node, entry, data_off + key_len, entry_len);
549 hfsplus_bnode_dump(node);
/* inserting at record 0 changes the node's first key: fix parent */
552 if (!rec && new_node != node)
553 hfsplus_update_idx_rec(fd);
555 hfsplus_put_bnode(fd->bnode);
556 if (!new_node->parent) {
557 hfsplus_btree_add_level(tree);
558 new_node->parent = tree->root;
560 fd->bnode = hfsplus_find_bnode(tree, new_node->parent);
562 /* create index data entry */
563 cnid = cpu_to_be32(new_node->this);
565 entry_len = sizeof(cnid);
/* re-run the search with the new node's first key to position fd */
568 hfsplus_bnode_read_key(new_node, fd->search_key, 14);
569 hfsplus_find_rec(fd->bnode, fd);
571 hfsplus_put_bnode(new_node);
577 hfsplus_update_idx_rec(fd);
/*
 * Propagate a changed first key of fd's node into its parent index
 * record.  Reads the node's new first key, locates the matching record
 * in the parent, resizes that record by 'diff' bytes (splitting the
 * parent if it lacks room), shifts the following records, and copies
 * the new key in.  Recurses upward when the parent's own first key
 * changed.
 * NOTE(review): the recursion/termination lines, error returns, and
 * several loop headers are missing from this view; code kept
 * byte-for-byte.
 */
582 int hfsplus_update_idx_rec(struct hfsplus_find_data *fd)
585 hfsplus_bnode *node, *new_node, *parent;
587 int rec, rec_off, end_rec_off;
588 int start_off, end_off;
597 parent = hfsplus_find_bnode(tree, node->parent);
598 hfsplus_find_rec(parent, fd);
599 hfsplus_bnode_dump(parent);
602 /* size difference between old and new key */
603 newkeylen = hfsplus_bnode_read_u16(node, 14) + 2;
604 dprint(DBG_BNODE_MOD, "update_rec: %d, %d, %d\n", rec, fd->keylength, newkeylen);
606 rec_off = tree->node_size - (rec + 2) * 2;
607 end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
608 diff = newkeylen - fd->keylength;
/* growing the key needs 'diff' free bytes; split parent if short */
612 end_off = hfsplus_bnode_read_u16(parent, end_rec_off);
613 if (end_rec_off - end_off < diff) {
615 printk("splitting index node...\n");
617 new_node = hfsplus_bnode_split(fd);
618 if (IS_ERR(new_node))
619 return PTR_ERR(new_node);
/* fd may now point into the split node; recompute offsets */
622 rec_off = tree->node_size - (rec + 2) * 2;
623 end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
627 end_off = start_off = hfsplus_bnode_read_u16(parent, rec_off);
628 hfsplus_bnode_write_u16(parent, rec_off, start_off + diff);
629 start_off -= 4; /* move previous cnid too */
/* shift every following record's offset and data by 'diff' */
631 while (rec_off > end_rec_off) {
633 end_off = hfsplus_bnode_read_u16(parent, rec_off);
634 hfsplus_bnode_write_u16(parent, rec_off, end_off + diff);
636 hfsplus_bnode_movebytes(parent, start_off + diff, start_off,
637 end_off - start_off);
/* copy the node's new first key into the parent record */
639 hfsplus_bnode_copybytes(parent, fd->keyoffset, node, 14, newkeylen);
640 hfsplus_bnode_dump(parent);
642 hfsplus_put_bnode(node);
/* a split created a sibling: insert its index record in its parent */
648 fd->bnode = hfsplus_find_bnode(tree, new_node->parent);
649 /* create index key and entry */
650 hfsplus_bnode_read_key(new_node, fd->search_key, 14);
651 cnid = cpu_to_be32(new_node->this);
653 hfsplus_find_rec(fd->bnode, fd);
654 hfsplus_bnode_insert_rec(fd, &cnid, sizeof(cnid));
655 hfsplus_put_bnode(fd->bnode);
656 hfsplus_put_bnode(new_node);
659 if (new_node == node)
661 /* restore search_key */
662 hfsplus_bnode_read_key(node, fd->search_key, 14);
/* first record changed and there is a grandparent: recurse upward */
666 if (!rec && node->parent)
/*
 * Remove the record at fd's current position.  If the node becomes
 * empty it is removed from the tree and the deletion continues with
 * the parent's record for this node.  Otherwise the following records'
 * offsets are shifted down by the removed record's size and the data
 * is compacted with movebytes().  Removing record 0 updates the parent
 * index key via hfsplus_update_idx_rec().
 * NOTE(review): the loop over the parent chain, the num_recs decrement
 * in the descriptor, and the returns are on lines missing from this
 * view; code kept byte-for-byte.
 */
673 int hfsplus_bnode_remove_rec(struct hfsplus_find_data *fd)
676 hfsplus_bnode *node, *parent;
677 int end_off, rec_off, data_off, size;
682 rec_off = tree->node_size - (fd->record + 2) * 2;
683 end_off = tree->node_size - (node->num_recs + 1) * 2;
685 if (node->kind == HFSPLUS_NODE_LEAF) {
687 mark_inode_dirty(tree->inode);
689 hfsplus_bnode_dump(node);
690 dprint(DBG_BNODE_MOD, "remove_rec: %d, %d\n", fd->record, fd->keylength + fd->entrylength);
/* last record removed: drop the node and recurse into the parent */
691 if (!--node->num_recs) {
692 hfsplus_btree_remove_node(node);
695 parent = hfsplus_find_bnode(tree, node->parent);
698 hfsplus_put_bnode(node);
699 node = fd->bnode = parent;
701 hfsplus_find_rec(node, fd);
/* write the decremented record count back to the on-disk descriptor */
704 hfsplus_bnode_write_u16(node, offsetof(hfsplus_btree_node_desc, num_rec), node->num_recs);
706 if (rec_off == end_off)
708 size = fd->keylength + fd->entrylength;
/* shift the offset table entries of all following records down */
711 data_off = hfsplus_bnode_read_u16(node, rec_off);
712 hfsplus_bnode_write_u16(node, rec_off + 2, data_off - size);
714 } while (rec_off >= end_off);
/* close the gap left by the removed record's data */
717 hfsplus_bnode_movebytes(node, fd->keyoffset, fd->keyoffset + size,
718 data_off - fd->keyoffset - size);
720 hfsplus_bnode_dump(node);
722 hfsplus_update_idx_rec(fd);
726 /* Check for valid kind/height pairs , return 0 for bad pairings */
726 /* Check for valid kind/height pairs , return 0 for bad pairings */
/*
 * Sanity-check an on-disk node descriptor: header/map nodes, leaves
 * and index nodes each have a required height (leaves presumably
 * height 1, head/map height 0 -- those comparisons are on lines
 * missing from this view; only the index-node check is visible).
 * Returns 0 on any bad pairing after logging the reason.
 */
727 static int hfsplus_check_kh(hfsplus_btree *tree, u8 kind, u8 height)
729 if ((kind == HFSPLUS_NODE_HEAD) || (kind == HFSPLUS_NODE_MAP)) {
732 } else if (kind == HFSPLUS_NODE_LEAF) {
735 } else if (kind == HFSPLUS_NODE_NDX) {
/* index nodes live strictly between the leaves and the root */
736 if ((height <= 1) || (height > tree->depth))
739 printk("HFS+-fs: unknown node type in B*Tree\n");
744 printk("HFS+-fs: corrupt node height in B*Tree\n");
/*
 * Fold a 32-bit node number into a bucket index for the per-tree node
 * hash table.  Mixes the high half into the low half before masking;
 * NODE_HASH_SIZE must be a power of two for the mask to be valid.
 * NOTE(review): one folding step (line 751) is missing from this view.
 */
748 static inline int hfsplus_bnode_hash(u32 num)
750 num = (num >> 16) + num;
752 return num & (NODE_HASH_SIZE - 1);
/*
 * Look up a cached bnode by CNID in the tree's hash table.  Rejects
 * out-of-range CNIDs with a console warning.  Walks the bucket's
 * next_hash chain; returns the matching node (the return statements
 * are on lines missing from this view).  Caller is expected to hold
 * tree->hash_lock -- every visible caller in this file does.
 */
755 hfsplus_bnode *__hfsplus_find_bnode(hfsplus_btree *tree, u32 cnid)
759 if (cnid >= tree->node_count) {
760 printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid);
764 for (node = tree->node_hash[hfsplus_bnode_hash(cnid)];
765 node; node = node->next_hash) {
766 if (node->this == cnid) {
/*
 * Allocate an in-memory bnode for 'cnid', insert it into the tree's
 * hash table (marked HFSPLUS_BNODE_NEW), and pull its backing pages
 * from the inode's page cache, reading them in when not up to date.
 * If another thread raced us into the hash table, this copy is dropped
 * and we wait for the winner's NEW bit to clear (lines 811-814).
 * On page-read failure the node is flagged HFSPLUS_BNODE_ERROR.
 * NOTE(review): kmalloc-failure handling, the race-loser cleanup, the
 * loop structure around grab_cache_page(), and the returns are on
 * lines missing from this view; code kept byte-for-byte.  Also note
 * the dprint at lines 798-799 dereferences node->tree before any
 * visible assignment of it -- confirm against the full source.
 */
773 hfsplus_bnode *__hfsplus_create_bnode(hfsplus_btree *tree, u32 cnid)
775 struct super_block *sb;
776 hfsplus_bnode *node, *node2;
777 struct address_space *mapping;
779 int size, block, i, hash;
782 if (cnid >= tree->node_count) {
783 printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid);
787 sb = tree->inode->i_sb;
/* node struct and its page-pointer array in one allocation */
788 size = sizeof(hfsplus_bnode) + tree->pages_per_bnode *
789 sizeof(struct page *);
790 node = kmalloc(size, GFP_KERNEL);
793 memset(node, 0, size);
796 set_bit(HFSPLUS_BNODE_NEW, &node->flags);
797 atomic_set(&node->refcnt, 1);
798 dprint(DBG_BNODE_REFS, "new_node(%d:%d): 1\n",
799 node->tree->cnid, node->this);
800 init_waitqueue_head(&node->lock_wq);
/* insert under hash_lock, unless someone beat us to it */
801 spin_lock(&tree->hash_lock);
802 node2 = __hfsplus_find_bnode(tree, cnid);
804 hash = hfsplus_bnode_hash(cnid);
805 node->next_hash = tree->node_hash[hash];
806 tree->node_hash[hash] = node;
807 tree->node_hash_cnt++;
809 spin_unlock(&tree->hash_lock);
/* lost the race: wait until the winner finishes initializing */
811 wait_event(node2->lock_wq, !test_bit(HFSPLUS_BNODE_NEW, &node2->flags));
814 spin_unlock(&tree->hash_lock);
/* map the node's byte range onto page-cache pages */
816 mapping = tree->inode->i_mapping;
817 off = (loff_t)cnid * tree->node_size;
818 block = off >> PAGE_CACHE_SHIFT;
819 node->page_offset = off & ~PAGE_CACHE_MASK;
820 for (i = 0; i < tree->pages_per_bnode; i++) {
821 page = grab_cache_page(mapping, block++);
824 if (!PageUptodate(page)) {
825 if (mapping->a_ops->readpage(NULL, page))
827 wait_on_page_locked(page);
828 if (!PageUptodate(page))
833 page_cache_release(page);
835 node->page[i] = page;
/* error path: release the failed page, mark the node bad */
841 page_cache_release(page);
842 set_bit(HFSPLUS_BNODE_ERROR, &node->flags);
/*
 * Unlink a bnode from its tree's hash table and decrement the cached
 * node count.  Walks the bucket chain via pointer-to-pointer so the
 * predecessor's link can be spliced directly.  Caller must hold
 * tree->hash_lock (every visible caller in this file does).
 */
846 void __hfsplus_bnode_remove(hfsplus_bnode *node)
850 dprint(DBG_BNODE_REFS, "remove_node(%d:%d): %d\n",
851 node->tree->cnid, node->this, atomic_read(&node->refcnt));
852 for (p = &node->tree->node_hash[hfsplus_bnode_hash(node->this)];
853 *p && *p != node; p = &(*p)->next_hash)
857 *p = node->next_hash;
858 node->tree->node_hash_cnt--;
861 /* Load a particular node out of a tree */
861 /* Load a particular node out of a tree */
/*
 * Find-or-create entry point for bnodes.  Fast path: node already in
 * the hash -- take a reference and wait for any in-progress
 * initialization.  Slow path: __hfsplus_create_bnode(), then parse the
 * node descriptor from page 0, validate kind/height with
 * hfsplus_check_kh(), and walk the record offset table checking that
 * offsets are increasing, in range, even, and that each record's key
 * length fits inside it.  Any failure jumps to the error tail (lines
 * 921-925): flag ERROR, clear NEW, wake waiters, drop the node and
 * return -EIO.
 * NOTE(review): the success return, goto labels, and several
 * validation branches are on lines missing from this view.
 */
862 hfsplus_bnode *hfsplus_find_bnode(hfsplus_btree *tree, u32 num)
865 hfsplus_btree_node_desc *desc;
866 int i, rec_off, off, next_off;
867 int entry_size, key_size;
869 spin_lock(&tree->hash_lock);
870 node = __hfsplus_find_bnode(tree, num);
/* cached: ref it and wait out any concurrent initialization */
872 hfsplus_get_bnode(node);
873 spin_unlock(&tree->hash_lock);
874 wait_event(node->lock_wq, !test_bit(HFSPLUS_BNODE_NEW, &node->flags));
877 spin_unlock(&tree->hash_lock);
878 node = __hfsplus_create_bnode(tree, num);
880 return ERR_PTR(-ENOMEM);
881 if (!test_bit(HFSPLUS_BNODE_NEW, &node->flags))
/* freshly created: decode the on-disk descriptor from page 0 */
884 desc = (hfsplus_btree_node_desc *)(hfsplus_kmap(node->page[0]) + node->page_offset);
885 node->prev = be32_to_cpu(desc->prev);
886 node->next = be32_to_cpu(desc->next);
887 node->num_recs = be16_to_cpu(desc->num_rec);
888 node->kind = desc->kind;
889 node->height = desc->height;
891 if (!hfsplus_check_kh(tree, desc->kind, desc->height)) {
892 hfsplus_kunmap(node->page[0]);
895 hfsplus_kunmap(node->page[0]);
/* validate the record offset table (record 0 starts after desc) */
897 rec_off = tree->node_size - 2;
898 off = hfsplus_bnode_read_u16(node, rec_off);
899 if (off != sizeof(hfsplus_btree_node_desc))
901 for (i = 1; i <= node->num_recs; off = next_off, i++) {
903 next_off = hfsplus_bnode_read_u16(node, rec_off);
904 if (next_off <= off ||
905 next_off > tree->node_size ||
908 entry_size = next_off - off;
909 if (node->kind != HFSPLUS_NODE_NDX &&
910 node->kind != HFSPLUS_NODE_LEAF)
/* keyed records: stored key length must fit and be even */
912 key_size = hfsplus_bnode_read_u16(node, off) + 2;
913 if (key_size >= entry_size || key_size & 1)
916 clear_bit(HFSPLUS_BNODE_NEW, &node->flags);
917 wake_up(&node->lock_wq);
/* error tail: mark bad, release, report I/O error */
921 set_bit(HFSPLUS_BNODE_ERROR, &node->flags);
922 clear_bit(HFSPLUS_BNODE_NEW, &node->flags);
923 wake_up(&node->lock_wq);
924 hfsplus_put_bnode(node);
925 return ERR_PTR(-EIO);
/*
 * Free an in-memory bnode.  The page-release loop is commented out in
 * the original -- presumably because the pages are dropped elsewhere
 * (hfsplus_put_bnode releases page references); the kfree of the node
 * itself is on a line missing from this view.
 */
928 void hfsplus_bnode_free(hfsplus_bnode *node)
932 //for (i = 0; i < node->tree->pages_per_bnode; i++)
933 // if (node->page[i])
934 // page_cache_release(node->page[i]);
/*
 * Create a bnode that is about to be (re)written from scratch: like
 * hfsplus_find_bnode() but instead of parsing/validating the on-disk
 * content, every backing page is zero-filled and dirtied.  The first
 * page is cleared only from page_offset for node_size bytes; remaining
 * pages are cleared whole.  Finishes by clearing the NEW bit and
 * waking any waiters.
 * NOTE(review): the handling when the node is already cached (between
 * lines 946 and 949), the ENOMEM branch shape, the 'pagep' setup, and
 * the return are on lines missing from this view.
 */
938 hfsplus_bnode *hfsplus_create_bnode(hfsplus_btree *tree, u32 num)
944 spin_lock(&tree->hash_lock);
945 node = __hfsplus_find_bnode(tree, num);
946 spin_unlock(&tree->hash_lock);
949 node = __hfsplus_create_bnode(tree, num);
951 return ERR_PTR(-ENOMEM);
/* zero the node's bytes: first (possibly partial) page... */
954 memset(hfsplus_kmap(*pagep) + node->page_offset, 0,
955 min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
956 set_page_dirty(*pagep);
957 hfsplus_kunmap(*pagep++);
/* ...then any remaining whole pages */
958 for (i = 1; i < tree->pages_per_bnode; i++) {
959 memset(hfsplus_kmap(*pagep), 0, PAGE_CACHE_SIZE);
960 set_page_dirty(*pagep);
961 hfsplus_kunmap(*pagep++);
963 clear_bit(HFSPLUS_BNODE_NEW, &node->flags);
964 wake_up(&node->lock_wq);
/*
 * Take a reference on a bnode.  Bumps the refcount and, under some
 * condition on lines missing from this view, also takes a reference on
 * each backing page (balanced by the put_page loop in
 * hfsplus_put_bnode).
 */
969 void hfsplus_get_bnode(hfsplus_bnode *node)
972 atomic_inc(&node->refcnt);
976 for (i = 0; i < node->tree->pages_per_bnode; i++)
977 get_page(node->page[i]);
980 dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
981 node->tree->cnid, node->this, atomic_read(&node->refcnt));
985 /* Dispose of resources used by a node */
985 /* Dispose of resources used by a node */
/*
 * Drop a reference on a bnode.  A zero refcount at entry is a bug
 * (checked at line 994; the reaction is on a missing line).  When the
 * count does not hit zero, only the per-page references taken in
 * hfsplus_get_bnode() are released.  On the final put (taken with
 * tree->hash_lock held via atomic_dec_and_lock), pages are marked
 * accessed and released, and a node flagged DELETED is unhashed and
 * freed (both on-disk allocation and in-memory struct).
 * NOTE(review): the branch shape between the non-final and final put
 * paths relies on lines missing from this view.
 */
986 void hfsplus_put_bnode(hfsplus_bnode *node)
989 struct hfsplus_btree *tree = node->tree;
992 dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
993 node->tree->cnid, node->this, atomic_read(&node->refcnt));
994 if (!atomic_read(&node->refcnt))
/* not the last reference: just balance the page refs and return */
996 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) {
998 for (i = 0; i < tree->pages_per_bnode; i++)
999 put_page(node->page[i]);
/* last reference, hash_lock held */
1003 for (i = 0; i < tree->pages_per_bnode; i++) {
1004 mark_page_accessed(node->page[i]);
1006 put_page(node->page[i]);
1010 if (test_bit(HFSPLUS_BNODE_DELETED, &node->flags)) {
1011 __hfsplus_bnode_remove(node);
1012 spin_unlock(&tree->hash_lock);
1013 hfsplus_btree_free_node(node);
1014 hfsplus_bnode_free(node);
1017 spin_unlock(&tree->hash_lock);
/*
 * Acquire the node's LOCK bit, sleeping on the node's waitqueue until
 * the test_and_set succeeds (i.e. until the previous holder's unlock
 * wakes us and the bit is free).
 */
1021 void hfsplus_lock_bnode(hfsplus_bnode *node)
1023 wait_event(node->lock_wq, !test_and_set_bit(HFSPLUS_BNODE_LOCK, &node->flags));
/*
 * Release the node's LOCK bit and wake any sleepers in
 * hfsplus_lock_bnode().  The waitqueue_active check just avoids a
 * needless wake_up when nobody is waiting.
 */
1026 void hfsplus_unlock_bnode(hfsplus_bnode *node)
1028 clear_bit(HFSPLUS_BNODE_LOCK, &node->flags);
1029 if (waitqueue_active(&node->lock_wq))
1030 wake_up(&node->lock_wq);