5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
8 * E-mail regarding any portion of the Linux UDF file system should be
9 * directed to the development team mailing list (run by majordomo):
10 * linux_udf@hpesjro.fc.hp.com
13 * This file is distributed under the terms of the GNU General Public
14 * License (GPL). Copies of the GPL can be obtained from:
15 * ftp://prep.ai.mit.edu/pub/gnu/GPL
16 * Each contributing author retains all rights to their own work.
18 * (C) 1998 Dave Boynton
19 * (C) 1998-2001 Ben Fennema
20 * (C) 1999-2000 Stelias Computing Inc
24 * 10/04/98 dgb Added rudimentary directory functions
25 * 10/07/98 Fully working udf_block_map! It works!
26 * 11/25/98 bmap altered to better support extents
27 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
28 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
29 * block boundaries (which is not actually allowed)
30 * 12/20/98 added support for strategy 4096
31 * 03/07/99 rewrote udf_block_map (again)
32 * New funcs, inode_bmap, udf_next_aext
33 * 04/19/99 Support for writing device EA's for major/minor #
37 #include <linux/locks.h>
39 #include <linux/smp_lock.h>
40 #include <linux/module.h>
45 MODULE_AUTHOR("Ben Fennema");
46 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
47 MODULE_LICENSE("GPL");
49 #define EXTENT_MERGE_SIZE 5
/*
 * Forward declarations for the static helpers defined later in this file.
 * The extent helpers all operate on a small scratch array of long_ad
 * descriptors (laarr[EXTENT_MERGE_SIZE]) that inode_getblk() builds up,
 * splits, preallocates into, merges, and finally writes back.
 */
51 static mode_t udf_convert_permissions(struct fileEntry *);
52 static int udf_update_inode(struct inode *, int);
53 static void udf_fill_inode(struct inode *, struct buffer_head *);
54 static struct buffer_head *inode_getblk(struct inode *, long, int *, long *, int *);
55 static void udf_split_extents(struct inode *, int *, int, int,
56 long_ad [EXTENT_MERGE_SIZE], int *);
57 static void udf_prealloc_extents(struct inode *, int, int,
58 long_ad [EXTENT_MERGE_SIZE], int *);
59 static void udf_merge_extents(struct inode *,
60 long_ad [EXTENT_MERGE_SIZE], int *);
61 static void udf_update_extents(struct inode *,
62 long_ad [EXTENT_MERGE_SIZE], int, int,
63 lb_addr, uint32_t, struct buffer_head **);
64 static int udf_get_block(struct inode *, long, struct buffer_head *, int);
72 * This routine is called whenever the kernel no longer needs the inode.
75 * July 1, 1997 - Andrew E. Mileski
76 * Written, tested, and released.
78 * Called at each iput()
/*
 * Called at each iput().  On a writable mount, release any blocks that
 * were preallocated for this inode beyond its current size.
 * NOTE(review): lines are missing from this extract between the test and
 * the call (the original presumably brackets the call with locking) --
 * verify against the full source.
 */
80 void udf_put_inode(struct inode * inode)
82 if (!(inode->i_sb->s_flags & MS_RDONLY))
85 udf_discard_prealloc(inode);
94 * Clean-up before the specified inode is destroyed.
97 * This routine is called when the kernel destroys an inode structure
98 * ie. when iput() finds i_count == 0.
101 * July 1, 1997 - Andrew E. Mileski
102 * Written, tested, and released.
104 * Called at the last iput() if i_nlink is zero.
/*
 * Called at the last iput() when i_nlink is zero: write the (now dead)
 * on-disc file entry one final time and return the inode to the free pool.
 * Bad inodes are skipped entirely -- there is nothing valid to update.
 * NOTE(review): lines are missing from this extract between the guard and
 * the update (the original presumably truncates the inode first) -- verify
 * against the full source.
 */
106 void udf_delete_inode(struct inode * inode)
110 if (is_bad_inode(inode))
115 udf_update_inode(inode, IS_SYNC(inode));
116 udf_free_inode(inode);
/*
 * Drop extents recorded beyond i_size (i.e. preallocation) by truncating
 * the extent list back to the file size.  Nothing to do for empty files,
 * for files whose extent length already matches i_size, or for files whose
 * data lives inside the ICB (no external extents exist to trim).
 */
125 void udf_discard_prealloc(struct inode * inode)
127 if (inode->i_size && inode->i_size != UDF_I_LENEXTENTS(inode) &&
128 UDF_I_ALLOCTYPE(inode) != ICBTAG_FLAG_AD_IN_ICB)
130 udf_truncate_extents(inode);
134 static int udf_writepage(struct page *page)
136 return block_write_full_page(page, udf_get_block);
139 static int udf_readpage(struct file *file, struct page *page)
141 return block_read_full_page(page, udf_get_block);
144 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
146 return block_prepare_write(page, from, to, udf_get_block);
149 static int udf_bmap(struct address_space *mapping, long block)
151 return generic_block_bmap(mapping,block,udf_get_block);
154 struct address_space_operations udf_aops = {
155 readpage: udf_readpage,
156 writepage: udf_writepage,
157 sync_page: block_sync_page,
158 prepare_write: udf_prepare_write,
159 commit_write: generic_commit_write,
/*
 * Convert a file whose data is stored inside the ICB (allocation type
 * AD_IN_ICB) into a normally block-mapped file so it can grow past one
 * block: copy the in-ICB payload into page 0 of the page cache, wipe it
 * from the file entry, switch the allocation type to short/long ADs, and
 * write the page back through the normal aops.
 * NOTE(review): this extract is missing lines (braces, error paths) --
 * read alongside the full source.
 */
163 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
165 struct buffer_head *bh = NULL;
170 /* from now on we have normal address_space methods */
171 inode->i_data.a_ops = &udf_aops;
/* An empty in-ICB file has no payload to migrate: just flip the
 * allocation type and mark the inode dirty. */
173 if (!UDF_I_LENALLOC(inode))
175 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
176 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
178 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
179 mark_inode_dirty(inode);
/* Read the block that holds the file entry itself; the in-ICB data
 * lives inside it, after the allocation-descriptor offset. */
183 block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
184 bh = udf_tread(inode->i_sb, block);
187 page = grab_cache_page(inode->i_mapping, 0);
188 if (!PageLocked(page))
190 if (!Page_Uptodate(page))
/* Zero the tail of the page beyond the in-ICB data, then copy the
 * payload out of the file entry into the start of the page. */
193 memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
194 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
195 memcpy(kaddr, bh->b_data + udf_file_entry_alloc_offset(inode),
196 UDF_I_LENALLOC(inode));
197 flush_dcache_page(page);
198 SetPageUptodate(page);
/* Erase the payload from the file entry and switch allocation type. */
201 memset(bh->b_data + udf_file_entry_alloc_offset(inode),
202 0, UDF_I_LENALLOC(inode));
203 UDF_I_LENALLOC(inode) = 0;
204 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
205 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
207 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
208 mark_buffer_dirty_inode(bh, inode);
209 udf_release_data(bh);
/* Push the migrated page through the (now normal) writepage path. */
211 inode->i_data.a_ops->writepage(page);
212 page_cache_release(page);
214 mark_inode_dirty(inode);
/*
 * Convert a directory stored inside the ICB into a directory backed by a
 * freshly allocated block: allocate the block, re-write every file
 * identifier descriptor from the ICB into it (re-tagging each with the
 * new block location), erase the in-ICB copy, and record the new block
 * as the directory's single extent.  Returns the new block's buffer via
 * *block / the return value; *err reports allocation failures.
 * NOTE(review): this extract is missing lines (braces, early returns) --
 * read alongside the full source.
 */
218 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
221 struct buffer_head *sbh = NULL, *dbh = NULL;
223 uint32_t elen, extoffset;
225 struct udf_fileident_bh sfibh, dfibh;
/* f_pos/size are in 4-byte units (>> 2), matching udf_fileident_read. */
226 loff_t f_pos = udf_ext0_offset(inode) >> 2;
227 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
228 struct fileIdentDesc cfi, *sfi, *dfi;
232 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
233 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
235 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
236 mark_inode_dirty(inode);
240 /* alloc block, and copy data to it */
241 *block = udf_new_block(inode->i_sb, inode,
242 UDF_I_LOCATION(inode).partitionReferenceNum,
243 UDF_I_LOCATION(inode).logicalBlockNum, err);
247 newblock = udf_get_pblock(inode->i_sb, *block,
248 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
/* Source buffer: the block holding the file entry (in-ICB data). */
251 sbh = udf_tread(inode->i_sb, inode->i_ino);
/* Destination buffer: the newly allocated directory block. */
254 dbh = udf_tgetblk(inode->i_sb, newblock);
258 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
259 mark_buffer_uptodate(dbh, 1);
261 mark_buffer_dirty_inode(dbh, inode);
263 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
264 sfibh.sbh = sfibh.ebh = sbh;
265 dfibh.soffset = dfibh.eoffset = 0;
266 dfibh.sbh = dfibh.ebh = dbh;
/* Walk every directory entry in the ICB and re-emit it into dbh. */
267 while ( (f_pos < size) )
269 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
/* (failure path) release both buffers before bailing out */
272 udf_release_data(sbh);
273 udf_release_data(dbh);
/* Each copied descriptor must be re-tagged with its new location. */
276 sfi->descTag.tagLocation = *block;
277 dfibh.soffset = dfibh.eoffset;
278 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
279 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
280 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
281 sfi->fileIdent + sfi->lengthOfImpUse))
283 udf_release_data(sbh);
284 udf_release_data(dbh);
288 mark_buffer_dirty_inode(dbh, inode);
/* Erase the in-ICB directory data and switch the allocation type. */
290 memset(sbh->b_data + udf_file_entry_alloc_offset(inode),
291 0, UDF_I_LENALLOC(inode));
293 UDF_I_LENALLOC(inode) = 0;
294 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
295 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
297 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
/* Record the new block as the directory's one and only extent. */
298 bloc = UDF_I_LOCATION(inode);
299 eloc.logicalBlockNum = *block;
300 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
301 elen = inode->i_size;
302 UDF_I_LENEXTENTS(inode) = elen;
303 extoffset = udf_file_entry_alloc_offset(inode);
304 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
307 mark_buffer_dirty(sbh);
308 udf_release_data(sbh);
309 mark_inode_dirty(inode);
/*
 * get_block callback used by all the generic aops helpers above: map file
 * block 'block' to a device block in bh_result, allocating a new block
 * via inode_getblk() when 'create' asks for one.
 * NOTE(review): this extract is missing lines (the fast-path guard,
 * locking, error returns) -- read alongside the full source.
 */
314 static int udf_get_block(struct inode *inode, long block, struct buffer_head *bh_result, int create)
317 struct buffer_head *bh;
/* Read-only lookup path: translate and report the mapping as-is. */
322 phys = udf_block_map(inode, block);
325 bh_result->b_dev = inode->i_dev;
326 bh_result->b_blocknr = phys;
327 bh_result->b_state |= (1UL << BH_Mapped);
/* Sequential-write heuristic: advance the next-alloc hint when the
 * request continues right after the previously allocated block. */
341 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
343 UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
344 UDF_I_NEXT_ALLOC_GOAL(inode) ++;
/* Allocation path: inode_getblk() does the extent surgery. */
349 bh = inode_getblk(inode, block, &err, &phys, &new);
357 bh_result->b_dev = inode->i_dev;
358 bh_result->b_blocknr = phys;
359 bh_result->b_state |= (1UL << BH_Mapped);
/* BH_New tells callers the block was just allocated (must be zeroed). */
361 bh_result->b_state |= (1UL << BH_New);
367 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
/*
 * Map (and optionally allocate) file block 'block' and return a
 * buffer_head for it.  Uses a dummy on-stack buffer_head so udf_get_block
 * can report the mapping without touching the buffer cache; only after a
 * successful mapping is the real buffer obtained with sb_getblk().
 * A freshly allocated block (buffer_new) is zero-filled and marked dirty.
 * NOTE(review): this extract is missing lines (braces, return paths) --
 * read alongside the full source.
 */
371 struct buffer_head * udf_getblk(struct inode * inode, long block,
372 int create, int * err)
374 struct buffer_head dummy;
/* Sentinel: lets a failed mapping be distinguished from block -1000. */
377 dummy.b_blocknr = -1000;
378 *err = udf_get_block(inode, block, &dummy, create);
379 if (!*err && buffer_mapped(&dummy))
381 struct buffer_head *bh;
382 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
383 if (buffer_new(&dummy))
386 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
387 mark_buffer_uptodate(bh, 1);
389 mark_buffer_dirty_inode(bh, inode);
/*
 * The heart of UDF block allocation.  Walks the inode's extent list to
 * find the extent containing file block 'block', then either returns the
 * existing mapping or allocates a new block: the containing extent is
 * split into (before / target / after) pieces in laarr[], optionally
 * extended with preallocation, merged, and written back to disc.
 * Outputs: *phys = physical block, *new = 1 if freshly allocated,
 * *err = error code.  p*/c*/n* triples track the previous/current/next
 * positions in the on-disc extent list during the walk.
 * NOTE(review): this extract is missing many lines (braces, loop heads,
 * early returns) -- read alongside the full source.
 */
396 static struct buffer_head * inode_getblk(struct inode * inode, long block,
397 int *err, long *phys, int *new)
399 struct buffer_head *pbh = NULL, *cbh = NULL, *nbh = NULL, *result = NULL;
400 long_ad laarr[EXTENT_MERGE_SIZE];
401 uint32_t pextoffset = 0, cextoffset = 0, nextoffset = 0;
402 int count = 0, startnum = 0, endnum = 0;
404 lb_addr eloc, pbloc, cbloc, nbloc;
406 uint64_t lbcount = 0, b_off = 0;
407 uint32_t newblocknum, newblock, offset = 0;
409 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
412 pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
/* b_off: requested position in bytes from the start of the file. */
413 b_off = (uint64_t)block << inode->i_sb->s_blocksize_bits;
414 pbloc = cbloc = nbloc = UDF_I_LOCATION(inode);
416 /* find the extent which contains the block we are looking for.
417 alternate between laarr[0] and laarr[1] for locations of the
418 current extent, and the previous extent */
/* Shift the prev/cur/next buffer references forward one step. */
423 udf_release_data(pbh);
424 atomic_inc(&cbh->b_count);
429 udf_release_data(cbh);
430 atomic_inc(&nbh->b_count);
439 pextoffset = cextoffset;
440 cextoffset = nextoffset;
/* etype == -1 means we ran off the end of the extent list. */
442 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) == -1)
/* Stash the extent: type in the top 2 bits, length below. */
447 laarr[c].extLength = (etype << 30) | elen;
448 laarr[c].extLocation = eloc;
/* Track the block just past the last allocated extent as a goal. */
450 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
451 pgoal = eloc.logicalBlockNum +
452 ((elen + inode->i_sb->s_blocksize - 1) >>
453 inode->i_sb->s_blocksize_bits);
456 } while (lbcount + elen <= b_off);
/* offset: block index of the target within the found extent. */
459 offset = b_off >> inode->i_sb->s_blocksize_bits;
461 /* if the extent is allocated and recorded, return the block
462 if the extent is not a multiple of the blocksize, round up */
464 if (etype == (EXT_RECORDED_ALLOCATED >> 30))
466 if (elen & (inode->i_sb->s_blocksize - 1))
468 elen = EXT_RECORDED_ALLOCATED |
469 ((elen + inode->i_sb->s_blocksize - 1) &
470 ~(inode->i_sb->s_blocksize - 1));
471 etype = udf_write_aext(inode, nbloc, &cextoffset, eloc, elen, nbh, 1);
473 udf_release_data(pbh);
474 udf_release_data(cbh);
475 udf_release_data(nbh);
476 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
/* Past EOF: round the last extent up to a block boundary... */
483 endnum = startnum = ((count > 1) ? 1 : count);
484 if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
487 (laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
488 (((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
489 inode->i_sb->s_blocksize - 1) &
490 ~(inode->i_sb->s_blocksize - 1));
491 UDF_I_LENEXTENTS(inode) =
492 (UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
493 ~(inode->i_sb->s_blocksize - 1);
/* ...then fabricate an unrecorded extent covering the hole up to
 * and including the requested block. */
496 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
497 ((offset + 1) << inode->i_sb->s_blocksize_bits);
498 memset(&laarr[c].extLocation, 0x00, sizeof(lb_addr));
504 endnum = startnum = ((count > 2) ? 2 : count);
506 /* if the current extent is in position 0, swap it with the previous */
507 if (!c && count != 1)
515 /* if the current block is located in a extent, read the next extent */
518 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 0)) != -1)
520 laarr[c+1].extLength = (etype << 30) | elen;
521 laarr[c+1].extLocation = eloc;
529 udf_release_data(nbh);
533 udf_release_data(cbh);
535 /* if the current extent is not recorded but allocated, get the
536 block in the extent corresponding to the requested block */
537 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
538 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
539 else /* otherwise, allocate a new block */
/* Prefer the sequential-write goal hint; fall back to a block
 * near the inode's own location. */
541 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
542 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
547 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
550 if (!(newblocknum = udf_new_block(inode->i_sb, inode,
551 UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
553 udf_release_data(pbh);
557 UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
560 /* if the extent the requested block is located in contains multiple blocks,
561 split the extent into at most three extents. blocks prior to requested
562 block, requested block, and blocks after requested block */
563 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
565 #ifdef UDF_PREALLOCATE
566 /* preallocate blocks */
567 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
570 /* merge any continuous blocks in laarr */
571 udf_merge_extents(inode, laarr, &endnum);
573 /* write back the new extents, inserting new extents if the new number
574 of extents is greater than the old number, and deleting extents if
575 the new number of extents is less than the old number */
576 udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);
578 udf_release_data(pbh);
580 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
581 UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
/* Remember where we allocated, to serve sequential writes. */
588 UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
589 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
590 inode->i_ctime = CURRENT_TIME;
591 UDF_I_UCTIME(inode) = CURRENT_UTIME;
594 udf_sync_inode(inode);
596 mark_inode_dirty(inode);
/*
 * Split the unrecorded extent laarr[*c] (which spans the requested block
 * at index 'offset') into up to three pieces: the blocks before the
 * target, the target block itself (now recorded+allocated at
 * newblocknum), and the blocks after.  *c and *endnum are updated to
 * reflect the new layout of laarr[].
 * NOTE(review): this extract is missing lines (braces, the offset
 * adjustments between the branches) -- read alongside the full source.
 */
600 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
601 long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
/* Only unrecorded extents (allocated or not) are ever split here. */
603 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
604 (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
607 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
608 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
609 int type = laarr[curr].extLength & ~UDF_EXTENT_LENGTH_MASK;
/* Target at the start or end of the extent: 2-way split (shift the
 * array by one); target in the middle: 3-way split (shift by two). */
613 else if (!offset || blen == offset + 1)
615 laarr[curr+2] = laarr[curr+1];
616 laarr[curr+1] = laarr[curr];
620 laarr[curr+3] = laarr[curr+1];
621 laarr[curr+2] = laarr[curr+1] = laarr[curr];
/* Leading piece: the 'offset' blocks before the target.  If they were
 * allocated but never recorded, free them and leave a hole instead. */
626 if ((type >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
628 udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
629 laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
630 (offset << inode->i_sb->s_blocksize_bits);
631 laarr[curr].extLocation.logicalBlockNum = 0;
632 laarr[curr].extLocation.partitionReferenceNum = 0;
635 laarr[curr].extLength = type |
636 (offset << inode->i_sb->s_blocksize_bits);
/* Middle piece: the target block becomes recorded+allocated. */
642 laarr[curr].extLocation.logicalBlockNum = newblocknum;
643 if ((type >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
644 laarr[curr].extLocation.partitionReferenceNum =
645 UDF_I_LOCATION(inode).partitionReferenceNum;
646 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
647 inode->i_sb->s_blocksize;
/* Trailing piece: whatever remains after the target block. */
650 if (blen != offset + 1)
652 if ((type >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
653 laarr[curr].extLocation.logicalBlockNum += (offset + 1);
654 laarr[curr].extLength = type |
655 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
/*
 * Extend the just-allocated extent at laarr[c] with preallocated blocks
 * (up to UDF_DEFAULT_PREALLOC_BLOCKS), either by growing an existing
 * not-recorded-allocated neighbour or by inserting a new
 * not-recorded-allocated extent after it.  Any following unrecorded
 * extents are shrunk or removed to make room for what was preallocated.
 * NOTE(review): this extract is missing lines (braces, the start/length
 * setup branches) -- read alongside the full source.
 */
662 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
663 long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
665 int start, length = 0, currlength = 0, i;
667 if (*endnum >= (c+1))
/* If the next extent is already allocated-but-unrecorded, it counts
 * toward the preallocation we already own (currlength). */
676 if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
679 length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
680 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Count how many blocks of following unrecorded extents could be
 * covered by preallocation. */
686 for (i=start+1; i<=*endnum; i++)
691 length += UDF_DEFAULT_PREALLOC_BLOCKS;
693 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
694 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
695 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Try to preallocate contiguously after the current extent. */
702 int next = laarr[start].extLocation.logicalBlockNum +
703 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
704 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
705 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
706 laarr[start].extLocation.partitionReferenceNum,
707 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
708 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
/* Either grow the existing allocated extent in place... */
713 laarr[start].extLength +=
714 (numalloc << inode->i_sb->s_blocksize_bits);
/* ...or insert a brand-new allocated-unrecorded extent after c. */
717 memmove(&laarr[c+2], &laarr[c+1],
718 sizeof(long_ad) * (*endnum - (c+1)));
720 laarr[c+1].extLocation.logicalBlockNum = next;
721 laarr[c+1].extLocation.partitionReferenceNum =
722 laarr[c].extLocation.partitionReferenceNum;
723 laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
724 (numalloc << inode->i_sb->s_blocksize_bits);
/* Consume the preallocated length out of the following unrecorded
 * extents, dropping any that are fully covered. */
728 for (i=start+1; numalloc && i<*endnum; i++)
730 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
731 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
735 laarr[c].extLength -=
736 (numalloc << inode->i_sb->s_blocksize_bits);
743 memmove(&laarr[i], &laarr[i+1],
744 sizeof(long_ad) * (*endnum - (i+1)));
749 UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
/*
 * Coalesce adjacent laarr[] entries of the same extent type when they are
 * physically contiguous (or both unrecorded holes).  If the combined
 * length would overflow the 30-bit extent-length field, the first extent
 * is filled to the maximum block-aligned length and the remainder is left
 * in the second; otherwise the pair collapses into one entry and the
 * array is compacted.
 * NOTE(review): this extract is missing lines (braces, the *endnum
 * decrement) -- read alongside the full source.
 */
754 static void udf_merge_extents(struct inode *inode,
755 long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
759 for (i=0; i<(*endnum-1); i++)
761 if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
/* Mergeable when both are holes, or when [i+1] starts exactly at
 * the block after [i] ends. */
763 if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
764 ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
765 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
766 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
/* Overflow case: combined length exceeds the 30-bit field. */
768 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
769 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
770 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
772 laarr[i+1].extLength = (laarr[i+1].extLength -
773 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
774 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
775 laarr[i].extLength = (UDF_EXTENT_LENGTH_MASK + 1) -
776 inode->i_sb->s_blocksize;
777 laarr[i+1].extLocation.logicalBlockNum =
778 laarr[i].extLocation.logicalBlockNum +
779 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
780 inode->i_sb->s_blocksize_bits);
/* Normal case: fold [i+1] into [i] and compact the array. */
784 laarr[i].extLength = laarr[i+1].extLength +
785 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
786 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
788 memmove(&laarr[i+1], &laarr[i+2],
789 sizeof(long_ad) * (*endnum - (i+2)));
/*
 * Write the laarr[] extent array back to the on-disc allocation
 * descriptors, starting at position (pbloc, pextoffset).  When the new
 * extent count differs from the old one, on-disc descriptors are first
 * deleted (startnum > endnum) or inserted (startnum < endnum) so the
 * in-place overwrite loop at the bottom lines up one-to-one.
 * NOTE(review): this extract is missing lines (braces, 'start' setup) --
 * read alongside the full source.
 */
798 static void udf_update_extents(struct inode *inode,
799 long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
800 lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
806 if (startnum > endnum)
808 for (i=0; i<(startnum-endnum); i++)
810 udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
811 laarr[i].extLength, *pbh);
814 else if (startnum < endnum)
816 for (i=0; i<(endnum-startnum); i++)
818 udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
819 laarr[i].extLength, *pbh);
/* Advance past the descriptor just inserted. */
820 udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
821 &laarr[i].extLength, pbh, 1);
/* Overwrite the remaining descriptors in place. */
826 for (i=start; i<endnum; i++)
828 udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
829 udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
830 laarr[i].extLength, *pbh, 1);
/*
 * Like bread() for a UDF file block: map/allocate via udf_getblk(), and
 * if the buffer is not already up to date, read it synchronously from
 * disc before returning it.
 * NOTE(review): this extract is missing lines (braces, wait_on_buffer and
 * the failure path between the two uptodate checks) -- read alongside the
 * full source.
 */
834 struct buffer_head * udf_bread(struct inode * inode, int block,
835 int create, int * err)
837 struct buffer_head * bh = NULL;
839 bh = udf_getblk(inode, block, create, err);
/* Already valid in memory: no I/O needed. */
843 if (buffer_uptodate(bh))
845 ll_rw_block(READ, 1, &bh);
/* Re-check after the read completes. */
847 if (buffer_uptodate(bh))
/*
 * VFS truncate: shrink or grow the inode to i_size.  Only regular files,
 * directories and symlinks are handled; append-only and immutable inodes
 * are refused.  In-ICB files either stay in the ICB (tail is zeroed in
 * the file entry block) or are first expanded to normal allocation when
 * i_size no longer fits; block-mapped files zero the partial last page
 * and drop extents past i_size.
 * NOTE(review): this extract is missing lines (braces, locking, returns)
 * -- read alongside the full source.
 */
854 void udf_truncate(struct inode * inode)
857 struct buffer_head *bh;
860 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
861 S_ISLNK(inode->i_mode)))
863 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
866 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
/* i_size no longer fits in the ICB: migrate to normal allocation. */
868 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
871 udf_expand_file_adinicb(inode, inode->i_size, &err);
/* Expansion can fail and leave the file in-ICB; clamp i_size. */
872 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
874 inode->i_size = UDF_I_LENALLOC(inode);
878 udf_truncate_extents(inode);
/* Still in-ICB: zero the bytes after i_size inside the file entry. */
882 offset = (inode->i_size & (inode->i_sb->s_blocksize - 1)) +
883 udf_file_entry_alloc_offset(inode);
885 if ((bh = udf_tread(inode->i_sb,
886 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0))))
888 memset(bh->b_data + offset, 0x00, inode->i_sb->s_blocksize - offset);
889 mark_buffer_dirty(bh);
890 udf_release_data(bh);
892 UDF_I_LENALLOC(inode) = inode->i_size;
/* Normal files: zero the tail of the last partial block, then trim. */
897 block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
898 udf_truncate_extents(inode);
901 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
902 UDF_I_UMTIME(inode) = UDF_I_UCTIME(inode) = CURRENT_UTIME;
904 udf_sync_inode (inode);
906 mark_inode_dirty(inode);
916 * This routine is called by iget() [which is called by udf_iget()]
917 * (clean_inode() will have been called first)
918 * when an inode is first read into memory.
921 * July 1, 1997 - Andrew E. Mileski
922 * Written, tested, and released.
924 * 12/19/98 dgb Updated to fix size problems.
/*
 * VFS read_inode entry point: poison the cached ICB location so stale
 * data is never mistaken for a valid location, then (on a missing line in
 * this extract) hand off to __udf_read_inode() for the real work.
 */
928 udf_read_inode(struct inode *inode)
930 memset(&UDF_I_LOCATION(inode), 0xFF, sizeof(lb_addr));
/*
 * Read the inode's (extended) file entry from disc, validating the
 * descriptor tag, following strategy-4096 indirect ICBs (by recursing
 * with the updated location), rejecting any strategy other than 4/4096,
 * and finally populating the in-core inode via udf_fill_inode().
 * Failure paths mark the inode bad.
 * NOTE(review): this extract is missing lines (braces, returns) -- read
 * alongside the full source.
 */
934 __udf_read_inode(struct inode *inode)
936 struct buffer_head *bh = NULL;
937 struct fileEntry *fe;
941 * Set defaults, but the inode is still incomplete!
942 * Note: get_new_inode() sets the following on a new inode:
946 * i_flags = sb->s_flags
948 * clean_inode(): zero fills and sets
954 inode->i_blksize = PAGE_SIZE;
956 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
960 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
962 make_bad_inode(inode);
/* Only FE, EFE and USE descriptors are acceptable here. */
966 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
967 ident != TAG_IDENT_USE)
969 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
970 inode->i_ino, ident);
971 udf_release_data(bh);
972 make_bad_inode(inode);
976 fe = (struct fileEntry *)bh->b_data;
/* Strategy 4096: the entry may point at a newer copy via an indirect
 * ICB in the following block; chase it and restart. */
978 if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
980 struct buffer_head *ibh = NULL, *nbh = NULL;
981 struct indirectEntry *ie;
983 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
984 if (ident == TAG_IDENT_IE)
989 ie = (struct indirectEntry *)ibh->b_data;
991 loc = lelb_to_cpu(ie->indirectICB.extLocation);
993 if (ie->indirectICB.extLength &&
994 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
996 if (ident == TAG_IDENT_FE ||
997 ident == TAG_IDENT_EFE)
/* Adopt the indirect location and re-read from there. */
999 memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(lb_addr));
1000 udf_release_data(bh);
1001 udf_release_data(ibh);
1002 udf_release_data(nbh);
1003 __udf_read_inode(inode);
1008 udf_release_data(nbh);
1009 udf_release_data(ibh);
1013 udf_release_data(ibh);
1017 udf_release_data(ibh);
1019 else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
1021 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1022 le16_to_cpu(fe->icbTag.strategyType));
1023 udf_release_data(bh);
1024 make_bad_inode(inode);
1027 udf_fill_inode(inode, bh);
1028 udf_release_data(bh);
/*
 * Populate the in-core inode from the on-disc file entry in 'bh': decode
 * strategy and allocation type, ownership, link count, size, timestamps
 * (from the FE or the larger EFE layout), allocation-descriptor offsets,
 * and finally wire up the per-file-type operations (dir / regular file /
 * device node / fifo / symlink).  Unknown file types mark the inode bad.
 * NOTE(review): this extract is missing lines (braces, break statements,
 * some assignments) -- read alongside the full source.
 */
1031 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1033 struct fileEntry *fe;
1034 struct extendedFileEntry *efe;
1039 inode->i_version = ++event;
1040 UDF_I_NEW_INODE(inode) = 0;
/* The same buffer is viewed as either an FE or an EFE below. */
1042 fe = (struct fileEntry *)bh->b_data;
1043 efe = (struct extendedFileEntry *)bh->b_data;
1045 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1046 UDF_I_STRAT4096(inode) = 0;
1047 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1048 UDF_I_STRAT4096(inode) = 1;
1050 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1051 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1052 UDF_I_EXTENDED_FE(inode) = 1;
1053 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1054 UDF_I_EXTENDED_FE(inode) = 0;
/* Unallocated-space entries carry only an alloc-descriptor length. */
1055 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1057 UDF_I_LENALLOC(inode) =
1059 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
/* -1 on disc means "not specified": fall back to mount defaults. */
1063 inode->i_uid = le32_to_cpu(fe->uid);
1064 if ( inode->i_uid == -1 ) inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1066 inode->i_gid = le32_to_cpu(fe->gid);
1067 if ( inode->i_gid == -1 ) inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1069 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1070 if (!inode->i_nlink)
1073 inode->i_size = le64_to_cpu(fe->informationLength);
1074 UDF_I_LENEXTENTS(inode) = inode->i_size;
1076 inode->i_mode = udf_convert_permissions(fe);
1077 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
1079 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1080 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
/* Plain file entry: timestamps and lengths at the FE offsets. */
1082 if (UDF_I_EXTENDED_FE(inode) == 0)
1084 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1085 (inode->i_sb->s_blocksize_bits - 9);
/* Each timestamp falls back to the volume's recording time when the
 * on-disc stamp cannot be converted. */
1087 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1088 lets_to_cpu(fe->accessTime)) )
1090 inode->i_atime = convtime;
1094 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1097 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1098 lets_to_cpu(fe->modificationTime)) )
1100 inode->i_mtime = convtime;
1101 UDF_I_UMTIME(inode) = convtime_usec;
1105 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1106 UDF_I_UMTIME(inode) = 0;
1109 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1110 lets_to_cpu(fe->attrTime)) )
1112 inode->i_ctime = convtime;
1113 UDF_I_UCTIME(inode) = convtime_usec;
1117 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1118 UDF_I_UCTIME(inode) = 0;
1121 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1122 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1123 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1124 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1125 alen = offset + UDF_I_LENALLOC(inode);
/* Extended file entry: same data at the (larger) EFE offsets, plus a
 * separate creation time. */
1129 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1130 (inode->i_sb->s_blocksize_bits - 9);
1132 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1133 lets_to_cpu(efe->accessTime)) )
1135 inode->i_atime = convtime;
1139 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1142 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1143 lets_to_cpu(efe->modificationTime)) )
1145 inode->i_mtime = convtime;
1146 UDF_I_UMTIME(inode) = convtime_usec;
1150 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1151 UDF_I_UMTIME(inode) = 0;
1154 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1155 lets_to_cpu(efe->createTime)) )
1157 UDF_I_CRTIME(inode) = convtime;
1158 UDF_I_UCRTIME(inode) = convtime_usec;
1162 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1163 UDF_I_UCRTIME(inode) = 0;
1166 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1167 lets_to_cpu(efe->attrTime)) )
1169 inode->i_ctime = convtime;
1170 UDF_I_UCTIME(inode) = convtime_usec;
1174 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1175 UDF_I_UCTIME(inode) = 0;
1178 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1179 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1180 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1181 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
1182 alen = offset + UDF_I_LENALLOC(inode);
/* Dispatch the per-type inode/file operations. */
1185 switch (fe->icbTag.fileType)
1187 case ICBTAG_FILE_TYPE_DIRECTORY:
1189 inode->i_op = &udf_dir_inode_operations;
1190 inode->i_fop = &udf_dir_operations;
1191 inode->i_mode |= S_IFDIR;
1195 case ICBTAG_FILE_TYPE_REALTIME:
1196 case ICBTAG_FILE_TYPE_REGULAR:
1197 case ICBTAG_FILE_TYPE_UNDEF:
/* In-ICB files need the special adinicb aops. */
1199 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1200 inode->i_data.a_ops = &udf_adinicb_aops;
1202 inode->i_data.a_ops = &udf_aops;
1203 inode->i_op = &udf_file_inode_operations;
1204 inode->i_fop = &udf_file_operations;
1205 inode->i_mode |= S_IFREG;
1208 case ICBTAG_FILE_TYPE_BLOCK:
1210 inode->i_mode |= S_IFBLK;
1213 case ICBTAG_FILE_TYPE_CHAR:
1215 inode->i_mode |= S_IFCHR;
1218 case ICBTAG_FILE_TYPE_FIFO:
1220 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1223 case ICBTAG_FILE_TYPE_SYMLINK:
1225 inode->i_data.a_ops = &udf_symlink_aops;
1226 inode->i_op = &page_symlink_inode_operations;
1227 inode->i_mode = S_IFLNK|S_IRWXUGO;
1232 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1233 inode->i_ino, fe->icbTag.fileType);
1234 make_bad_inode(inode);
/* Device nodes: dig the major/minor out of the deviceSpec extended
 * attribute (attribute type 12). */
1238 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1240 struct buffer_head *tbh = NULL;
1241 struct deviceSpec *dsea =
1242 (struct deviceSpec *)
1243 udf_get_extendedattr(inode, 12, 1, &tbh);
1247 init_special_inode(inode, inode->i_mode,
1248 ((le32_to_cpu(dsea->majorDeviceIdent)) << 8) |
1249 (le32_to_cpu(dsea->minorDeviceIdent) & 0xFF));
1250 /* Developer ID ??? */
1251 udf_release_data(tbh);
1255 make_bad_inode(inode);
1261 udf_convert_permissions(struct fileEntry *fe)
1264 uint32_t permissions;
1267 permissions = le32_to_cpu(fe->permissions);
1268 flags = le16_to_cpu(fe->icbTag.flags);
1270 mode = (( permissions ) & S_IRWXO) |
1271 (( permissions >> 2 ) & S_IRWXG) |
1272 (( permissions >> 4 ) & S_IRWXU) |
1273 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1274 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1275 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1284 * Write out the specified inode.
1287 * This routine is called whenever an inode is synced.
1288 * Currently this routine is just a placeholder.
1291 * July 1, 1997 - Andrew E. Mileski
1292 * Written, tested, and released.
/*
 * VFS write_inode: flush the in-core inode to its on-disc file entry.
 * NOTE(review): lines are missing from this extract around the call (the
 * original presumably brackets it with locking) -- verify against the
 * full source.
 */
1295 void udf_write_inode(struct inode * inode, int sync)
1298 udf_update_inode(inode, sync);
/* Synchronously write the inode's on-disc file entry; returns the
 * result of udf_update_inode() with the sync flag forced on. */
int udf_sync_inode(struct inode * inode)
{
	return udf_update_inode(inode, 1);
}
1308 udf_update_inode(struct inode *inode, int do_sync)
1310 struct buffer_head *bh = NULL;
1311 struct fileEntry *fe;
1312 struct extendedFileEntry *efe;
1320 bh = udf_tread(inode->i_sb,
1321 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1325 udf_debug("bread failure\n");
1328 fe = (struct fileEntry *)bh->b_data;
1329 efe = (struct extendedFileEntry *)bh->b_data;
1330 if (UDF_I_NEW_INODE(inode) == 1)
1332 if (UDF_I_EXTENDED_FE(inode) == 0)
1333 memset(bh->b_data, 0x00, sizeof(struct fileEntry));
1335 memset(bh->b_data, 0x00, sizeof(struct extendedFileEntry));
1336 memset(bh->b_data + udf_file_entry_alloc_offset(inode) +
1337 UDF_I_LENALLOC(inode), 0x0, inode->i_sb->s_blocksize -
1338 udf_file_entry_alloc_offset(inode) - UDF_I_LENALLOC(inode));
1339 UDF_I_NEW_INODE(inode) = 0;
1342 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1344 struct unallocSpaceEntry *use =
1345 (struct unallocSpaceEntry *)bh->b_data;
1347 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1348 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1350 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1351 use->descTag.descCRCLength = cpu_to_le16(crclen);
1352 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
1354 use->descTag.tagChecksum = 0;
1355 for (i=0; i<16; i++)
1357 use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1359 mark_buffer_dirty(bh);
1360 udf_release_data(bh);
1364 if (inode->i_uid != UDF_SB(inode->i_sb)->s_uid)
1365 fe->uid = cpu_to_le32(inode->i_uid);
1367 if (inode->i_gid != UDF_SB(inode->i_sb)->s_gid)
1368 fe->gid = cpu_to_le32(inode->i_gid);
1370 udfperms = ((inode->i_mode & S_IRWXO) ) |
1371 ((inode->i_mode & S_IRWXG) << 2) |
1372 ((inode->i_mode & S_IRWXU) << 4);
1374 udfperms |= (le32_to_cpu(fe->permissions) &
1375 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1376 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1377 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1378 fe->permissions = cpu_to_le32(udfperms);
1380 if (S_ISDIR(inode->i_mode))
1381 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1383 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1385 fe->informationLength = cpu_to_le64(inode->i_size);
1387 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1390 struct buffer_head *tbh = NULL;
1391 struct deviceSpec *dsea =
1392 (struct deviceSpec *)
1393 udf_get_extendedattr(inode, 12, 1, &tbh);
1397 dsea = (struct deviceSpec *)
1398 udf_add_extendedattr(inode,
1399 sizeof(struct deviceSpec) +
1400 sizeof(regid), 12, 0x3, &tbh);
1401 dsea->attrType = 12;
1402 dsea->attrSubtype = 1;
1403 dsea->attrLength = sizeof(struct deviceSpec) +
1405 dsea->impUseLength = sizeof(regid);
1407 eid = (regid *)dsea->impUse;
1408 memset(eid, 0, sizeof(regid));
1409 strcpy(eid->ident, UDF_ID_DEVELOPER);
1410 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1411 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1412 dsea->majorDeviceIdent = kdev_t_to_nr(inode->i_rdev) >> 8;
1413 dsea->minorDeviceIdent = kdev_t_to_nr(inode->i_rdev) & 0xFF;
1414 mark_buffer_dirty_inode(tbh, inode);
1415 udf_release_data(tbh);
1418 if (UDF_I_EXTENDED_FE(inode) == 0)
1420 fe->logicalBlocksRecorded = cpu_to_le64(
1421 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1422 (inode->i_sb->s_blocksize_bits - 9));
1424 if (udf_time_to_stamp(&cpu_time, inode->i_atime, 0))
1425 fe->accessTime = cpu_to_lets(cpu_time);
1426 if (udf_time_to_stamp(&cpu_time, inode->i_mtime, UDF_I_UMTIME(inode)))
1427 fe->modificationTime = cpu_to_lets(cpu_time);
1428 if (udf_time_to_stamp(&cpu_time, inode->i_ctime, UDF_I_UCTIME(inode)))
1429 fe->attrTime = cpu_to_lets(cpu_time);
1430 memset(&(fe->impIdent), 0, sizeof(regid));
1431 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1432 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1433 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1434 fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1435 fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1436 fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1437 fe->descTag.tagIdent = le16_to_cpu(TAG_IDENT_FE);
1438 crclen = sizeof(struct fileEntry);
1442 efe->objectSize = cpu_to_le64(inode->i_size);
1443 efe->logicalBlocksRecorded = cpu_to_le64(
1444 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1445 (inode->i_sb->s_blocksize_bits - 9));
1447 if (UDF_I_CRTIME(inode) >= inode->i_atime)
1449 UDF_I_CRTIME(inode) = inode->i_atime;
1450 UDF_I_UCRTIME(inode) = 0;
1452 if (UDF_I_CRTIME(inode) > inode->i_mtime ||
1453 (UDF_I_CRTIME(inode) == inode->i_mtime &&
1454 UDF_I_UCRTIME(inode) > UDF_I_UMTIME(inode)))
1456 UDF_I_CRTIME(inode) = inode->i_mtime;
1457 UDF_I_UCRTIME(inode) = UDF_I_UMTIME(inode);
1459 if (UDF_I_CRTIME(inode) > inode->i_ctime ||
1460 (UDF_I_CRTIME(inode) == inode->i_ctime &&
1461 UDF_I_UCRTIME(inode) > UDF_I_UCTIME(inode)))
1463 UDF_I_CRTIME(inode) = inode->i_ctime;
1464 UDF_I_UCRTIME(inode) = UDF_I_UCTIME(inode);
1467 if (udf_time_to_stamp(&cpu_time, inode->i_atime, 0))
1468 efe->accessTime = cpu_to_lets(cpu_time);
1469 if (udf_time_to_stamp(&cpu_time, inode->i_mtime, UDF_I_UMTIME(inode)))
1470 efe->modificationTime = cpu_to_lets(cpu_time);
1471 if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode), UDF_I_UCRTIME(inode)))
1472 efe->createTime = cpu_to_lets(cpu_time);
1473 if (udf_time_to_stamp(&cpu_time, inode->i_ctime, UDF_I_UCTIME(inode)))
1474 efe->attrTime = cpu_to_lets(cpu_time);
1476 memset(&(efe->impIdent), 0, sizeof(regid));
1477 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1478 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1479 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1480 efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1481 efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1482 efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1483 efe->descTag.tagIdent = le16_to_cpu(TAG_IDENT_EFE);
1484 crclen = sizeof(struct extendedFileEntry);
1486 if (UDF_I_STRAT4096(inode))
1488 fe->icbTag.strategyType = cpu_to_le16(4096);
1489 fe->icbTag.strategyParameter = cpu_to_le16(1);
1490 fe->icbTag.numEntries = cpu_to_le16(2);
1494 fe->icbTag.strategyType = cpu_to_le16(4);
1495 fe->icbTag.numEntries = cpu_to_le16(1);
1498 if (S_ISDIR(inode->i_mode))
1499 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1500 else if (S_ISREG(inode->i_mode))
1501 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1502 else if (S_ISLNK(inode->i_mode))
1503 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1504 else if (S_ISBLK(inode->i_mode))
1505 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1506 else if (S_ISCHR(inode->i_mode))
1507 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1508 else if (S_ISFIFO(inode->i_mode))
1509 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1511 icbflags = UDF_I_ALLOCTYPE(inode) |
1512 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1513 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1514 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1515 (le16_to_cpu(fe->icbTag.flags) &
1516 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1517 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1519 fe->icbTag.flags = cpu_to_le16(icbflags);
1520 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1521 fe->descTag.descVersion = cpu_to_le16(3);
1523 fe->descTag.descVersion = cpu_to_le16(2);
1524 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1525 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1526 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1527 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1528 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1530 fe->descTag.tagChecksum = 0;
1531 for (i=0; i<16; i++)
1533 fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1535 /* write the data blocks */
1536 mark_buffer_dirty(bh);
1539 ll_rw_block(WRITE, 1, &bh);
1541 if (buffer_req(bh) && !buffer_uptodate(bh))
1543 printk("IO error syncing udf inode [%s:%08lx]\n",
1544 bdevname(inode->i_dev), inode->i_ino);
1548 udf_release_data(bh);
1559 * This routine replaces iget() and read_inode().
1562 * October 3, 1997 - Andrew E. Mileski
1563 * Written, tested, and released.
1565 * 12/19/98 dgb Added semaphore and changed to be a wrapper of iget
/*
 * udf_iget
 *
 * Return the in-core inode for the on-disc UDF address `ino`, going through
 * iget() (which in turn calls udf_read_inode()).
 *
 * NOTE(review): this chunk is elided — braces, else branches and return
 * statements between the visible lines are missing from this view.
 */
1568 udf_iget(struct super_block *sb, lb_addr ino)
1570 struct inode *inode;
1571 unsigned long block;
/* Translate the partition-relative lb_addr into an absolute device block. */
1573 block = udf_get_lb_pblock(sb, ino, 0);
1577 inode = iget(sb, block);
1578 /* calls udf_read_inode() ! */
1582 printk(KERN_ERR "udf: iget() failed\n");
1585 else if (is_bad_inode(inode))
/*
 * Location 0xFFFFFFFF/0xFFFF is presumably a sentinel for an inode that
 * iget() returned without having been read from disc — TODO confirm.
 * Record the real location and read the inode explicitly.
 */
1590 else if (UDF_I_LOCATION(inode).logicalBlockNum == 0xFFFFFFFF &&
1591 UDF_I_LOCATION(inode).partitionReferenceNum == 0xFFFF)
1593 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(lb_addr));
1594 __udf_read_inode(inode);
1595 if (is_bad_inode(inode))
/* Reject addresses whose block number lies outside its partition. */
1602 if ( ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum) )
1604 udf_debug("block=%d, partition=%d out of range\n",
1605 ino.logicalBlockNum, ino.partitionReferenceNum);
1606 make_bad_inode(inode);
/*
 * udf_add_aext
 *
 * Append the extent (eloc, elen) to the inode's allocation-descriptor list
 * at (*bloc, *extoffset).  If the current descriptor block cannot hold two
 * more descriptors, a new Allocation Extent Descriptor (AED) block is
 * allocated, chained from the old block, and the write continues there.
 * Returns the extent type (high 2 bits of elen) via udf_write_aext().
 *
 * NOTE(review): this chunk is elided — braces, else branches and returns
 * between the visible lines are missing from this view.
 */
1614 int8_t udf_add_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
1615 lb_addr eloc, uint32_t elen, struct buffer_head **bh, int inc)
1618 short_ad *sad = NULL;
1619 long_ad *lad = NULL;
1620 struct allocExtDesc *aed;
/* Read the block holding the current descriptor list (presumably only
 * when *bh is NULL — the guarding condition is not visible here). */
1625 if (!(*bh = udf_tread(inode->i_sb,
1626 udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
1628 udf_debug("reading block %d failed!\n",
1629 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
/* Descriptor size depends on the inode's allocation type. */
1634 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1635 adsize = sizeof(short_ad);
1636 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1637 adsize = sizeof(long_ad);
/* Not enough room for this descriptor plus a chain pointer: allocate a
 * fresh AED block near the old one and continue the list there. */
1641 if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
1644 struct buffer_head *nbh;
1646 lb_addr obloc = *bloc;
1648 if (!(bloc->logicalBlockNum = udf_new_block(inode->i_sb, inode,
1649 obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
1653 if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1659 memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1660 mark_buffer_uptodate(nbh, 1);
1662 mark_buffer_dirty_inode(nbh, inode);
1664 aed = (struct allocExtDesc *)(nbh->b_data);
/* previousAllocExtLocation is a non-strict extension: only written when
 * the STRICT mount flag is off. */
1665 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1666 aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
/* If even one descriptor no longer fits in the old block, move the last
 * descriptor into the new AED block; otherwise just start the new block
 * empty and remember where the chain pointer goes (sptr). */
1667 if (*extoffset + adsize > inode->i_sb->s_blocksize)
1669 loffset = *extoffset;
1670 aed->lengthAllocDescs = cpu_to_le32(adsize);
1671 sptr = (*bh)->b_data + *extoffset - adsize;
1672 dptr = nbh->b_data + sizeof(struct allocExtDesc);
1673 memcpy(dptr, sptr, adsize);
1674 *extoffset = sizeof(struct allocExtDesc) + adsize;
1678 loffset = *extoffset + adsize;
1679 aed->lengthAllocDescs = cpu_to_le32(0);
1680 sptr = (*bh)->b_data + *extoffset;
1681 *extoffset = sizeof(struct allocExtDesc);
/* Account the chain-pointer descriptor: in the previous AED's header if
 * the old block was an AED, else in the inode's in-ICB length. */
1683 if (memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
1685 aed = (struct allocExtDesc *)(*bh)->b_data;
1686 aed->lengthAllocDescs =
1687 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1691 UDF_I_LENALLOC(inode) += adsize;
1692 mark_inode_dirty(inode);
/* Tag descriptor version 3 for UDF >= 2.00, version 2 otherwise. */
1695 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1696 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1697 bloc->logicalBlockNum, sizeof(tag));
1699 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1700 bloc->logicalBlockNum, sizeof(tag));
/* Write the EXT_NEXT_EXTENT_ALLOCDECS chain pointer at sptr, in the
 * format matching the inode's allocation type. */
1701 switch (UDF_I_ALLOCTYPE(inode))
1703 case ICBTAG_FLAG_AD_SHORT:
1705 sad = (short_ad *)sptr;
1706 sad->extLength = cpu_to_le32(
1707 EXT_NEXT_EXTENT_ALLOCDECS |
1708 inode->i_sb->s_blocksize);
1709 sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
1712 case ICBTAG_FLAG_AD_LONG:
1714 lad = (long_ad *)sptr;
1715 lad->extLength = cpu_to_le32(
1716 EXT_NEXT_EXTENT_ALLOCDECS |
1717 inode->i_sb->s_blocksize);
1718 lad->extLocation = cpu_to_lelb(*bloc);
1719 memset(lad->impUse, 0x00, sizeof(lad->impUse));
/* Recompute the old block's tag CRC over the full descriptor area for
 * non-strict or UDF >= 2.01, else only over the AED header. */
1723 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1724 udf_update_tag((*bh)->b_data, loffset)
1726 udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1727 mark_buffer_dirty_inode(*bh, inode);
1728 udf_release_data(*bh);
/* Now write the actual new extent at the (possibly relocated) position. */
1732 etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);
/* Account the new descriptor in the ICB or the AED header, mirroring the
 * bookkeeping above. */
1734 if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
1736 UDF_I_LENALLOC(inode) += adsize;
1737 mark_inode_dirty(inode);
1741 aed = (struct allocExtDesc *)(*bh)->b_data;
1742 aed->lengthAllocDescs =
1743 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1744 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1745 udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
1747 udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1748 mark_buffer_dirty_inode(*bh, inode);
/*
 * udf_write_aext
 *
 * Overwrite the allocation descriptor at (bloc, *extoffset) with the extent
 * (eloc, elen), advance *extoffset past it (when inc — the guarding line is
 * elided from this view), and return the extent type (elen >> 30).
 *
 * NOTE(review): this chunk is elided — braces and else branches between the
 * visible lines are missing from this view.
 */
1754 int8_t udf_write_aext(struct inode *inode, lb_addr bloc, int *extoffset,
1755 lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
1758 short_ad *sad = NULL;
1759 long_ad *lad = NULL;
/* Read the target block when no buffer was passed in (the `if (!bh)` guard
 * is presumably on an elided line — TODO confirm). */
1763 if (!(bh = udf_tread(inode->i_sb,
1764 udf_get_lb_pblock(inode->i_sb, bloc, 0))))
1766 udf_debug("reading block %d failed!\n",
1767 udf_get_lb_pblock(inode->i_sb, bloc, 0));
/* Caller-supplied buffer: take an extra reference so the unconditional
 * udf_release_data() below does not drop the caller's reference. */
1772 atomic_inc(&bh->b_count);
1774 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1775 adsize = sizeof(short_ad);
1776 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1777 adsize = sizeof(long_ad);
/* Encode the extent in place, little-endian, per allocation type. */
1781 switch (UDF_I_ALLOCTYPE(inode))
1783 case ICBTAG_FLAG_AD_SHORT:
1785 sad = (short_ad *)((bh)->b_data + *extoffset);
1786 sad->extLength = cpu_to_le32(elen);
1787 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1790 case ICBTAG_FLAG_AD_LONG:
1792 lad = (long_ad *)((bh)->b_data + *extoffset);
1793 lad->extLength = cpu_to_le32(elen);
1794 lad->extLocation = cpu_to_lelb(eloc);
1795 memset(lad->impUse, 0x00, sizeof(lad->impUse));
/* Descriptor lives in an AED block (not the inode's own ICB): refresh its
 * tag CRC when the revision/mount flags call for it, and dirty the buffer
 * against the inode. */
1800 if (memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
1802 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1804 struct allocExtDesc *aed = (struct allocExtDesc *)(bh)->b_data;
1805 udf_update_tag((bh)->b_data,
1806 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1808 mark_buffer_dirty_inode(bh, inode);
/* Otherwise the descriptor sits in the file entry itself: dirty both the
 * inode and the buffer. */
1812 mark_inode_dirty(inode);
1813 mark_buffer_dirty(bh);
1817 *extoffset += adsize;
1818 udf_release_data(bh);
1819 return (elen >> 30);
/*
 * udf_next_aext
 *
 * Fetch the extent at (*bloc, *extoffset) into (*eloc, *elen) and return its
 * type.  Transparently follows EXT_NEXT_EXTENT_ALLOCDECS chain pointers into
 * the next Allocation Extent Descriptor block by tail-recursing on itself.
 *
 * NOTE(review): this chunk is elided — braces, else branches and returns
 * between the visible lines are missing from this view.
 */
1822 int8_t udf_next_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
1823 lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
/* Read the block holding the descriptor list (presumably only when *bh is
 * NULL — the guard is on an elided line). */
1831 if (!(*bh = udf_tread(inode->i_sb,
1832 udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
1834 udf_debug("reading block %d failed!\n",
1835 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
1840 tagIdent = le16_to_cpu(((tag *)(*bh)->b_data)->tagIdent);
/* Work out where the descriptor area starts (pos) and ends (alen): the
 * layout differs for FE/EFE, Unallocated Space Entry, and AED blocks. */
1842 if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
1844 if (tagIdent == TAG_IDENT_FE || tagIdent == TAG_IDENT_EFE ||
1845 UDF_I_NEW_INODE(inode))
1847 pos = udf_file_entry_alloc_offset(inode);
1848 alen = UDF_I_LENALLOC(inode) + pos;
1850 else if (tagIdent == TAG_IDENT_USE)
1852 pos = sizeof(struct unallocSpaceEntry);
1853 alen = UDF_I_LENALLOC(inode) + pos;
1858 else if (tagIdent == TAG_IDENT_AED)
1860 struct allocExtDesc *aed = (struct allocExtDesc *)(*bh)->b_data;
1862 pos = sizeof(struct allocExtDesc);
1863 alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
1871 switch (UDF_I_ALLOCTYPE(inode))
1873 case ICBTAG_FLAG_AD_SHORT:
1877 if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
/* A chain-pointer descriptor: hop to the next AED block and retry. */
1880 if ((etype = le32_to_cpu(sad->extLength) >> 30) == (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
1882 bloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1884 udf_release_data(*bh);
1886 return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
/* short_ad carries only a block number; the partition is implied by the
 * inode's own location. */
1890 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1891 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1892 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1896 case ICBTAG_FLAG_AD_LONG:
1900 if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
1903 if ((etype = le32_to_cpu(lad->extLength) >> 30) == (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
1905 *bloc = lelb_to_cpu(lad->extLocation);
1907 udf_release_data(*bh);
1909 return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
1913 *eloc = lelb_to_cpu(lad->extLocation);
1914 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1918 case ICBTAG_FLAG_AD_IN_ICB:
/* Data embedded in the ICB itself: the whole in-ICB area is one recorded
 * extent at the inode's own location. */
1920 if (UDF_I_LENALLOC(inode) == 0)
1922 etype = (EXT_RECORDED_ALLOCATED >> 30);
1923 *eloc = UDF_I_LOCATION(inode);
1924 *elen = UDF_I_LENALLOC(inode);
1929 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
/* Empty extent encountered: rewind *extoffset so the caller can overwrite
 * it (reached via elided control flow — presumably when elen == 0). */
1936 udf_debug("Empty Extent, inode=%ld, alloctype=%d, eloc=%d, elen=%d, etype=%d, extoffset=%d\n",
1937 inode->i_ino, UDF_I_ALLOCTYPE(inode), eloc->logicalBlockNum, *elen, etype, *extoffset);
1938 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1939 *extoffset -= sizeof(short_ad);
1940 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1941 *extoffset -= sizeof(long_ad);
/*
 * udf_current_aext
 *
 * Like udf_next_aext() but does NOT follow EXT_NEXT_EXTENT_ALLOCDECS chain
 * pointers: it decodes whatever descriptor sits at (*bloc, *extoffset) and
 * returns its raw type.  Also derives the descriptor-area bounds from the
 * FE/EFE layout directly rather than from the block's tag.
 *
 * NOTE(review): this chunk is elided — braces, else branches and returns
 * between the visible lines are missing from this view.
 */
1945 int8_t udf_current_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
1946 lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
/* Read the block holding the descriptor list (guard line elided). */
1953 if (!(*bh = udf_tread(inode->i_sb,
1954 udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
1956 udf_debug("reading block %d failed!\n",
1957 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
/* Descriptor area: after the (extended) file entry and its EAs when we are
 * in the inode's own block, otherwise after the AED header. */
1962 if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
1964 if (!(UDF_I_EXTENDED_FE(inode)))
1965 pos = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1967 pos = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
1968 alen = UDF_I_LENALLOC(inode) + pos;
1972 struct allocExtDesc *aed = (struct allocExtDesc *)(*bh)->b_data;
1974 pos = sizeof(struct allocExtDesc);
1975 alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
1981 switch (UDF_I_ALLOCTYPE(inode))
1983 case ICBTAG_FLAG_AD_SHORT:
1987 if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
1990 etype = le32_to_cpu(sad->extLength) >> 30;
/* short_ad: partition number is implied by the inode's own location. */
1991 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1992 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1993 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1996 case ICBTAG_FLAG_AD_LONG:
2000 if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
2003 etype = le32_to_cpu(lad->extLength) >> 30;
2004 *eloc = lelb_to_cpu(lad->extLocation);
2005 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
2010 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
/* Empty extent: rewind *extoffset so the slot can be overwritten (reached
 * via elided control flow — presumably when elen == 0). */
2017 udf_debug("Empty Extent!\n");
2018 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
2019 *extoffset -= sizeof(short_ad);
2020 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
2021 *extoffset -= sizeof(long_ad);
/*
 * udf_insert_aext
 *
 * Insert extent (neloc, nelen) at position (bloc, extoffset), pushing every
 * following extent one slot further: each iteration writes the pending
 * extent and picks up the one it displaced.  The final displaced extent is
 * appended with udf_add_aext().  Returns the last extent's type.
 *
 * NOTE(review): this chunk is elided — braces and intermediate statements
 * between the visible lines are missing from this view.
 */
2025 int8_t udf_insert_aext(struct inode *inode, lb_addr bloc, int extoffset,
2026 lb_addr neloc, uint32_t nelen, struct buffer_head *bh)
/* Read the descriptor block when none was passed (guard line elided). */
2034 if (!(bh = udf_tread(inode->i_sb,
2035 udf_get_lb_pblock(inode->i_sb, bloc, 0))))
2037 udf_debug("reading block %d failed!\n",
2038 udf_get_lb_pblock(inode->i_sb, bloc, 0));
/* Extra reference to balance the udf_release_data() at the end. */
2043 atomic_inc(&bh->b_count);
/* Read the old extent, overwrite its slot with the pending one, then make
 * the old extent (type folded back into bits 30-31) the new pending one. */
2045 while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
2047 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
2050 nelen = (etype << 30) | oelen;
2052 udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
2053 udf_release_data(bh);
2054 return (nelen >> 30);
/*
 * udf_delete_aext
 *
 * Remove the extent at (nbloc, nextoffset) by sliding every subsequent
 * extent back one slot (read ahead through `n*`, write behind through `o*`),
 * then zero-fill and un-account the vacated descriptor(s).  When the walk
 * crossed into another allocation-extent block, that block is freed and TWO
 * descriptors vanish (the deleted extent plus the now-dead chain pointer),
 * hence the double write and the `adsize * 2` bookkeeping below.
 *
 * NOTE(review): this chunk is elided — braces, else branches and the
 * initial obh/obloc setup are missing from this view.
 */
2057 int8_t udf_delete_aext(struct inode *inode, lb_addr nbloc, int nextoffset,
2058 lb_addr eloc, uint32_t elen, struct buffer_head *nbh)
2060 struct buffer_head *obh;
2062 int oextoffset, adsize;
2064 struct allocExtDesc *aed;
/* Read the descriptor block when none was passed (guard line elided). */
2068 if (!(nbh = udf_tread(inode->i_sb,
2069 udf_get_lb_pblock(inode->i_sb, nbloc, 0))))
2071 udf_debug("reading block %d failed!\n",
2072 udf_get_lb_pblock(inode->i_sb, nbloc, 0));
/* Two extra references: nbh is aliased by obh (trailing write pointer) and
 * both are released separately at the end. */
2077 atomic_inc(&nbh->b_count);
2078 atomic_inc(&nbh->b_count);
2080 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
2081 adsize = sizeof(short_ad);
2082 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
2083 adsize = sizeof(long_ad);
2089 oextoffset = nextoffset;
/* Skip the extent being deleted; nothing to do if it was the last one. */
2091 if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
/* Copy each following extent one slot back. */
2094 while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
2096 udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
/* Read pointer crossed into a new AED block: advance the write pointer's
 * block to match (reference juggling — intervening lines elided). */
2097 if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
2100 udf_release_data(obh);
2101 atomic_inc(&nbh->b_count);
2103 oextoffset = nextoffset - adsize;
/* Blank extent used to scrub the vacated trailing slot(s). */
2106 memset(&eloc, 0x00, sizeof(lb_addr));
/* Read and write pointers ended in different blocks: the whole trailing
 * AED block is now unreferenced — free it and retire two descriptors. */
2109 if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
2111 udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
/* Intentional double write: clears both the last real slot and the chain
 * pointer that led to the freed block. */
2112 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
2113 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
2114 if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
2116 UDF_I_LENALLOC(inode) -= (adsize * 2);
2117 mark_inode_dirty(inode);
2121 aed = (struct allocExtDesc *)(obh)->b_data;
2122 aed->lengthAllocDescs =
2123 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
2124 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2125 udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
2127 udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
2128 mark_buffer_dirty_inode(obh, inode);
/* Same block: only one descriptor disappears. */
2133 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
2134 if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
2136 UDF_I_LENALLOC(inode) -= adsize;
2137 mark_inode_dirty(inode);
2141 aed = (struct allocExtDesc *)(obh)->b_data;
2142 aed->lengthAllocDescs =
2143 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
2144 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2145 udf_update_tag((obh)->b_data, oextoffset - adsize);
2147 udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
2148 mark_buffer_dirty_inode(obh, inode);
2152 udf_release_data(nbh);
2153 udf_release_data(obh);
2154 return (elen >> 30);
/*
 * inode_bmap
 *
 * Walk the inode's extent list until the extent covering file block `block`
 * is found.  On return, (*eloc, *elen) describe that extent, *offset is the
 * byte offset of `block` within it, and the extent's type is returned.
 * If the list ends first, *offset is set past the recorded data and the
 * inode's recorded-extent length is updated (return value line is elided).
 *
 * NOTE(review): this chunk is elided — braces and returns between the
 * visible lines are missing from this view.  Note *extoffset is uint32_t*
 * here while the udf_*_aext() helpers above take int* — TODO confirm the
 * intended type.
 */
2157 int8_t inode_bmap(struct inode *inode, int block, lb_addr *bloc, uint32_t *extoffset,
2158 lb_addr *eloc, uint32_t *elen, uint32_t *offset, struct buffer_head **bh)
/* Target position in bytes; 64-bit so large files don't overflow. */
2160 uint64_t lbcount = 0, bcount = (uint64_t)block << inode->i_sb->s_blocksize_bits;
2165 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
2170 printk(KERN_ERR "udf: inode_bmap: NULL inode\n");
/* Start the walk at the inode's own ICB. */
2176 *bloc = UDF_I_LOCATION(inode);
/* Accumulate extent lengths until lbcount passes bcount. */
2180 if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
/* Ran out of extents before reaching the target block. */
2182 *offset = bcount - lbcount;
2183 UDF_I_LENEXTENTS(inode) = lbcount;
2187 } while (lbcount <= bcount);
/* lbcount already includes *elen here, so subtract back to the extent
 * start to get the in-extent byte offset. */
2189 *offset = bcount + *elen - lbcount;
2194 long udf_block_map(struct inode *inode, long block)
2197 uint32_t offset, extoffset, elen;
2198 struct buffer_head *bh = NULL;
2203 if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
2204 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset >> inode->i_sb->s_blocksize_bits);
2211 udf_release_data(bh);
2213 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2214 return udf_fixed_to_variable(ret);