hfsplus: lift the 2TB size limit
author     Christoph Hellwig <hch@tuxera.com>
           Wed, 16 Feb 2011 08:34:26 +0000 (09:34 +0100)
committer  Christoph Hellwig <hch@lst.de>
           Thu, 30 Jun 2011 11:40:59 +0000 (13:40 +0200)
Replace the hardcoded 2TB limit with a dynamic limit based on the block
size now that we have fixed the few overflows preventing operation
with large volumes.
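
For reference, the dynamic limit comes from the generic addressability check
rather than a fixed constant. A minimal userspace sketch of the idea (an
illustration only, not the fs/libfs.c implementation; it assumes a 64-bit
sector index and skips the page-cache half of the real check):

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Illustration only: a volume of num_blocks allocation blocks, each
	 * (1 << blocksize_bits) bytes, is addressable as long as its last
	 * block can still be expressed as a 512-byte sector index without
	 * overflowing the sector type.
	 */
	static int check_addressable(unsigned int blocksize_bits, uint64_t num_blocks)
	{
		uint64_t max_sector = UINT64_MAX;	/* stand-in for a 64-bit sector_t */
		uint64_t last_block = num_blocks - 1;

		if (num_blocks == 0)
			return 0;
		if (blocksize_bits < 9)
			return -EINVAL;
		if (last_block > (max_sector >> (blocksize_bits - 9)))
			return -EFBIG;
		return 0;
	}

	int main(void)
	{
		/* 2^32 blocks of 4 KiB: well past the old 2 TiB cutoff, still fine. */
		printf("%d\n", check_addressable(12, 1ULL << 32));
		return 0;
	}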

Signed-off-by: Christoph Hellwig <hch@tuxera.com>
fs/hfsplus/super.c
fs/hfsplus/wrapper.c

diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 84a47b709f51835de4dc368e30318abddbbce226..acaef57e3606165cf096e13f8fac3566e6153871 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -393,6 +393,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        if (!sbi->rsrc_clump_blocks)
                sbi->rsrc_clump_blocks = 1;
 
+       err = generic_check_addressable(sbi->alloc_blksz_shift,
+                                       sbi->total_blocks);
+       if (err) {
+               printk(KERN_ERR "hfs: filesystem size too large.\n");
+               goto out_free_vhdr;
+       }
+
        /* Set up operations so we can load metadata */
        sb->s_op = &hfsplus_sops;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -417,6 +424,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
                sb->s_flags |= MS_RDONLY;
        }
 
+       err = -EINVAL;
+
        /* Load metadata objects (B*Trees) */
        sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
        if (!sbi->ext_tree) {
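
The err = -EINVAL added above appears to be needed because the new
generic_check_addressable() call leaves err holding zero on success, while the
B*Tree open failures just below bail out with a bare goto and rely on err
already containing the error code to return.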
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 4b86468125c155f4af29ed230bc84f60da10b3dc..2f933e83f5c5f6212842f0a0146a802b7b2a605b 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -141,10 +141,6 @@ int hfsplus_read_wrapper(struct super_block *sb)
 
        if (hfsplus_get_last_session(sb, &part_start, &part_size))
                goto out;
-       if ((u64)part_start + part_size > 0x100000000ULL) {
-               pr_err("hfs: volumes larger than 2TB are not supported yet\n");
-               goto out;
-       }
 
        error = -ENOMEM;
        sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
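
For the numbers behind the change: the removed check refused anything where
part_start + part_size (counted in 512-byte sectors) crossed 0x100000000,
i.e. 2^32 * 512 bytes = 2 TiB, which is where the old hard limit came from.
With the addressability check in place the ceiling scales with the allocation
block size instead; since HFS+ stores total_blocks as a 32-bit count, a volume
with the common 4 KiB allocation blocks can in principle reach about
2^32 * 4 KiB = 16 TiB, bounded in practice by what sector_t and the page cache
can address on the running kernel.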