Diffstat (limited to 'fs')
-rw-r--r--   fs/ceph/addr.c       |    7
-rw-r--r--   fs/ceph/caps.c       |   14
-rw-r--r--   fs/ceph/file.c       |    5
-rw-r--r--   fs/ceph/inode.c      |    6
-rw-r--r--   fs/ceph/super.h      |    2
-rw-r--r--   fs/ceph/xattr.c      |   12
-rw-r--r--   fs/cifs/connect.c    |  120
-rw-r--r--   fs/cifs/sess.c       |   19
-rw-r--r--   fs/hpfs/Kconfig      |    1
-rw-r--r--   fs/hpfs/alloc.c      |  118
-rw-r--r--   fs/hpfs/anode.c      |  138
-rw-r--r--   fs/hpfs/buffer.c     |   24
-rw-r--r--   fs/hpfs/dir.c        |   22
-rw-r--r--   fs/hpfs/dnode.c      |  174
-rw-r--r--   fs/hpfs/ea.c         |  136
-rw-r--r--   fs/hpfs/file.c       |   31
-rw-r--r--   fs/hpfs/hpfs.h       |  439
-rw-r--r--   fs/hpfs/hpfs_fn.h    |   80
-rw-r--r--   fs/hpfs/inode.c      |   47
-rw-r--r--   fs/hpfs/map.c        |   56
-rw-r--r--   fs/hpfs/name.c       |   33
-rw-r--r--   fs/hpfs/namei.c      |  106
-rw-r--r--   fs/hpfs/super.c      |  118
-rw-r--r--   fs/partitions/efi.c  |    6
-rw-r--r--   fs/proc/task_mmu.c   |   12
25 files changed, 846 insertions, 880 deletions
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index e159c529fd2..38b8ab55492 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -775,6 +775,13 @@ get_more_pages:
                             ci->i_truncate_seq,
                             ci->i_truncate_size,
                             &inode->i_mtime, true, 1, 0);
+
+            if (!req) {
+                rc = -ENOMEM;
+                unlock_page(page);
+                break;
+            }
+
             max_pages = req->r_num_pages;
 
             alloc_page_vec(fsc, req);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 5323c330bbf..9fa08662a88 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1331,10 +1331,11 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci)
 }
 
 /*
- * Mark caps dirty. If inode is newly dirty, add to the global dirty
- * list.
+ * Mark caps dirty. If inode is newly dirty, return the dirty flags.
+ * Caller is then responsible for calling __mark_inode_dirty with the
+ * returned flags value.
  */
-void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
+int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
 {
     struct ceph_mds_client *mdsc =
         ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
@@ -1357,7 +1358,7 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
         list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
         spin_unlock(&mdsc->cap_dirty_lock);
         if (ci->i_flushing_caps == 0) {
-            igrab(inode);
+            ihold(inode);
             dirty |= I_DIRTY_SYNC;
         }
     }
@@ -1365,9 +1366,8 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
     if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
         (mask & CEPH_CAP_FILE_BUFFER))
         dirty |= I_DIRTY_DATASYNC;
-    if (dirty)
-        __mark_inode_dirty(inode, dirty);
     __cap_delay_requeue(mdsc, ci);
+    return dirty;
 }
 
 /*
@@ -1991,7 +1991,7 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got)
         ci->i_wr_ref++;
     if (got & CEPH_CAP_FILE_BUFFER) {
         if (ci->i_wrbuffer_ref == 0)
-            igrab(&ci->vfs_inode);
+            ihold(&ci->vfs_inode);
         ci->i_wrbuffer_ref++;
         dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n",
              &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 159b512d5a2..203252d88d9 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -734,9 +734,12 @@ retry_snap:
         }
     }
     if (ret >= 0) {
+        int dirty;
         spin_lock(&inode->i_lock);
-        __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
+        dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
         spin_unlock(&inode->i_lock);
+        if (dirty)
+            __mark_inode_dirty(inode, dirty);
     }
 
 out:
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index b54c97da1c4..03d6dafda61 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1567,6 +1567,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
     int release = 0, dirtied = 0;
     int mask = 0;
     int err = 0;
+    int inode_dirty_flags = 0;
 
     if (ceph_snap(inode) != CEPH_NOSNAP)
         return -EROFS;
@@ -1725,13 +1726,16 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
         dout("setattr %p ATTR_FILE ... hrm!\n", inode);
 
     if (dirtied) {
-        __ceph_mark_dirty_caps(ci, dirtied);
+        inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
         inode->i_ctime = CURRENT_TIME;
     }
 
     release &= issued;
     spin_unlock(&inode->i_lock);
 
+    if (inode_dirty_flags)
+        __mark_inode_dirty(inode, inode_dirty_flags);
+
     if (mask) {
         req->r_inode = igrab(inode);
         req->r_inode_drop = release;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 619fe719968..b1f1b8bb127 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -506,7 +506,7 @@ static inline int __ceph_caps_dirty(struct ceph_inode_info *ci)
 {
     return ci->i_dirty_caps | ci->i_flushing_caps;
 }
-extern void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask);
+extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask);
 
 extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask);
 extern int __ceph_caps_used(struct ceph_inode_info *ci);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 8c9eba6ef9d..f2b62869618 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -703,6 +703,7 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
     struct ceph_inode_xattr *xattr = NULL;
     int issued;
     int required_blob_size;
+    int dirty;
 
     if (ceph_snap(inode) != CEPH_NOSNAP)
         return -EROFS;
@@ -763,11 +764,12 @@ retry:
     dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
     err = __set_xattr(ci, newname, name_len, newval, val_len,
                       1, 1, 1, &xattr);
-    __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
+    dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
     ci->i_xattrs.dirty = true;
     inode->i_ctime = CURRENT_TIME;
     spin_unlock(&inode->i_lock);
-
+    if (dirty)
+        __mark_inode_dirty(inode, dirty);
     return err;
 
 do_sync:
@@ -810,6 +812,7 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
     struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
     int issued;
     int err;
+    int dirty;
 
     if (ceph_snap(inode) != CEPH_NOSNAP)
         return -EROFS;
@@ -833,12 +836,13 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
         goto do_sync;
 
     err = __remove_xattr_by_name(ceph_inode(inode), name);
-    __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
+    dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
     ci->i_xattrs.dirty = true;
     inode->i_ctime = CURRENT_TIME;
     spin_unlock(&inode->i_lock);
-
+    if (dirty)
+        __mark_inode_dirty(inode, dirty);
     return err;
 do_sync:
     spin_unlock(&inode->i_lock);
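Note on the ceph hunks above: __ceph_mark_dirty_caps() now only computes and returns an I_DIRTY_* mask while i_lock is held, and each caller invokes __mark_inode_dirty() after dropping the lock. A minimal userspace sketch of that lock-ordering pattern follows; the pthread mutex, the struct fields and the helper names are illustrative stand-ins, not the kernel API.

#include <pthread.h>
#include <stdio.h>

#define I_DIRTY_SYNC     (1 << 0)   /* stand-ins for the kernel's dirty flags */
#define I_DIRTY_DATASYNC (1 << 1)

struct demo_inode {
    pthread_mutex_t i_lock;
    int dirty_caps;
};

/* Compute the dirty mask under the lock; do NOT notify the writeback
 * machinery here -- that is the caller's job once the lock is dropped. */
static int mark_dirty_caps(struct demo_inode *inode, int mask)
{
    int was = inode->dirty_caps;

    inode->dirty_caps |= mask;
    return was ? 0 : I_DIRTY_SYNC;  /* newly dirty -> tell the caller */
}

/* Stand-in for __mark_inode_dirty(); must not be called with i_lock held. */
static void mark_inode_dirty(struct demo_inode *inode, int flags)
{
    printf("mark_inode_dirty(%p, flags=%#x)\n", (void *)inode, flags);
}

int main(void)
{
    struct demo_inode inode = { PTHREAD_MUTEX_INITIALIZER, 0 };
    int dirty;

    pthread_mutex_lock(&inode.i_lock);
    dirty = mark_dirty_caps(&inode, 0x4);   /* e.g. a "file write" cap bit */
    pthread_mutex_unlock(&inode.i_lock);

    if (dirty)
        mark_inode_dirty(&inode, dirty);    /* lock already released */
    return 0;
}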
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 4bc862a80ef..05f1dcf7d79 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -274,7 +274,8 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
     char *data_area_of_target;
     char *data_area_of_buf2;
     int remaining;
-    __u16 byte_count, total_data_size, total_in_buf, total_in_buf2;
+    unsigned int byte_count, total_in_buf;
+    __u16 total_data_size, total_in_buf2;
 
     total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
 
@@ -287,7 +288,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
     remaining = total_data_size - total_in_buf;
 
     if (remaining < 0)
-        return -EINVAL;
+        return -EPROTO;
 
     if (remaining == 0) /* nothing to do, ignore */
         return 0;
@@ -308,20 +309,29 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
     data_area_of_target += total_in_buf;
 
     /* copy second buffer into end of first buffer */
-    memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
     total_in_buf += total_in_buf2;
+    /* is the result too big for the field? */
+    if (total_in_buf > USHRT_MAX)
+        return -EPROTO;
     put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount);
+
+    /* fix up the BCC */
     byte_count = get_bcc_le(pTargetSMB);
     byte_count += total_in_buf2;
+    /* is the result too big for the field? */
+    if (byte_count > USHRT_MAX)
+        return -EPROTO;
     put_bcc_le(byte_count, pTargetSMB);
 
     byte_count = pTargetSMB->smb_buf_length;
     byte_count += total_in_buf2;
-
-    /* BB also add check that we are not beyond maximum buffer size */
-
+    /* don't allow buffer to overflow */
+    if (byte_count > CIFSMaxBufSize)
+        return -ENOBUFS;
     pTargetSMB->smb_buf_length = byte_count;
 
+    memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
+
     if (remaining == total_in_buf2) {
         cFYI(1, "found the last secondary response");
         return 0; /* we are done */
@@ -607,59 +617,63 @@ incomplete_rcv:
         list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
             mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
 
-            if ((mid_entry->mid == smb_buffer->Mid) &&
-                (mid_entry->midState == MID_REQUEST_SUBMITTED) &&
-                (mid_entry->command == smb_buffer->Command)) {
-                if (length == 0 &&
-                   check2ndT2(smb_buffer, server->maxBuf) > 0) {
-                    /* We have a multipart transact2 resp */
-                    isMultiRsp = true;
-                    if (mid_entry->resp_buf) {
-                        /* merge response - fix up 1st*/
-                        if (coalesce_t2(smb_buffer,
-                            mid_entry->resp_buf)) {
-                            mid_entry->multiRsp =
-                                true;
-                            break;
-                        } else {
-                            /* all parts received */
-                            mid_entry->multiEnd =
-                                true;
-                            goto multi_t2_fnd;
-                        }
+            if (mid_entry->mid != smb_buffer->Mid ||
+                mid_entry->midState != MID_REQUEST_SUBMITTED ||
+                mid_entry->command != smb_buffer->Command) {
+                mid_entry = NULL;
+                continue;
+            }
+
+            if (length == 0 &&
+                check2ndT2(smb_buffer, server->maxBuf) > 0) {
+                /* We have a multipart transact2 resp */
+                isMultiRsp = true;
+                if (mid_entry->resp_buf) {
+                    /* merge response - fix up 1st*/
+                    length = coalesce_t2(smb_buffer,
+                            mid_entry->resp_buf);
+                    if (length > 0) {
+                        length = 0;
+                        mid_entry->multiRsp = true;
+                        break;
                     } else {
-                        if (!isLargeBuf) {
-                            cERROR(1, "1st trans2 resp needs bigbuf");
-                    /* BB maybe we can fix this up, switch
-                       to already allocated large buffer? */
-                        } else {
-                            /* Have first buffer */
-                            mid_entry->resp_buf =
-                                smb_buffer;
-                            mid_entry->largeBuf =
-                                true;
-                            bigbuf = NULL;
-                        }
+                        /* all parts received or
+                         * packet is malformed
+                         */
+                        mid_entry->multiEnd = true;
+                        goto multi_t2_fnd;
+                    }
+                } else {
+                    if (!isLargeBuf) {
+                        /*
+                         * FIXME: switch to already
+                         *        allocated largebuf?
+                         */
+                        cERROR(1, "1st trans2 resp "
+                               "needs bigbuf");
+                    } else {
+                        /* Have first buffer */
+                        mid_entry->resp_buf =
+                            smb_buffer;
+                        mid_entry->largeBuf = true;
+                        bigbuf = NULL;
                     }
-                    break;
                 }
-                mid_entry->resp_buf = smb_buffer;
-                mid_entry->largeBuf = isLargeBuf;
+                break;
+            }
+            mid_entry->resp_buf = smb_buffer;
+            mid_entry->largeBuf = isLargeBuf;
 multi_t2_fnd:
-                if (length == 0)
-                    mid_entry->midState =
-                            MID_RESPONSE_RECEIVED;
-                else
-                    mid_entry->midState =
-                            MID_RESPONSE_MALFORMED;
+            if (length == 0)
+                mid_entry->midState = MID_RESPONSE_RECEIVED;
+            else
+                mid_entry->midState = MID_RESPONSE_MALFORMED;
 #ifdef CONFIG_CIFS_STATS2
-                mid_entry->when_received = jiffies;
+            mid_entry->when_received = jiffies;
 #endif
-                list_del_init(&mid_entry->qhead);
-                mid_entry->callback(mid_entry);
-                break;
-            }
-            mid_entry = NULL;
+            list_del_init(&mid_entry->qhead);
+            mid_entry->callback(mid_entry);
+            break;
         }
         spin_unlock(&GlobalMid_Lock);
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index f6728eb6f4b..645114ad0a1 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -276,7 +276,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
 }
 
 static void
-decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses,
+decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
                       const struct nls_table *nls_cp)
 {
     int len;
@@ -284,19 +284,6 @@ decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses,
 
     cFYI(1, "bleft %d", bleft);
 
-    /*
-     * Windows servers do not always double null terminate their final
-     * Unicode string. Check to see if there are an uneven number of bytes
-     * left. If so, then add an extra NULL pad byte to the end of the
-     * response.
-     *
-     * See section 2.7.2 in "Implementing CIFS" for details
-     */
-    if (bleft % 2) {
-        data[bleft] = 0;
-        ++bleft;
-    }
-
     kfree(ses->serverOS);
     ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
     cFYI(1, "serverOS=%s", ses->serverOS);
@@ -929,7 +916,9 @@ ssetup_ntlmssp_authenticate:
     }
 
     /* BB check if Unicode and decode strings */
-    if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
+    if (bytes_remaining == 0) {
+        /* no string area to decode, do nothing */
+    } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
         /* unicode string area must be word-aligned */
         if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
             ++bcc_ptr;
diff --git a/fs/hpfs/Kconfig b/fs/hpfs/Kconfig
index 0c39dc3ef7d..56bd15c5bf6 100644
--- a/fs/hpfs/Kconfig
+++ b/fs/hpfs/Kconfig
@@ -1,7 +1,6 @@
 config HPFS_FS
 	tristate "OS/2 HPFS file system support"
 	depends on BLOCK
-	depends on BROKEN || !PREEMPT
 	help
 	  OS/2 is IBM's operating system for PC's, the same as Warp, and HPFS
 	  is the file system used for organizing files on OS/2 hard disk
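Note on the cifs/connect.c hunks above: coalesce_t2() now widens its local counters, validates that the merged length still fits the 16-bit wire fields (USHRT_MAX) and the receive buffer (CIFSMaxBufSize), and only then performs the memcpy(). A standalone sketch of the same check-before-commit pattern follows, with illustrative names in place of the cifs structures.

#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_MAX_BUF 16384              /* stand-in for CIFSMaxBufSize */

/* Append 'add' bytes to a buffer whose current length is tracked in a
 * 16-bit wire field.  Validate every derived count before touching the
 * destination, mirroring the order used in the patched coalesce_t2(). */
static int append_checked(uint8_t *dst, uint16_t *wire_len,
                          const uint8_t *src, size_t add)
{
    size_t total = (size_t)*wire_len + add;

    if (total > USHRT_MAX)              /* would not fit the 16-bit field */
        return -EPROTO;
    if (total > DEMO_MAX_BUF)           /* would overflow the buffer itself */
        return -ENOBUFS;

    memcpy(dst + *wire_len, src, add);  /* only now modify the target */
    *wire_len = (uint16_t)total;
    return 0;
}

int main(void)
{
    static uint8_t buf[DEMO_MAX_BUF];
    uint16_t len = 0;
    const uint8_t part[] = "secondary response data";

    printf("first append: %d\n", append_checked(buf, &len, part, sizeof(part)));
    printf("oversized append: %d\n",
           append_checked(buf, &len, part, DEMO_MAX_BUF));
    return 0;
}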
diff --git a/fs/hpfs/alloc.c b/fs/hpfs/alloc.c
index 5503e2c2891..7a5eb2c718c 100644
--- a/fs/hpfs/alloc.c
+++ b/fs/hpfs/alloc.c
@@ -8,8 +8,6 @@
 
 #include "hpfs_fn.h"
 
-static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec);
-
 /*
  * Check if a sector is allocated in bitmap
  * This is really slow. Turned on only if chk==2
@@ -18,9 +16,9 @@ static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec);
 static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
 {
     struct quad_buffer_head qbh;
-    unsigned *bmp;
+    u32 *bmp;
     if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail;
-    if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f)) & 1) {
+    if ((cpu_to_le32(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) {
         hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec);
         goto fail1;
     }
@@ -28,7 +26,7 @@ static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
     if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) {
         unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4;
         if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail;
-        if ((bmp[ssec >> 5] >> (ssec & 0x1f)) & 1) {
+        if ((le32_to_cpu(bmp[ssec >> 5]) >> (ssec & 0x1f)) & 1) {
             hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec);
             goto fail1;
         }
@@ -75,7 +73,6 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
         hpfs_error(s, "Bad allocation size: %d", n);
         return 0;
     }
-    lock_super(s);
     if (bs != ~0x3fff) {
         if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls;
     } else {
@@ -85,10 +82,6 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
         ret = bs + nr;
         goto rt;
     }
-    /*if (!tstbits(bmp, nr + n, n + forward)) {
-        ret = bs + nr + n;
-        goto rt;
-    }*/
     q = nr + n; b = 0;
     while ((a = tstbits(bmp, q, n + forward)) != 0) {
         q += a;
@@ -105,14 +98,14 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
         goto rt;
     }
     nr >>= 5;
-    /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) {*/
+    /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) */
     i = nr;
     do {
-        if (!bmp[i]) goto cont;
-        if (n + forward >= 0x3f && bmp[i] != -1) goto cont;
+        if (!le32_to_cpu(bmp[i])) goto cont;
+        if (n + forward >= 0x3f && le32_to_cpu(bmp[i]) != 0xffffffff) goto cont;
         q = i<<5;
         if (i > 0) {
-            unsigned k = bmp[i-1];
+            unsigned k = le32_to_cpu(bmp[i-1]);
             while (k & 0x80000000) {
                 q--; k <<= 1;
             }
@@ -132,18 +125,17 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
     } while (i != nr);
 
     rt:
     if (ret) {
-        if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (bmp[(ret & 0x3fff) >> 5] | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) {
+        if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) {
            hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret);
            ret = 0;
            goto b;
        }
-        bmp[(ret & 0x3fff) >> 5] &= ~(((1 << n) - 1) << (ret & 0x1f));
+        bmp[(ret & 0x3fff) >> 5] &= cpu_to_le32(~(((1 << n) - 1) << (ret & 0x1f)));
         hpfs_mark_4buffers_dirty(&qbh);
     }
     b:
     hpfs_brelse4(&qbh);
     uls:
-    unlock_super(s);
     return ret;
 }
@@ -155,7 +147,7 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
  * sectors
  */
 
-secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward, int lock)
+secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward)
 {
     secno sec;
     int i;
@@ -167,7 +159,6 @@ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forwa
         forward = -forward;
         f_p = 1;
     }
-    if (lock) hpfs_lock_creation(s);
     n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14;
     if (near && near < sbi->sb_fs_size) {
         if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret;
@@ -214,18 +205,17 @@ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forwa
     ret:
     if (sec && f_p) {
         for (i = 0; i < forward; i++) {
-            if (!hpfs_alloc_if_possible_nolock(s, sec + i + 1)) {
+            if (!hpfs_alloc_if_possible(s, sec + i + 1)) {
                 hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i);
                 sec = 0;
                 break;
             }
         }
     }
-    if (lock) hpfs_unlock_creation(s);
     return sec;
 }
 
-static secno alloc_in_dirband(struct super_block *s, secno near, int lock)
+static secno alloc_in_dirband(struct super_block *s, secno near)
 {
     unsigned nr = near;
     secno sec;
@@ -236,49 +226,35 @@ static secno alloc_in_dirband(struct super_block *s, secno near, int lock)
         nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4;
     nr -= sbi->sb_dirband_start;
     nr >>= 2;
-    if (lock) hpfs_lock_creation(s);
     sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0);
-    if (lock) hpfs_unlock_creation(s);
     if (!sec) return 0;
     return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start;
 }
 
 /* Alloc sector if it's free */
 
-static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec)
+int hpfs_alloc_if_possible(struct super_block *s, secno sec)
 {
     struct quad_buffer_head qbh;
-    unsigned *bmp;
-    lock_super(s);
+    u32 *bmp;
     if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end;
-    if (bmp[(sec & 0x3fff) >> 5] & (1 << (sec & 0x1f))) {
-        bmp[(sec & 0x3fff) >> 5] &= ~(1 << (sec & 0x1f));
+    if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) {
+        bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f)));
         hpfs_mark_4buffers_dirty(&qbh);
         hpfs_brelse4(&qbh);
-        unlock_super(s);
         return 1;
     }
     hpfs_brelse4(&qbh);
     end:
-    unlock_super(s);
     return 0;
 }
 
-int hpfs_alloc_if_possible(struct super_block *s, secno sec)
-{
-    int r;
-    hpfs_lock_creation(s);
-    r = hpfs_alloc_if_possible_nolock(s, sec);
-    hpfs_unlock_creation(s);
-    return r;
-}
-
 /* Free sectors in bitmaps */
 
 void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
 {
     struct quad_buffer_head qbh;
-    unsigned *bmp;
+    u32 *bmp;
     struct hpfs_sb_info *sbi = hpfs_sb(s);
     /*printk("2 - ");*/
     if (!n) return;
@@ -286,26 +262,22 @@ void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
         hpfs_error(s, "Trying to free reserved sector %08x", sec);
         return;
     }
-    lock_super(s);
     sbi->sb_max_fwd_alloc += n > 0xffff ? 0xffff : n;
     if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff;
     new_map:
     if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) {
-        unlock_super(s);
         return;
     }
     new_tst:
-    if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f) & 1)) {
+    if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f) & 1)) {
         hpfs_error(s, "sector %08x not allocated", sec);
         hpfs_brelse4(&qbh);
-        unlock_super(s);
         return;
     }
-    bmp[(sec & 0x3fff) >> 5] |= 1 << (sec & 0x1f);
+    bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f));
     if (!--n) {
         hpfs_mark_4buffers_dirty(&qbh);
         hpfs_brelse4(&qbh);
-        unlock_super(s);
         return;
     }
     if (!(++sec & 0x3fff)) {
@@ -327,13 +299,13 @@ int hpfs_check_free_dnodes(struct super_block *s, int n)
     int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14;
     int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff;
     int i, j;
-    unsigned *bmp;
+    u32 *bmp;
     struct quad_buffer_head qbh;
     if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
         for (j = 0; j < 512; j++) {
             unsigned k;
-            if (!bmp[j]) continue;
-            for (k = bmp[j]; k; k >>= 1) if (k & 1) if (!--n) {
+            if (!le32_to_cpu(bmp[j])) continue;
+            for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) {
                 hpfs_brelse4(&qbh);
                 return 0;
             }
@@ -352,10 +324,10 @@ int hpfs_check_free_dnodes(struct super_block *s, int n)
     chk_bmp:
     if (bmp) {
         for (j = 0; j < 512; j++) {
-            unsigned k;
-            if (!bmp[j]) continue;
+            u32 k;
+            if (!le32_to_cpu(bmp[j])) continue;
             for (k = 0xf; k; k <<= 4)
-                if ((bmp[j] & k) == k) {
+                if ((le32_to_cpu(bmp[j]) & k) == k) {
                     if (!--n) {
                         hpfs_brelse4(&qbh);
                         return 0;
@@ -379,44 +351,40 @@ void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
         hpfs_free_sectors(s, dno, 4);
     } else {
         struct quad_buffer_head qbh;
-        unsigned *bmp;
+        u32 *bmp;
         unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4;
-        lock_super(s);
         if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
-            unlock_super(s);
             return;
         }
-        bmp[ssec >> 5] |= 1 << (ssec & 0x1f);
+        bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f));
         hpfs_mark_4buffers_dirty(&qbh);
         hpfs_brelse4(&qbh);
-        unlock_super(s);
     }
 }
 
 struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near,
-                dnode_secno *dno, struct quad_buffer_head *qbh,
-                int lock)
+                dnode_secno *dno, struct quad_buffer_head *qbh)
 {
     struct dnode *d;
 
     if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) {
-        if (!(*dno = alloc_in_dirband(s, near, lock)))
-            if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock))) return NULL;
+        if (!(*dno = alloc_in_dirband(s, near)))
+            if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL;
     } else {
-        if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock)))
-            if (!(*dno = alloc_in_dirband(s, near, lock))) return NULL;
+        if (!(*dno = hpfs_alloc_sector(s, near, 4, 0)))
+            if (!(*dno = alloc_in_dirband(s, near))) return NULL;
     }
     if (!(d = hpfs_get_4sectors(s, *dno, qbh))) {
        hpfs_free_dnode(s, *dno);
        return NULL;
    }
     memset(d, 0, 2048);
-    d->magic = DNODE_MAGIC;
-    d->first_free = 52;
+    d->magic = cpu_to_le32(DNODE_MAGIC);
+    d->first_free = cpu_to_le32(52);
     d->dirent[0] = 32;
     d->dirent[2] = 8;
     d->dirent[30] = 1;
     d->dirent[31] = 255;
-    d->self = *dno;
+    d->self = cpu_to_le32(*dno);
     return d;
 }
@@ -424,16 +392,16 @@ struct fnode *hpfs_alloc_fnode(struct super_block *s, secno near, fnode_secno *f
                 struct buffer_head **bh)
 {
     struct fnode *f;
 
-    if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD, 1))) return NULL;
+    if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD))) return NULL;
     if (!(f = hpfs_get_sector(s, *fno, bh))) {
         hpfs_free_sectors(s, *fno, 1);
         return NULL;
     }
     memset(f, 0, 512);
-    f->magic = FNODE_MAGIC;
-    f->ea_offs = 0xc4;
+    f->magic = cpu_to_le32(FNODE_MAGIC);
+    f->ea_offs = cpu_to_le16(0xc4);
     f->btree.n_free_nodes = 8;
-    f->btree.first_free = 8;
+    f->btree.first_free = cpu_to_le16(8);
     return f;
 }
@@ -441,16 +409,16 @@ struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *a
                 struct buffer_head **bh)
 {
     struct anode *a;
-    if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD, 1))) return NULL;
+    if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD))) return NULL;
     if (!(a = hpfs_get_sector(s, *ano, bh))) {
         hpfs_free_sectors(s, *ano, 1);
         return NULL;
     }
     memset(a, 0, 512);
-    a->magic = ANODE_MAGIC;
-    a->self = *ano;
+    a->magic = cpu_to_le32(ANODE_MAGIC);
+    a->self = cpu_to_le32(*ano);
     a->btree.n_free_nodes = 40;
     a->btree.n_used_nodes = 0;
-    a->btree.first_free = 8;
+    a->btree.first_free = cpu_to_le16(8);
     return a;
 }
diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c
index 6a2f04bf3df..08b503e8ed2 100644
--- a/fs/hpfs/anode.c
+++ b/fs/hpfs/anode.c
@@ -22,8 +22,8 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
     if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
     if (btree->internal) {
         for (i = 0; i < btree->n_used_nodes; i++)
-            if (btree->u.internal[i].file_secno > sec) {
-                a = btree->u.internal[i].down;
+            if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
+                a = le32_to_cpu(btree->u.internal[i].down);
                 brelse(bh);
                 if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
                 btree = &anode->btree;
@@ -34,18 +34,18 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
             return -1;
     }
     for (i = 0; i < btree->n_used_nodes; i++)
-        if (btree->u.external[i].file_secno <= sec &&
-            btree->u.external[i].file_secno + btree->u.external[i].length > sec) {
-            a = btree->u.external[i].disk_secno + sec - btree->u.external[i].file_secno;
+        if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&
+            le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {
+            a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno);
             if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) {
                 brelse(bh);
                 return -1;
             }
             if (inode) {
                 struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
-                hpfs_inode->i_file_sec = btree->u.external[i].file_secno;
-                hpfs_inode->i_disk_sec = btree->u.external[i].disk_secno;
-                hpfs_inode->i_n_secs = btree->u.external[i].length;
+                hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno);
+                hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno);
+                hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length);
             }
             brelse(bh);
             return a;
@@ -83,8 +83,8 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
         return -1;
     }
     if (btree->internal) {
-        a = btree->u.internal[n].down;
-        btree->u.internal[n].file_secno = -1;
+        a = le32_to_cpu(btree->u.internal[n].down);
+        btree->u.internal[n].file_secno = cpu_to_le32(-1);
         mark_buffer_dirty(bh);
         brelse(bh);
         if (hpfs_sb(s)->sb_chk)
@@ -94,15 +94,15 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
         goto go_down;
     }
     if (n >= 0) {
-        if (btree->u.external[n].file_secno + btree->u.external[n].length != fsecno) {
+        if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) {
             hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
-                btree->u.external[n].file_secno + btree->u.external[n].length, fsecno,
+                le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno,
                 fnod?'f':'a', node);
             brelse(bh);
             return -1;
         }
-        if (hpfs_alloc_if_possible(s, se = btree->u.external[n].disk_secno + btree->u.external[n].length)) {
-            btree->u.external[n].length++;
+        if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
+            btree->u.external[n].length = cpu_to_le32(le32_to_cpu(btree->u.external[n].length) + 1);
             mark_buffer_dirty(bh);
             brelse(bh);
             return se;
@@ -115,20 +115,20 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
         }
         se = !fnod ? node : (node + 16384) & ~16383;
     }
-    if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M, 1))) {
+    if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) {
         brelse(bh);
         return -1;
     }
-    fs = n < 0 ? 0 : btree->u.external[n].file_secno + btree->u.external[n].length;
+    fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length);
     if (!btree->n_free_nodes) {
-        up = a != node ? anode->up : -1;
+        up = a != node ? le32_to_cpu(anode->up) : -1;
         if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
             brelse(bh);
             hpfs_free_sectors(s, se, 1);
             return -1;
         }
         if (a == node && fnod) {
-            anode->up = node;
+            anode->up = cpu_to_le32(node);
             anode->btree.fnode_parent = 1;
             anode->btree.n_used_nodes = btree->n_used_nodes;
             anode->btree.first_free = btree->first_free;
@@ -137,9 +137,9 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
             btree->internal = 1;
             btree->n_free_nodes = 11;
             btree->n_used_nodes = 1;
-            btree->first_free = (char *)&(btree->
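Note on the hpfs hunks above (alloc.c and the start of anode.c): besides removing lock_super()/hpfs_lock_creation(), the recurring change is endianness handling; on-disk 32-bit words are treated as little-endian and wrapped in le32_to_cpu()/cpu_to_le32() at each access. Below is a small portable sketch of testing and clearing a bit in such a little-endian bitmap word; the helpers mimic the kernel names but are reimplemented here for a userspace build, and the bit semantics (set bit = free sector, as in the HPFS bitmaps above) are taken from the hunks.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's le32_to_cpu()/cpu_to_le32().
 * They byte-swap only on big-endian hosts, so the on-disk (little-endian)
 * layout is interpreted the same way everywhere. */
static uint32_t le32_to_cpu(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    return __builtin_bswap32(v);
#else
    return v;
#endif
}

static uint32_t cpu_to_le32(uint32_t v)
{
    return le32_to_cpu(v);      /* the swap is symmetric */
}

/* Test bit 'sec' of a bitmap of little-endian 32-bit words and clear it
 * to mark the sector allocated; a set bit means the sector is free. */
static int alloc_bit(uint32_t *bmp, unsigned int sec)
{
    unsigned int word = sec >> 5, bit = sec & 0x1f;

    if (!(le32_to_cpu(bmp[word]) & (1U << bit)))
        return 0;                               /* already allocated */
    bmp[word] &= cpu_to_le32(~(1U << bit));     /* storage stays little-endian */
    return 1;
}

int main(void)
{
    uint32_t bmp[2] = { cpu_to_le32(0xffffffff), cpu_to_le32(0xffffffff) };

    printf("alloc sector 37: %d\n", alloc_bit(bmp, 37));
    printf("alloc sector 37 again: %d\n", alloc_bit(bmp, 37));
    return 0;
}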