Diffstat (limited to 'net/irda/irlap.c')
-rw-r--r-- net/irda/irlap.c | 4
1 files changed, 1 insertions, 3 deletions
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 005b424494a..a778df55f5d 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -23,9 +23,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
********************************************************************/
-rw-r--r-- fs/xfs/linux-2.6/sv.h | 73
-rw-r--r-- fs/xfs/linux-2.6/xfs_aops.c | 1586
-rw-r--r-- fs/xfs/linux-2.6/xfs_buf.c | 1854
-rw-r--r-- fs/xfs/linux-2.6/xfs_buf.h | 446
-rw-r--r-- fs/xfs/linux-2.6/xfs_dmapi_priv.h | 28
-rw-r--r-- fs/xfs/linux-2.6/xfs_file.c | 488
-rw-r--r-- fs/xfs/linux-2.6/xfs_fs_subr.c | 81
-rw-r--r-- fs/xfs/linux-2.6/xfs_fs_subr.h | 25
-rw-r--r-- fs/xfs/linux-2.6/xfs_globals.h | 24
-rw-r--r-- fs/xfs/linux-2.6/xfs_ioctl.c | 1285
-rw-r--r-- fs/xfs/linux-2.6/xfs_ioctl32.c | 488
-rw-r--r-- fs/xfs/linux-2.6/xfs_iops.c | 917
-rw-r--r-- fs/xfs/linux-2.6/xfs_lrw.c | 920
-rw-r--r-- fs/xfs/linux-2.6/xfs_lrw.h | 77
-rw-r--r-- fs/xfs/linux-2.6/xfs_stats.c | 114
-rw-r--r-- fs/xfs/linux-2.6/xfs_super.c | 1488
-rw-r--r-- fs/xfs/linux-2.6/xfs_vfs.h | 77
-rw-r--r-- fs/xfs/linux-2.6/xfs_vnode.c | 213
-rw-r--r-- fs/xfs/linux-2.6/xfs_vnode.h | 267
-rw-r--r-- fs/xfs/mrlock.h (renamed from fs/xfs/linux-2.6/mrlock.h) | 12
-rw-r--r-- fs/xfs/quota/xfs_dquot.c | 1602
-rw-r--r-- fs/xfs/quota/xfs_dquot.h | 185
-rw-r--r-- fs/xfs/quota/xfs_dquot_item.c | 676
-rw-r--r-- fs/xfs/quota/xfs_qm.c | 2835
-rw-r--r-- fs/xfs/quota/xfs_qm.h | 210
-rw-r--r-- fs/xfs/quota/xfs_qm_bhv.c | 261
-rw-r--r-- fs/xfs/quota/xfs_qm_stats.c | 130
-rw-r--r-- fs/xfs/quota/xfs_qm_stats.h | 53
-rw-r--r-- fs/xfs/quota/xfs_qm_syscalls.c | 1476
-rw-r--r-- fs/xfs/quota/xfs_quota_priv.h | 168
-rw-r--r-- fs/xfs/support/debug.c | 88
-rw-r--r-- fs/xfs/support/debug.h | 74
-rw-r--r-- fs/xfs/support/ktrace.c | 328
-rw-r--r-- fs/xfs/support/ktrace.h | 85
-rw-r--r-- fs/xfs/time.h (renamed from fs/xfs/linux-2.6/time.h) | 0
-rw-r--r-- fs/xfs/uuid.c (renamed from fs/xfs/support/uuid.c) | 77
-rw-r--r-- fs/xfs/uuid.h (renamed from fs/xfs/support/uuid.h) | 11
-rw-r--r-- fs/xfs/xfs.h | 21
-rw-r--r-- fs/xfs/xfs_acl.c | 951
-rw-r--r-- fs/xfs/xfs_acl.h | 110
-rw-r--r-- fs/xfs/xfs_ag.h | 179
-rw-r--r-- fs/xfs/xfs_alloc.c | 1675
-rw-r--r-- fs/xfs/xfs_alloc.h | 101
-rw-r--r-- fs/xfs/xfs_alloc_btree.c | 2469
-rw-r--r-- fs/xfs/xfs_alloc_btree.h | 150
-rw-r--r-- fs/xfs/xfs_aops.c | 1770
-rw-r--r-- fs/xfs/xfs_aops.h (renamed from fs/xfs/linux-2.6/xfs_aops.h) | 21
-rw-r--r-- fs/xfs/xfs_arch.h | 221
-rw-r--r-- fs/xfs/xfs_attr.c | 2024
-rw-r--r-- fs/xfs/xfs_attr.h | 107
-rw-r--r-- fs/xfs/xfs_attr_inactive.c | 452
-rw-r--r-- fs/xfs/xfs_attr_leaf.c | 2837
-rw-r--r-- fs/xfs/xfs_attr_leaf.h | 254
-rw-r--r-- fs/xfs/xfs_attr_list.c | 653
-rw-r--r-- fs/xfs/xfs_attr_remote.c | 628
-rw-r--r-- fs/xfs/xfs_attr_remote.h (renamed from fs/xfs/linux-2.6/xfs_version.h) | 18
-rw-r--r-- fs/xfs/xfs_attr_sf.h | 46
-rw-r--r-- fs/xfs/xfs_bit.c | 107
-rw-r--r-- fs/xfs/xfs_bit.h | 46
-rw-r--r-- fs/xfs/xfs_bmap.c | 7904
-rw-r--r-- fs/xfs/xfs_bmap.h | 403
-rw-r--r-- fs/xfs/xfs_bmap_btree.c | 2801
-rw-r--r-- fs/xfs/xfs_bmap_btree.h | 294
-rw-r--r-- fs/xfs/xfs_bmap_util.c | 1897
-rw-r--r-- fs/xfs/xfs_bmap_util.h | 112
-rw-r--r-- fs/xfs/xfs_btree.c | 4276
-rw-r--r-- fs/xfs/xfs_btree.h | 468
-rw-r--r-- fs/xfs/xfs_buf.c | 1890
-rw-r--r-- fs/xfs/xfs_buf.h | 393
-rw-r--r-- fs/xfs/xfs_buf_item.c | 1385
-rw-r--r-- fs/xfs/xfs_buf_item.h | 103
-rw-r--r-- fs/xfs/xfs_cksum.h | 63
-rw-r--r-- fs/xfs/xfs_clnt.h | 104
-rw-r--r-- fs/xfs/xfs_da_btree.c | 2601
-rw-r--r-- fs/xfs/xfs_da_btree.h | 215
-rw-r--r-- fs/xfs/xfs_da_format.c | 911
-rw-r--r-- fs/xfs/xfs_da_format.h | 861
-rw-r--r-- fs/xfs/xfs_dfrag.c | 386
-rw-r--r-- fs/xfs/xfs_dfrag.h | 56
-rw-r--r-- fs/xfs/xfs_dinode.h | 188
-rw-r--r-- fs/xfs/xfs_dir2.c | 770
-rw-r--r-- fs/xfs/xfs_dir2.h | 164
-rw-r--r-- fs/xfs/xfs_dir2_block.c | 1089
-rw-r--r-- fs/xfs/xfs_dir2_block.h | 92
-rw-r--r-- fs/xfs/xfs_dir2_data.c | 716
-rw-r--r-- fs/xfs/xfs_dir2_data.h | 184
-rw-r--r-- fs/xfs/xfs_dir2_leaf.c | 1869
-rw-r--r-- fs/xfs/xfs_dir2_leaf.h | 253
-rw-r--r-- fs/xfs/xfs_dir2_node.c | 1914
-rw-r--r-- fs/xfs/xfs_dir2_node.h | 102
-rw-r--r-- fs/xfs/xfs_dir2_priv.h | 274
-rw-r--r-- fs/xfs/xfs_dir2_readdir.c | 700
-rw-r--r-- fs/xfs/xfs_dir2_sf.c | 614
-rw-r--r-- fs/xfs/xfs_dir2_sf.h | 178
-rw-r--r-- fs/xfs/xfs_dir2_trace.c | 214
-rw-r--r-- fs/xfs/xfs_dir2_trace.h | 72
-rw-r--r-- fs/xfs/xfs_discard.c | 240
-rw-r--r-- fs/xfs/xfs_discard.h | 10
-rw-r--r-- fs/xfs/xfs_dmapi.h | 171
-rw-r--r-- fs/xfs/xfs_dmops.c | 56
-rw-r--r-- fs/xfs/xfs_dquot.c | 1105
-rw-r--r-- fs/xfs/xfs_dquot.h | 173
-rw-r--r-- fs/xfs/xfs_dquot_buf.c | 290
-rw-r--r-- fs/xfs/xfs_dquot_item.c | 445
-rw-r--r-- fs/xfs/xfs_dquot_item.h (renamed from fs/xfs/quota/xfs_dquot_item.h) | 7
-rw-r--r-- fs/xfs/xfs_error.c | 157
-rw-r--r-- fs/xfs/xfs_error.h | 53
-rw-r--r-- fs/xfs/xfs_export.c (renamed from fs/xfs/linux-2.6/xfs_export.c) | 150
-rw-r--r-- fs/xfs/xfs_export.h (renamed from fs/xfs/linux-2.6/xfs_export.h) | 0
-rw-r--r-- fs/xfs/xfs_extent_busy.c | 605
-rw-r--r-- fs/xfs/xfs_extent_busy.h | 73
-rw-r--r-- fs/xfs/xfs_extfree_item.c | 483
-rw-r--r-- fs/xfs/xfs_extfree_item.h | 117
-rw-r--r-- fs/xfs/xfs_file.c | 1433
-rw-r--r-- fs/xfs/xfs_filestream.c | 807
-rw-r--r-- fs/xfs/xfs_filestream.h | 102
-rw-r--r-- fs/xfs/xfs_format.h | 428
-rw-r--r-- fs/xfs/xfs_fs.h | 125
-rw-r--r-- fs/xfs/xfs_fsops.c | 510
-rw-r--r-- fs/xfs/xfs_fsops.h | 2
-rw-r--r-- fs/xfs/xfs_globals.c (renamed from fs/xfs/linux-2.6/xfs_globals.c) | 13
-rw-r--r-- fs/xfs/xfs_ialloc.c | 2517
-rw-r--r-- fs/xfs/xfs_ialloc.h | 72
-rw-r--r-- fs/xfs/xfs_ialloc_btree.c | 2258
-rw-r--r-- fs/xfs/xfs_ialloc_btree.h | 173
-rw-r--r-- fs/xfs/xfs_icache.c | 1327
-rw-r--r-- fs/xfs/xfs_icache.h | 104
-rw-r--r-- fs/xfs/xfs_icreate_item.c | 189
-rw-r--r-- fs/xfs/xfs_icreate_item.h (renamed from fs/xfs/linux-2.6/xfs_ioctl32.h) | 22
-rw-r--r-- fs/xfs/xfs_iget.c | 800
-rw-r--r-- fs/xfs/xfs_imap.h | 40
-rw-r--r-- fs/xfs/xfs_inode.c | 5800
-rw-r--r-- fs/xfs/xfs_inode.h | 599
-rw-r--r-- fs/xfs/xfs_inode_buf.c | 479
-rw-r--r-- fs/xfs/xfs_inode_buf.h | 50
-rw-r--r-- fs/xfs/xfs_inode_fork.c | 1906
-rw-r--r-- fs/xfs/xfs_inode_fork.h | 171
-rw-r--r-- fs/xfs/xfs_inode_item.c | 1243
-rw-r--r-- fs/xfs/xfs_inode_item.h | 155
-rw-r--r-- fs/xfs/xfs_inum.h | 17
-rw-r--r-- fs/xfs/xfs_ioctl.c | 1810
-rw-r--r-- fs/xfs/xfs_ioctl.h | 95
-rw-r--r-- fs/xfs/xfs_ioctl32.c | 681
-rw-r--r-- fs/xfs/xfs_ioctl32.h | 237
-rw-r--r-- fs/xfs/xfs_iomap.c | 900
-rw-r--r-- fs/xfs/xfs_iomap.h | 69
-rw-r--r-- fs/xfs/xfs_iops.c | 1299
-rw-r--r-- fs/xfs/xfs_iops.h (renamed from fs/xfs/linux-2.6/xfs_iops.h) | 21
-rw-r--r-- fs/xfs/xfs_itable.c | 518
-rw-r--r-- fs/xfs/xfs_itable.h | 28
-rw-r--r-- fs/xfs/xfs_linux.h (renamed from fs/xfs/linux-2.6/xfs_linux.h) | 184
-rw-r--r-- fs/xfs/xfs_log.c | 3575
-rw-r--r-- fs/xfs/xfs_log.h | 207
-rw-r--r-- fs/xfs/xfs_log_cil.c | 972
-rw-r--r-- fs/xfs/xfs_log_format.h | 679
-rw-r--r-- fs/xfs/xfs_log_priv.h | 514
-rw-r--r-- fs/xfs/xfs_log_recover.c | 3600
-rw-r--r-- fs/xfs/xfs_log_recover.h | 25
-rw-r--r-- fs/xfs/xfs_log_rlimit.c | 150
-rw-r--r-- fs/xfs/xfs_message.c | 114
-rw-r--r-- fs/xfs/xfs_message.h | 64
-rw-r--r-- fs/xfs/xfs_mount.c | 1777
-rw-r--r-- fs/xfs/xfs_mount.h | 421
-rw-r--r-- fs/xfs/xfs_mru_cache.c | 193
-rw-r--r-- fs/xfs/xfs_mru_cache.h | 33
-rw-r--r-- fs/xfs/xfs_qm.c | 1966
-rw-r--r-- fs/xfs/xfs_qm.h | 180
-rw-r--r-- fs/xfs/xfs_qm_bhv.c | 151
-rw-r--r-- fs/xfs/xfs_qm_syscalls.c | 1007
-rw-r--r-- fs/xfs/xfs_qmops.c | 154
-rw-r--r-- fs/xfs/xfs_quota.h | 396
-rw-r--r-- fs/xfs/xfs_quota_defs.h | 161
-rw-r--r-- fs/xfs/xfs_quotaops.c | 175
-rw-r--r-- fs/xfs/xfs_refcache.h | 52
-rw-r--r-- fs/xfs/xfs_rename.c | 431
-rw-r--r-- fs/xfs/xfs_rtalloc.c | 1744
-rw-r--r-- fs/xfs/xfs_rtalloc.h | 92
-rw-r--r-- fs/xfs/xfs_rtbitmap.c | 973
-rw-r--r-- fs/xfs/xfs_rw.c | 423
-rw-r--r-- fs/xfs/xfs_rw.h | 88
-rw-r--r-- fs/xfs/xfs_sb.c | 836
-rw-r--r-- fs/xfs/xfs_sb.h | 384
-rw-r--r-- fs/xfs/xfs_shared.h | 246
-rw-r--r-- fs/xfs/xfs_stats.c | 198
-rw-r--r-- fs/xfs/xfs_stats.h (renamed from fs/xfs/linux-2.6/xfs_stats.h) | 103
-rw-r--r-- fs/xfs/xfs_super.c | 1809
-rw-r--r-- fs/xfs/xfs_super.h (renamed from fs/xfs/linux-2.6/xfs_super.h) | 49
-rw-r--r-- fs/xfs/xfs_symlink.c | 599
-rw-r--r-- fs/xfs/xfs_symlink.h (renamed from fs/xfs/linux-2.6/mutex.h) | 16
-rw-r--r-- fs/xfs/xfs_symlink_remote.c | 201
-rw-r--r-- fs/xfs/xfs_sysctl.c (renamed from fs/xfs/linux-2.6/xfs_sysctl.c) | 132
-rw-r--r-- fs/xfs/xfs_sysctl.h (renamed from fs/xfs/linux-2.6/xfs_sysctl.h) | 8
-rw-r--r-- fs/xfs/xfs_trace.c | 56
-rw-r--r-- fs/xfs/xfs_trace.h | 2073
-rw-r--r-- fs/xfs/xfs_trans.c | 1164
-rw-r--r-- fs/xfs/xfs_trans.h | 932
-rw-r--r-- fs/xfs/xfs_trans_ail.c | 1069
-rw-r--r-- fs/xfs/xfs_trans_buf.c | 984
-rw-r--r-- fs/xfs/xfs_trans_dquot.c (renamed from fs/xfs/quota/xfs_trans_dquot.c) | 368
-rw-r--r-- fs/xfs/xfs_trans_extfree.c | 39
-rw-r--r-- fs/xfs/xfs_trans_inode.c | 283
-rw-r--r-- fs/xfs/xfs_trans_item.c | 539
-rw-r--r-- fs/xfs/xfs_trans_priv.h | 159
-rw-r--r-- fs/xfs/xfs_trans_resv.c | 894
-rw-r--r-- fs/xfs/xfs_trans_resv.h | 117
-rw-r--r-- fs/xfs/xfs_trans_space.h | 12
-rw-r--r-- fs/xfs/xfs_types.h | 96
-rw-r--r-- fs/xfs/xfs_utils.c | 403
-rw-r--r-- fs/xfs/xfs_utils.h | 32
-rw-r--r-- fs/xfs/xfs_vfsops.c | 1362
-rw-r--r-- fs/xfs/xfs_vfsops.h | 21
-rw-r--r-- fs/xfs/xfs_vnode.h (renamed from fs/xfs/linux-2.6/xfs_cred.h) | 38
-rw-r--r-- fs/xfs/xfs_vnodeops.c | 4005
-rw-r--r-- fs/xfs/xfs_vnodeops.h | 80
-rw-r--r-- fs/xfs/xfs_xattr.c | 245
220 files changed, 70464 insertions, 71169 deletions
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index 3f53dd101f9..399e8cec6e6 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -1,6 +1,8 @@
config XFS_FS
tristate "XFS filesystem support"
depends on BLOCK
+ select EXPORTFS
+ select LIBCRC32C
help
XFS is a high performance journaling filesystem which originated
on the SGI IRIX platform. It is completely multi-threaded, can
@@ -21,6 +23,7 @@ config XFS_FS
config XFS_QUOTA
bool "XFS Quota support"
depends on XFS_FS
+ select QUOTACTL
help
If you say Y here, you will be able to set limits for disk usage on
a per user and/or a per group basis under XFS. XFS considers quota
@@ -38,6 +41,7 @@ config XFS_QUOTA
config XFS_POSIX_ACL
bool "XFS POSIX ACL support"
depends on XFS_FS
+ select FS_POSIX_ACL
help
POSIX Access Control Lists (ACLs) support permissions for users and
groups beyond the owner/group/world scheme.
@@ -65,9 +69,22 @@ config XFS_RT
If unsure, say N.
+config XFS_WARN
+ bool "XFS Verbose Warnings"
+ depends on XFS_FS && !XFS_DEBUG
+ help
+ Say Y here to get an XFS build with many additional warnings.
+ It converts ASSERT checks to WARN, so will log any out-of-bounds
+ conditions that occur that would otherwise be missed. It is much
+ lighter weight than XFS_DEBUG and does not modify algorithms and will
+ not cause the kernel to panic on non-fatal errors.
+
+ However, similar to XFS_DEBUG, it is only advisable to use this if you
+ are debugging a particular problem.
+
config XFS_DEBUG
- bool "XFS Debugging support (EXPERIMENTAL)"
- depends on XFS_FS && EXPERIMENTAL
+ bool "XFS Debugging support"
+ depends on XFS_FS
help
Say Y here to get an XFS build with many debugging features,
including ASSERT checks, function wrappers around macros,
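
To make the XFS_WARN/XFS_DEBUG distinction above concrete, here is a minimal kernel-style sketch of how a single ASSERT() macro can be routed either to a fatal failure or to a non-fatal warning. It is illustrative only: the assfail()/asswarn() helper names follow XFS convention, but the bodies below are simplified and not copied from fs/xfs/xfs_linux.h.

#include <linux/kernel.h>	/* pr_emerg(), WARN(), BUG(), likely() */

/* Simplified sketch, not the exact in-tree definitions. */
static inline void asswarn(const char *expr, const char *file, int line)
{
	/* XFS_WARN: log the failed check with a backtrace and keep running. */
	WARN(1, "XFS: Assertion failed: %s, file: %s, line: %d\n",
	     expr, file, line);
}

static inline void assfail(const char *expr, const char *file, int line)
{
	/* XFS_DEBUG: a failed assertion brings the kernel down immediately. */
	pr_emerg("XFS: Assertion failed: %s, file: %s, line: %d\n",
		 expr, file, line);
	BUG();
}

#if defined(DEBUG)
#define ASSERT(expr)	\
	(likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#elif defined(XFS_WARN)
#define ASSERT(expr)	\
	(likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
#else
#define ASSERT(expr)	((void)0)	/* production build: compiled out */
#endif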
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 36ec614e699..c21f4350666 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -16,102 +16,101 @@
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
-EXTRA_CFLAGS += -I$(src) -I$(src)/linux-2.6 -funsigned-char
+ccflags-y += -I$(src) # needed for trace events
-XFS_LINUX := linux-2.6
-
-ifeq ($(CONFIG_XFS_DEBUG),y)
- EXTRA_CFLAGS += -g
-endif
+ccflags-$(CONFIG_XFS_DEBUG) += -g
obj-$(CONFIG_XFS_FS) += xfs.o
-xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \
- xfs_dquot.o \
- xfs_dquot_item.o \
- xfs_trans_dquot.o \
- xfs_qm_syscalls.o \
- xfs_qm_bhv.o \
- xfs_qm.o)
-
-ifeq ($(CONFIG_XFS_QUOTA),y)
-xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o
-endif
-
-xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
-xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
-xfs-$(CONFIG_PROC_FS) += $(XFS_LINUX)/xfs_stats.o
-xfs-$(CONFIG_SYSCTL) += $(XFS_LINUX)/xfs_sysctl.o
-xfs-$(CONFIG_COMPAT) += $(XFS_LINUX)/xfs_ioctl32.o
+# this one should be compiled first, as the tracing macros can easily blow up
+xfs-y += xfs_trace.o
+# highlevel code
+xfs-y += xfs_aops.o \
+ xfs_attr_inactive.o \
+ xfs_attr_list.o \
+ xfs_bit.o \
+ xfs_bmap_util.o \
+ xfs_buf.o \
+ xfs_dir2_readdir.o \
+ xfs_discard.o \
+ xfs_error.o \
+ xfs_export.o \
+ xfs_extent_busy.o \
+ xfs_file.o \
+ xfs_filestream.o \
+ xfs_fsops.o \
+ xfs_globals.o \
+ xfs_icache.o \
+ xfs_ioctl.o \
+ xfs_iomap.o \
+ xfs_iops.o \
+ xfs_itable.o \
+ xfs_message.o \
+ xfs_mount.o \
+ xfs_mru_cache.o \
+ xfs_super.o \
+ xfs_symlink.o \
+ xfs_trans.o \
+ xfs_xattr.o \
+ kmem.o \
+ uuid.o
+# code shared with libxfs
xfs-y += xfs_alloc.o \
xfs_alloc_btree.o \
xfs_attr.o \
xfs_attr_leaf.o \
- xfs_bit.o \
+ xfs_attr_remote.o \
xfs_bmap.o \
xfs_bmap_btree.o \
xfs_btree.o \
- xfs_buf_item.o \
xfs_da_btree.o \
+ xfs_da_format.o \
xfs_dir2.o \
xfs_dir2_block.o \
xfs_dir2_data.o \
xfs_dir2_leaf.o \
xfs_dir2_node.o \
xfs_dir2_sf.o \
- xfs_error.o \
- xfs_extfree_item.o \
- xfs_filestream.o \
- xfs_fsops.o \
+ xfs_dquot_buf.o \
xfs_ialloc.o \
xfs_ialloc_btree.o \
- xfs_iget.o \
+ xfs_icreate_item.o \
xfs_inode.o \
- xfs_inode_item.o \
- xfs_iomap.o \
- xfs_itable.o \
- xfs_dfrag.o \
- xfs_log.o \
+ xfs_inode_fork.o \
+ xfs_inode_buf.o \
xfs_log_recover.o \
- xfs_mount.o \
- xfs_mru_cache.o \
- xfs_rename.o \
- xfs_trans.o \
+ xfs_log_rlimit.o \
+ xfs_sb.o \
+ xfs_symlink_remote.o \
+ xfs_trans_resv.o
+
+# low-level transaction/log code
+xfs-y += xfs_log.o \
+ xfs_log_cil.o \
+ xfs_buf_item.o \
+ xfs_extfree_item.o \
+ xfs_inode_item.o \
xfs_trans_ail.o \
xfs_trans_buf.o \
xfs_trans_extfree.o \
xfs_trans_inode.o \
- xfs_trans_item.o \
- xfs_utils.o \
- xfs_vfsops.o \
- xfs_vnodeops.o \
- xfs_rw.o \
- xfs_dmops.o \
- xfs_qmops.o
-
-xfs-$(CONFIG_XFS_TRACE) += xfs_dir2_trace.o
-# Objects in linux/
-xfs-y += $(addprefix $(XFS_LINUX)/, \
- kmem.o \
- xfs_aops.o \
- xfs_buf.o \
- xfs_export.o \
- xfs_file.o \
- xfs_fs_subr.o \
- xfs_globals.o \
- xfs_ioctl.o \
- xfs_iops.o \
- xfs_lrw.o \
- xfs_super.o \
- xfs_vnode.o)
-
-# Objects in support/
-xfs-y += $(addprefix support/, \
- debug.o \
- uuid.o)
+# optional features
+xfs-$(CONFIG_XFS_QUOTA) += xfs_dquot.o \
+ xfs_dquot_item.o \
+ xfs_trans_dquot.o \
+ xfs_qm_syscalls.o \
+ xfs_qm_bhv.o \
+ xfs_qm.o \
+ xfs_quotaops.o
-xfs-$(CONFIG_XFS_TRACE) += support/ktrace.o
+# xfs_rtbitmap is shared with libxfs
+xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o \
+ xfs_rtbitmap.o
+xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
+xfs-$(CONFIG_PROC_FS) += xfs_stats.o
+xfs-$(CONFIG_SYSCTL) += xfs_sysctl.o
+xfs-$(CONFIG_COMPAT) += xfs_ioctl32.o
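
The two comments in the new Makefile above ("needed for trace events" and "this one should be compiled first") refer to the kernel tracepoint machinery: xfs_trace.c defines CREATE_TRACE_POINTS and then includes xfs_trace.h, and <trace/define_trace.h> re-includes that header by file name, which only resolves if the source directory is on the include path, hence the ccflags-y += -I$(src) line. A stripped-down sketch of the pattern follows, using a hypothetical mydrv_trace event rather than the real XFS trace events.

/* mydrv_trace.h -- hypothetical tracepoint header following the usual layout */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mydrv

#if !defined(_MYDRV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _MYDRV_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(mydrv_alloc,
	TP_PROTO(size_t size),
	TP_ARGS(size),
	TP_STRUCT__entry(__field(size_t, size)),
	TP_fast_assign(__entry->size = size;),
	TP_printk("size %zu", __entry->size)
);

#endif /* _MYDRV_TRACE_H */

/* define_trace.h re-includes "mydrv_trace.h" by name, so the directory
 * holding it must be reachable, which is what -I$(src) provides. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mydrv_trace
#include <trace/define_trace.h>

/* mydrv_trace.c -- the analogue of xfs_trace.c; listing it first means any
 * breakage in the event macros shows up here rather than in unrelated files. */
#define CREATE_TRACE_POINTS
#include "mydrv_trace.h"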
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/kmem.c
index 9b1bb17a050..844e288b957 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/kmem.c
@@ -16,81 +16,84 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/mm.h>
-#include <linux/vmalloc.h>
#include <linux/highmem.h>
+#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include "time.h"
#include "kmem.h"
+#include "xfs_message.h"
-#define MAX_VMALLOCS 6
-#define MAX_SLAB_SIZE 0x20000
+/*
+ * Greedy allocation. May fail and may return vmalloced memory.
+ */
+void *
+kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
+{
+ void *ptr;
+ size_t kmsize = maxsize;
+
+ while (!(ptr = vzalloc(kmsize))) {
+ if ((kmsize >>= 1) <= minsize)
+ kmsize = minsize;
+ }
+ if (ptr)
+ *size = kmsize;
+ return ptr;
+}
void *
-kmem_alloc(size_t size, unsigned int __nocast flags)
+kmem_alloc(size_t size, xfs_km_flags_t flags)
{
int retries = 0;
gfp_t lflags = kmem_flags_convert(flags);
void *ptr;
-#ifdef DEBUG
- if (unlikely(!(flags & KM_LARGE) && (size > PAGE_SIZE))) {
- printk(KERN_WARNING "Large %s attempt, size=%ld\n",
- __func__, (long)size);
- dump_stack();
- }
-#endif
-
do {
- if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
- ptr = kmalloc(size, lflags);
- else
- ptr = __vmalloc(size, lflags, PAGE_KERNEL);
+ ptr = kmalloc(size, lflags);
if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
return ptr;
if (!(++retries % 100))
- printk(KERN_ERR "XFS: possible memory allocation "
- "deadlock in %s (mode:0x%x)\n",
+ xfs_err(NULL,
+ "possible memory allocation deadlock in %s (mode:0x%x)",
__func__, lflags);
- congestion_wait(WRITE, HZ/50);
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
} while (1);
}
void *
-kmem_zalloc(size_t size, unsigned int __nocast flags)
+kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
{
+ unsigned noio_flag = 0;
void *ptr;
+ gfp_t lflags;
- ptr = kmem_alloc(size, flags);
+ ptr = kmem_zalloc(size, flags | KM_MAYFAIL);
if (ptr)
- memset((char *)ptr, 0, (int)size);
- return ptr;
-}
+ return ptr;
-void *
-kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize,
- unsigned int __nocast flags)
-{
- void *ptr;
- size_t kmsize = maxsize;
- unsigned int kmflags = (flags & ~KM_SLEEP) | KM_NOSLEEP;
+ /*
+ * __vmalloc() will allocate data pages and auxillary structures (e.g.
+ * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context
+ * here. Hence we need to tell memory reclaim that we are in such a
+ * context via PF_MEMALLOC_NOIO to prevent memory reclaim re-entering
+ * the filesystem here and potentially deadlocking.
+ */
+ if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
+ noio_flag = memalloc_noio_save();
+
+ lflags = kmem_flags_convert(flags);
+ ptr = __vmalloc(size, lflags | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+
+ if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
+ memalloc_noio_restore(noio_flag);
- while (!(ptr = kmem_zalloc(kmsize, kmflags))) {
- if ((kmsize <= minsize) && (flags & KM_NOSLEEP))
- break;
- if ((kmsize >>= 1) <= minsize) {
- kmsize = minsize;
- kmflags = flags;
- }
- }
- if (ptr)
- *size = kmsize;
return ptr;
}
void
-kmem_free(void *ptr, size_t size)
+kmem_free(const void *ptr)
{
if (!is_vmalloc_addr(ptr)) {
kfree(ptr);
@@ -100,8 +103,8 @@ kmem_free(void *ptr, size_t size)
}
void *
-kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
- unsigned int __nocast flags)
+kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
+ xfs_km_flags_t flags)
{
void *new;
@@ -110,13 +113,13 @@ kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
if (new)
memcpy(new, ptr,
((oldsize < newsize) ? oldsize : newsize));
- kmem_free(ptr, oldsize);
+ kmem_free(ptr);
}
return new;
}
void *
-kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
+kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
int retries = 0;
gfp_t lflags = kmem_flags_convert(flags);
@@ -127,20 +130,9 @@ kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
return ptr;
if (!(++retries % 100))
- printk(KERN_ERR "XFS: possible memory allocation "
- "deadlock in %s (mode:0x%x)\n",
+ xfs_err(NULL,
+ "possible memory allocation deadlock in %s (mode:0x%x)",
__func__, lflags);
- congestion_wait(WRITE, HZ/50);
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
} while (1);
}
-
-void *
-kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags)
-{
- void *ptr;
-
- ptr = kmem_zone_alloc(zone, flags);
- if (ptr)
- memset((char *)ptr, 0, kmem_cache_size(zone));
- return ptr;
-}
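
As a usage sketch for the reworked allocators above (hedged: xfs_kmem_example() and the sizes are invented for illustration, though the greedy-allocation pattern mirrors in-tree callers such as the bulkstat code):

#include <linux/errno.h>
#include <linux/mm.h>		/* PAGE_SIZE */
#include "kmem.h"

/* Hypothetical caller showing the intended use of the new API. */
static int xfs_kmem_example(void)
{
	size_t	bufsize;
	void	*buf;
	void	*tmp;

	/*
	 * Greedy allocation: ask for up to four pages and fall back as far
	 * as one page under memory pressure; the size actually obtained is
	 * returned through bufsize.  The result may be vmalloc()ed.
	 */
	buf = kmem_zalloc_greedy(&bufsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!buf)
		return -ENOMEM;

	/*
	 * Zeroed allocation in filesystem/transaction context: KM_NOFS
	 * keeps direct reclaim from recursing back into the filesystem,
	 * KM_MAYFAIL lets the call return NULL instead of retrying forever.
	 */
	tmp = kmem_zalloc(256, KM_NOFS | KM_MAYFAIL);
	if (!tmp) {
		kmem_free(buf);
		return -ENOMEM;
	}

	/* ... do work ... */

	kmem_free(tmp);
	kmem_free(buf);		/* kmem_free() copes with vmalloc'd memory too */
	return 0;
}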
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/kmem.h
index a20683cf74d..64db0e53ede 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/kmem.h
@@ -21,16 +21,18 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/vmalloc.h>
/*
* General memory allocation interfaces
*/
-#define KM_SLEEP 0x0001u
-#define KM_NOSLEEP 0x0002u
-#define KM_NOFS 0x0004u
-#define KM_MAYFAIL 0x0008u
-#define KM_LARGE 0x0010u
+typedef unsigned __bitwise xfs_km_flags_t;
+#define KM_SLEEP ((__force xfs_km_flags_t)0x0001u)
+#define KM_NOSLEEP ((__force xfs_km_flags_t)0x0002u)
+#define KM_NOFS ((__force xfs_km_flags_t)0x0004u)
+#define KM_MAYFAIL ((__force xfs_km_flags_t)0x0008u)
+#define KM_ZERO ((__force xfs_km_flags_t)0x0010u)
/*
* We use a special process flag to avoid recursive callbacks into
@@ -38,11 +40,11 @@
* warnings, so we explicitly skip any generic ones (silly of us).
*/
static inline gfp_t
-kmem_flags_convert(unsigned int __nocast flags)
+kmem_flags_convert(xfs_km_flags_t flags)
{
gfp_t lflags;
- BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_LARGE));
+ BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));
if (flags & KM_NOSLEEP) {
lflags = GFP_ATOMIC | __GFP_NOWARN;
@@ -51,14 +53,26 @@ kmem_flags_convert(unsigned int __nocast flags)
if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
lflags &= ~__GFP_FS;
}
+
+ if (flags & KM_ZERO)
+ lflags |= __GFP_ZERO;
+
return lflags;
}
-extern void *kmem_alloc(size_t, unsigned int __nocast);
-extern void *kmem_zalloc(size_t, unsigned int __nocast);
-extern void *kmem_zalloc_greedy(size_t *, size_t, size_t, unsigned int __nocast);
-extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
-extern void kmem_free(void *, size_t);
+extern void *kmem_alloc(size_t, xfs_km_flags_t);
+extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t);
+extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
+extern void kmem_free(const void *);
+
+
+extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
+
+static inline void *
+kmem_zalloc(size_t size, xfs_km_flags_t flags)
+{
+ return kmem_alloc(size, flags | KM_ZERO);
+}
/*
* Zone interfaces
@@ -97,13 +111,12 @@ kmem_zone_destroy(kmem_zone_t *zone)
kmem_cache_destroy(zone);
}
-extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
-extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
+extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);
-static inline int
-kmem_shake_allow(gfp_t gfp_mask)
+static inline void *
+kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
- return (gfp_mask & __GFP_WAIT) != 0;
+ return kmem_zone_alloc(zone, flags | KM_ZERO);
}
#endif /* __XFS_SUPPORT_KMEM_H__ */
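
The point of the __bitwise typedef introduced above is sparse type-checking: xfs_km_flags_t and gfp_t become distinct restricted types, so mixing the GFP_* and KM_* flag namespaces is caught by a "make C=1" build rather than silently converted. A small illustration, assuming the header above (xfs_km_flags_example() is a made-up function):

#include "kmem.h"

static void xfs_km_flags_example(void)
{
	void	*p;

	/* Correct: only xfs_km_flags_t values are passed to kmem_alloc(). */
	p = kmem_alloc(512, KM_SLEEP | KM_NOFS);
	kmem_free(p);

	/*
	 * p = kmem_alloc(512, GFP_NOFS);
	 *
	 * would now draw a sparse warning (different __bitwise base types),
	 * since gfp_t is not implicitly convertible to xfs_km_flags_t.  The
	 * mapping onto real gfp_t flags stays centralised in
	 * kmem_flags_convert(), and the KM_* definitions carry the __force
	 * casts so the constants themselves are well-typed.
	 */
}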
diff --git a/fs/xfs/linux-2.6/sema.h b/fs/xfs/linux-2.6/sema.h
deleted file mode 100644
index 3abe7e9ceb3..00000000000
--- a/fs/xfs/linux-2.6/sema.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_SUPPORT_SEMA_H__
-#define __XFS_SUPPORT_SEMA_H__
-
-#include <linux/time.h>
-#include <linux/wait.h>
-#include <linux/semaphore.h>
-#include <asm/atomic.h>
-
-/*
- * sema_t structure just maps to struct semaphore in Linux kernel.
- */
-
-typedef struct semaphore sema_t;
-
-#define initnsema(sp, val, name) sema_init(sp, val)
-#define psema(sp, b) down(sp)
-#define vsema(sp) up(sp)
-#define freesema(sema) do { } while (0)
-
-static inline int issemalocked(sema_t *sp)
-{
- return down_trylock(sp) || (up(sp), 0);
-}
-
-/*
- * Map cpsema (try to get the sema) to down_trylock. We need to switch
- * the return values since cpsema returns 1 (acquired) 0 (failed) and
- * down_trylock returns the reverse 0 (acquired) 1 (failed).
- */
-static inline int cpsema(sema_t *sp)
-{
- return down_trylock(sp) ? 0 : 1;
-}
-
-#endif /* __XFS_SUPPORT_SEMA_H__ */
diff --git a/fs/xfs/linux-2.6/sv.h b/fs/xfs/linux-2.6/sv.h
deleted file mode 100644
index 351a8f454bd..00000000000
--- a/fs/xfs/linux-2.6/sv.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_SUPPORT_SV_H__
-#define __XFS_SUPPORT_SV_H__
-
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-
-/*
- * Synchronisation variables.
- *
- * (Parameters "pri", "svf" and "rts" are not implemented)
- */
-
-typedef struct sv_s {
- wait_queue_head_t waiters;
-} sv_t;
-
-#define SV_FIFO 0x0 /* sv_t is FIFO type */
-#define SV_LIFO 0x2 /* sv_t is LIFO type */
-#define SV_PRIO 0x4 /* sv_t is PRIO type */
-#define SV_KEYED 0x6 /* sv_t is KEYED type */
-#define SV_DEFAULT SV_FIFO
-
-
-static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state,
- unsigned long timeout)
-{
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue_exclusive(&sv->waiters, &wait);
- __set_current_state(state);
- spin_unlock(lock);
-
- schedule_timeout(timeout);
-
- remove_wait_queue(&sv->waiters, &wait);
-}
-
-#define sv_init(sv,flag,name) \
- init_waitqueue_head(&(sv)->waiters)
-#define sv_destroy(sv) \
- /*NOTHING*/
-#define sv_wait(sv, pri, lock, s) \
- _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
-#define sv_wait_sig(sv, pri, lock, s) \
- _sv_wait(sv, lock, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
-#define sv_timedwait(sv, pri, lock, s, svf, ts, rts) \
- _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, timespec_to_jiffies(ts))
-#define sv_timedwait_sig(sv, pri, lock, s, svf, ts, rts) \
- _sv_wait(sv, lock, TASK_INTERRUPTIBLE, timespec_to_jiffies(ts))
-#define sv_signal(sv) \
- wake_up(&(sv)->waiters)
-#define sv_broadcast(sv) \
- wake_up_all(&(sv)->waiters)
-
-#endif /* __XFS_SUPPORT_SV_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
deleted file mode 100644
index a55c3b26d84..00000000000
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ /dev/null
@@ -1,1586 +0,0 @@
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_trans.h"
-#include "xfs_dmapi.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_error.h"
-#include "xfs_rw.h"
-#include "xfs_iomap.h"
-#include "xfs_vnodeops.h"
-#include <linux/mpage.h>
-#include <linux/pagevec.h>
-#include <linux/writeback.h>
-
-STATIC void
-xfs_count_page_state(
- struct page *page,
- int *delalloc,
- int *unmapped,
- int *unwritten)
-{
- struct buffer_head *bh, *head;
-
- *delalloc = *unmapped = *unwritten = 0;
-
- bh = head = page_buffers(page);
- do {
- if (buffer_uptodate(bh) && !buffer_mapped(bh))
- (*unmapped) = 1;
- else if (buffer_unwritten(bh))
- (*unwritten) = 1;
- else if (buffer_delay(bh))
- (*delalloc) = 1;
- } while ((bh = bh->b_this_page) != head);
-}
-
-#if defined(XFS_RW_TRACE)
-void
-xfs_page_trace(
- int tag,
- struct inode *inode,
- struct page *page,
- unsigned long pgoff)
-{
- xfs_inode_t *ip;
- bhv_vnode_t *vp = vn_from_inode(inode);
- loff_t isize = i_size_read(inode);
- loff_t offset = page_offset(page);
- int delalloc = -1, unmapped = -1, unwritten = -1;
-
- if (page_has_buffers(page))
- xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
-
- ip = xfs_vtoi(vp);
- if (!ip->i_rwtrace)
- return;
-
- ktrace_enter(ip->i_rwtrace,
- (void *)((unsigned long)tag),
- (void *)ip,
- (void *)inode,
- (void *)page,
- (void *)pgoff,
- (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
- (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
- (void *)((unsigned long)(isize & 0xffffffff)),
- (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(offset & 0xffffffff)),
- (void *)((unsigned long)delalloc),
- (void *)((unsigned long)unmapped),
- (void *)((unsigned long)unwritten),
- (void *)((unsigned long)current_pid()),
- (void *)NULL);
-}
-#else
-#define xfs_page_trace(tag, inode, page, pgoff)
-#endif
-
-STATIC struct block_device *
-xfs_find_bdev_for_inode(
- struct xfs_inode *ip)
-{
- struct xfs_mount *mp = ip->i_mount;
-
- if (XFS_IS_REALTIME_INODE(ip))
- return mp->m_rtdev_targp->bt_bdev;
- else
- return mp->m_ddev_targp->bt_bdev;
-}
-
-/*
- * Schedule IO completion handling on a xfsdatad if this was
- * the final hold on this ioend. If we are asked to wait,
- * flush the workqueue.
- */
-STATIC void
-xfs_finish_ioend(
- xfs_ioend_t *ioend,
- int wait)
-{
- if (atomic_dec_and_test(&ioend->io_remaining)) {
- queue_work(xfsdatad_workqueue, &ioend->io_work);
- if (wait)
- flush_workqueue(xfsdatad_workqueue);
- }
-}
-
-/*
- * We're now finished for good with this ioend structure.
- * Update the page state via the associated buffer_heads,
- * release holds on the inode and bio, and finally free
- * up memory. Do not use the ioend after this.
- */
-STATIC void
-xfs_destroy_ioend(
- xfs_ioend_t *ioend)
-{
- struct buffer_head *bh, *next;
-
- for (bh = ioend->io_buffer_head; bh; bh = next) {
- next = bh->b_private;
- bh->b_end_io(bh, !ioend->io_error);
- }
- if (unlikely(ioend->io_error)) {
- vn_ioerror(XFS_I(ioend->io_inode), ioend->io_error,
- __FILE__,__LINE__);
- }
- vn_iowake(XFS_I(ioend->io_inode));
- mempool_free(ioend, xfs_ioend_pool);
-}
-
-/*
- * Update on-disk file size now that data has been written to disk.
- * The current in-memory file size is i_size. If a write is beyond
- * eof i_new_size will be the intended file size until i_size is
- * updated. If this write does not extend all the way to the valid
- * file size then restrict this update to the end of the write.
- */
-STATIC void
-xfs_setfilesize(
- xfs_ioend_t *ioend)
-{
- xfs_inode_t *ip = XFS_I(ioend->io_inode);
- xfs_fsize_t isize;
- xfs_fsize_t bsize;
-
- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
- ASSERT(ioend->io_type != IOMAP_READ);
-
- if (unlikely(ioend->io_error))
- return;
-
- bsize = ioend->io_offset + ioend->io_size;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
-
- isize = MAX(ip->i_size, ip->i_new_size);
- isize = MIN(isize, bsize);
-
- if (ip->i_d.di_size < isize) {
- ip->i_d.di_size = isize;
- ip->i_update_core = 1;
- ip->i_update_size = 1;
- mark_inode_dirty_sync(ioend->io_inode);
- }
-
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-}
-
-/*
- * Buffered IO write completion for delayed allocate extents.
- */
-STATIC void
-xfs_end_bio_delalloc(
- struct work_struct *work)
-{
- xfs_ioend_t *ioend =
- container_of(work, xfs_ioend_t, io_work);
-
- xfs_setfilesize(ioend);
- xfs_destroy_ioend(ioend);
-}
-
-/*
- * Buffered IO write completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_written(
- struct work_struct *work)
-{
- xfs_ioend_t *ioend =
- container_of(work, xfs_ioend_t, io_work);
-
- xfs_setfilesize(ioend);
- xfs_destroy_ioend(ioend);
-}
-
-/*
- * IO write completion for unwritten extents.
- *
- * Issue transactions to convert a buffer range from unwritten
- * to written extents.
- */
-STATIC void
-xfs_end_bio_unwritten(
- struct work_struct *work)
-{
- xfs_ioend_t *ioend =
- container_of(work, xfs_ioend_t, io_work);
- struct xfs_inode *ip = XFS_I(ioend->io_inode);
- xfs_off_t offset = ioend->io_offset;
- size_t size = ioend->io_size;
-
- if (likely(!ioend->io_error)) {
- if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- int error;
- error = xfs_iomap_write_unwritten(ip, offset, size);
- if (error)
- ioend->io_error = error;
- }
- xfs_setfilesize(ioend);
- }
- xfs_destroy_ioend(ioend);
-}
-
-/*
- * IO read completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_read(
- struct work_struct *work)
-{
- xfs_ioend_t *ioend =
- container_of(work, xfs_ioend_t, io_work);
-
- xfs_destroy_ioend(ioend);
-}
-
-/*
- * Allocate and initialise an IO completion structure.
- * We need to track unwritten extent write completion here initially.
- * We'll need to extend this for updating the ondisk inode size later
- * (vs. incore size).
- */
-STATIC xfs_ioend_t *
-xfs_alloc_ioend(
- struct inode *inode,
- unsigned int type)
-{
- xfs_ioend_t *ioend;
-
- ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
-
- /*
- * Set the count to 1 initially, which will prevent an I/O
- * completion callback from happening before we have started
- * all the I/O from calling the completion routine too early.
- */
- atomic_set(&ioend->io_remaining, 1);
- ioend->io_error = 0;
- ioend->io_list = NULL;
- ioend->io_type = type;
- ioend->io_inode = inode;
- ioend->io_buffer_head = NULL;
- ioend->io_buffer_tail = NULL;
- atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
- ioend->io_offset = 0;
- ioend->io_size = 0;
-
- if (type == IOMAP_UNWRITTEN)
- INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
- else if (type == IOMAP_DELAY)
- INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
- else if (type == IOMAP_READ)
- INIT_WORK(&ioend->io_work, xfs_end_bio_read);
- else
- INIT_WORK(&ioend->io_work, xfs_end_bio_written);
-
- return ioend;
-}
-
-STATIC int
-xfs_map_blocks(
- struct inode *inode,
- loff_t offset,
- ssize_t count,
- xfs_iomap_t *mapp,
- int flags)
-{
- xfs_inode_t *ip = XFS_I(inode);
- int error, nmaps = 1;
-
- error = xfs_iomap(ip, offset, count,
- flags, mapp, &nmaps);
- if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
- xfs_iflags_set(ip, XFS_IMODIFIED);
- return -error;
-}
-
-STATIC_INLINE int
-xfs_iomap_valid(
- xfs_iomap_t *iomapp,
- loff_t offset)
-{
- return offset >= iomapp->iomap_offset &&
- offset < iomapp->iomap_offset + iomapp->iomap_bsize;
-}
-
-/*
- * BIO completion handler for buffered IO.
- */
-STATIC void
-xfs_end_bio(
- struct bio *bio,
- int error)
-{
- xfs_ioend_t *ioend = bio->bi_private;
-
- ASSERT(atomic_read(&bio->bi_cnt) >= 1);
- ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
-
- /* Toss bio and pass work off to an xfsdatad thread */
- bio->bi_private = NULL;
- bio->bi_end_io = NULL;
- bio_put(bio);
-
- xfs_finish_ioend(ioend, 0);
-}
-
-STATIC void
-xfs_submit_ioend_bio(
- xfs_ioend_t *ioend,
- struct bio *bio)
-{
- atomic_inc(&ioend->io_remaining);
-
- bio->bi_private = ioend;
- bio->bi_end_io = xfs_end_bio;
-
- submit_bio(WRITE, bio);
- ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
- bio_put(bio);
-}
-
-STATIC struct bio *
-xfs_alloc_ioend_bio(
- struct buffer_head *bh)
-{
- struct bio *bio;
- int nvecs = bio_get_nr_vecs(bh->b_bdev);
-
- do {
- bio = bio_alloc(GFP_NOIO, nvecs);
- nvecs >>= 1;
- } while (!bio);
-
- ASSERT(bio->bi_private == NULL);
- bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio->bi_bdev = bh->b_bdev;
- bio_get(bio);
- return bio;
-}
-
-STATIC void
-xfs_start_buffer_writeback(
- struct buffer_head *bh)
-{
- ASSERT(buffer_mapped(bh));
- ASSERT(buffer_locked(bh));
- ASSERT(!buffer_delay(bh));
- ASSERT(!buffer_unwritten(bh));
-
- mark_buffer_async_write(bh);
- set_buffer_uptodate(bh);
- clear_buffer_dirty(bh);
-}
-
-STATIC void
-xfs_start_page_writeback(
- struct page *page,
- struct writeback_control *wbc,
- int clear_dirty,
- int buffers)
-{
- ASSERT(PageLocked(page));
- ASSERT(!PageWriteback(page));
- if (clear_dirty)
- clear_page_dirty_for_io(page);
- set_page_writeback(page);
- unlock_page(page);
- /* If no buffers on the page are to be written, finish it here */
- if (!buffers)
- end_page_writeback(page);
-}
-
-static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
-{
- return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
-}
-
-/*
- * Submit all of the bios for all of the ioends we have saved up, covering the
- * initial writepage page and also any probed pages.
- *
- * Because we may have multiple ioends spanning a page, we need to start
- * writeback on all the buffers before we submit them for I/O. If we mark the
- * buffers as we got, then we can end up with a page that only has buffers
- * marked async write and I/O complete on can occur before we mark the other
- * buffers async write.
- *
- * The end result of this is that we trip a bug in end_page_writeback() because
- * we call it twice for the one page as the code in end_buffer_async_write()
- * assumes that all buffers on the page are started at the same time.
- *
- * The fix is two passes across the ioend list - one to start writeback on the
- * buffer_heads, and then submit them for I/O on the second pass.
- */
-STATIC void
-xfs_submit_ioend(
- xfs_ioend_t *ioend)
-{
- xfs_ioend_t *head = ioend;
- xfs_ioend_t *next;
- struct buffer_head *bh;
- struct bio *bio;
- sector_t lastblock = 0;
-
- /* Pass 1 - start writeback */
- do {
- next = ioend->io_list;
- for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
- xfs_start_buffer_writeback(bh);
- }
- } while ((ioend = next) != NULL);
-
- /* Pass 2 - submit I/O */
- ioend = head;
- do {
- next = ioend->io_list;
- bio = NULL;
-
- for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
-
- if (!bio) {
- retry:
- bio = xfs_alloc_ioend_bio(bh);
- } else if (bh->b_blocknr != lastblock + 1) {
- xfs_submit_ioend_bio(ioend, bio);
- goto retry;
- }
-
- if (bio_add_buffer(bio, bh) != bh->b_size) {
- xfs_submit_ioend_bio(ioend, bio);
- goto retry;
- }
-
- lastblock = bh->b_blocknr;
- }
- if (bio)
- xfs_submit_ioend_bio(ioend, bio);
- xfs_finish_ioend(ioend, 0);
- } while ((ioend = next) != NULL);
-}
-
-/*
- * Cancel submission of all buffer_heads so far in this endio.
- * Toss the endio too. Only ever called for the initial page
- * in a writepage request, so only ever one page.
- */
-STATIC void
-xfs_cancel_ioend(
- xfs_ioend_t *ioend)
-{
- xfs_ioend_t *next;
- struct buffer_head *bh, *next_bh;
-
- do {
- next = ioend->io_list;
- bh = ioend->io_buffer_head;
- do {
- next_bh = bh->b_private;
- clear_buffer_async_write(bh);
- unlock_buffer(bh);
- } while ((bh = next_bh) != NULL);
-
- vn_iowake(XFS_I(ioend->io_inode));
- mempool_free(ioend, xfs_ioend_pool);
- } while ((ioend = next) != NULL);
-}
-
-/*
- * Test to see if we've been building up a completion structure for
- * earlier buffers -- if so, we try to append to this ioend if we
- * can, otherwise we finish off any current ioend and start another.
- * Return true if we've finished the given ioend.
- */
-STATIC void
-xfs_add_to_ioend(
- struct inode *inode,
- struct buffer_head *bh,
- xfs_off_t offset,
- unsigned int type,
- xfs_ioend_t **result,
- int need_ioend)
-{
- xfs_ioend_t *ioend = *result;
-
- if (!ioend || need_ioend || type != ioend->io_type) {
- xfs_ioend_t *previous = *result;
-
- ioend = xfs_alloc_ioend(inode, type);
- ioend->io_offset = offset;
- ioend->io_buffer_head = bh;
- ioend->io_buffer_tail = bh;
- if (previous)
- previous->io_list = ioend;
- *result = ioend;
- } else {
- ioend->io_buffer_tail->b_private = bh;
- ioend->io_buffer_tail = bh;
- }
-
- bh->b_private = NULL;
- ioend->io_size += bh->b_size;
-}
-
-STATIC void
-xfs_map_buffer(
- struct buffer_head *bh,
- xfs_iomap_t *mp,
- xfs_off_t offset,
- uint block_bits)
-{
- sector_t bn;
-
- ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);
-
- bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
- ((offset - mp->iomap_offset) >> block_bits);
-
- ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));
-
- bh->b_blocknr = bn;
- set_buffer_mapped(bh);
-}
-
-STATIC void
-xfs_map_at_offset(
- struct buffer_head *bh,
- loff_t offset,
- int block_bits,
- xfs_iomap_t *iomapp)
-{
- ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
- ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
-
- lock_buffer(bh);
- xfs_map_buffer(bh, iomapp, offset, block_bits);
- bh->b_bdev = iomapp->iomap_target->bt_bdev;
- set_buffer_mapped(bh);
- clear_buffer_delay(bh);
- clear_buffer_unwritten(bh);
-}
-
-/*
- * Look for a page at index that is suitable for clustering.
- */
-STATIC unsigned int
-xfs_probe_page(
- struct page *page,
- unsigned int pg_offset,
- int mapped)
-{
- int ret = 0;
-
- if (PageWriteback(page))
- return 0;
-
- if (page->mapping && PageDirty(page)) {
- if (page_has_buffers(page)) {
- struct buffer_head *bh, *head;
-
- bh = head = page_buffers(page);
- do {
- if (!buffer_uptodate(bh))
- break;
- if (mapped != buffer_mapped(bh))
- break;
- ret += bh->b_size;
- if (ret >= pg_offset)
- break;
- } while ((bh = bh->b_this_page) != head);
- } else
- ret = mapped ? 0 : PAGE_CACHE_SIZE;
- }
-
- return ret;
-}
-
-STATIC size_t
-xfs_probe_cluster(
- struct inode *inode,
- struct page *startpage,
- struct buffer_head *bh,
- struct buffer_head *head,
- int mapped)
-{
- struct pagevec pvec;
- pgoff_t tindex, tlast, tloff;
- size_t total = 0;
- int done = 0, i;
-
- /* First sum forwards in this page */
- do {
- if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
- return total;
- total += bh->b_size;
- } while ((bh = bh->b_this_page) != head);
-
- /* if we reached the end of the page, sum forwards in following pages */
- tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
- tindex = startpage->index + 1;
-
- /* Prune this back to avoid pathological behavior */
- tloff = min(tlast, startpage->index + 64);
-
- pagevec_init(&pvec, 0);
- while (!done && tindex <= tloff) {
- unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
-
- if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
- break;
-
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
- size_t pg_offset, pg_len = 0;
-
- if (tindex == tlast) {
- pg_offset =
- i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
- if (!pg_offset) {
- done = 1;
- break;
- }
- } else
- pg_offset = PAGE_CACHE_SIZE;
-
- if (page->index == tindex && !TestSetPageLocked(page)) {
- pg_len = xfs_probe_page(page, pg_offset, mapped);
- unlock_page(page);
- }
-
- if (!pg_len) {
- done = 1;
- break;
- }
-
- total += pg_len;
- tindex++;
- }
-
- pagevec_release(&pvec);
- cond_resched();
- }
-
- return total;
-}
-
-/*
- * Test if a given page is suitable for writing as part of an unwritten
- * or delayed allocate extent.
- */
-STATIC int
-xfs_is_delayed_page(
- struct page *page,
- unsigned int type)
-{
- if (PageWriteback(page))
- return 0;
-
- if (page->mapping && page_has_buffers(page)) {
- struct buffer_head *bh, *head;
- int acceptable = 0;
-
- bh = head = page_buffers(page);
- do {
- if (buffer_unwritten(bh))
- acceptable = (type == IOMAP_UNWRITTEN);
- else if (buffer_delay(bh))
- acceptable = (type == IOMAP_DELAY);
- else if (buffer_dirty(bh) && buffer_mapped(bh))
- acceptable = (type == IOMAP_NEW);
- else
- break;
- } while ((bh = bh->b_this_page) != head);
-
- if (acceptable)
- return 1;
- }
-
- return 0;
-}
-
-/*
- * Allocate & map buffers for page given the extent map. Write it out.
- * except for the original page of a writepage, this is called on
- * delalloc/unwritten pages only, for the original page it is possible
- * that the page has no mapping at all.
- */
-STATIC int
-xfs_convert_page(
- struct inode *inode,
- struct page *page,
- loff_t tindex,
- xfs_iomap_t *mp,
- xfs_ioend_t **ioendp,
- struct writeback_control *wbc,
- int startio,
- int all_bh)
-{
- struct buffer_head *bh, *head;
- xfs_off_t end_offset;
- unsigned long p_offset;
- unsigned int type;
- int bbits = inode->i_blkbits;
- int len, page_dirty;
- int count = 0, done = 0, uptodate = 1;
- xfs_off_t offset = page_offset(page);
-
- if (page->index != tindex)
- goto fail;
- if (TestSetPageLocked(page))
- goto fail;
- if (PageWriteback(page))
- goto fail_unlock_page;
- if (page->mapping != inode->i_mapping)
- goto fail_unlock_page;
- if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
- goto fail_unlock_page;
-
- /*
- * page_dirty is initially a count of buffers on the page before
- * EOF and is decremented as we move each into a cleanable state.
- *
- * Derivation:
- *
- * End offset is the highest offset that this page should represent.
- * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
- * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
- * hence give us the correct page_dirty count. On any other page,
- * it will be zero and in that case we need page_dirty to be the
- * count of buffers on the page.
- */
- end_offset = min_t(unsigned long long,
- (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
- i_size_read(inode));
-
- len = 1 << inode->i_blkbits;
- p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
- PAGE_CACHE_SIZE);
- p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
- page_dirty = p_offset / len;
-
- bh = head = page_buffers(page);
- do {
- if (offset >= end_offset)
- break;
- if (!buffer_uptodate(bh))
- uptodate = 0;
- if (!(PageUptodate(page) || buffer_uptodate(bh))) {
- done = 1;
- continue;
- }
-
- if (buffer_unwritten(bh) || buffer_delay(bh)) {
- if (buffer_unwritten(bh))
- type = IOMAP_UNWRITTEN;
- else
- type = IOMAP_DELAY;
-
- if (!xfs_iomap_valid(mp, offset)) {
- done = 1;
- continue;
- }
-
- ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
- ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
-
- xfs_map_at_offset(bh, offset, bbits, mp);
- if (startio) {
- xfs_add_to_ioend(inode, bh, offset,
- type, ioendp, done);
- } else {
- set_buffer_dirty(bh);
- unlock_buffer(bh);
- mark_buffer_dirty(bh);
- }
- page_dirty--;
- count++;
- } else {
- type = IOMAP_NEW;
- if (buffer_mapped(bh) && all_bh && startio) {
- lock_buffer(bh);
- xfs_add_to_ioend(inode, bh, offset,
- type, ioendp, done);
- count++;
- page_dirty--;
- } else {
- done = 1;
- }
- }
- } while (offset += len, (bh = bh->b_this_page) != head);
-
- if (uptodate && bh == head)
- SetPageUptodate(page);
-
- if (startio) {
- if (count) {
- struct backing_dev_info *bdi;
-
- bdi = inode->i_mapping->backing_dev_info;
- wbc->nr_to_write--;
- if (bdi_write_congested(bdi)) {
- wbc->encountered_congestion = 1;
- done = 1;
- } else if (wbc->nr_to_write <= 0) {
- done = 1;
- }
- }
- xfs_start_page_writeback(page, wbc, !page_dirty, count);
- }
-
- return done;
- fail_unlock_page:
- unlock_page(page);
- fail:
- return 1;
-}
-
-/*
- * Convert & write out a cluster of pages in the same extent as defined
- * by mp and following the start page.
- */
-STATIC void
-xfs_cluster_write(
- struct inode *inode,
- pgoff_t tindex,
- xfs_iomap_t *iomapp,
- xfs_ioend_t **ioendp,
- struct writeback_control *wbc,
- int startio,
- int all_bh,
- pgoff_t tlast)
-{
- struct pagevec pvec;
- int done = 0, i;
-
- pagevec_init(&pvec, 0);
- while (!done && tindex <= tlast) {
- unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
-
- if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
- break;
-
- for (i = 0; i < pagevec_count(&pvec); i++) {
- done = xfs_convert_page(inode, pvec.pages[i], tindex++,
- iomapp, ioendp, wbc, startio, all_bh);
- if (done)
- break;
- }
-
- pagevec_release(&pvec);
- cond_resched();
- }
-}
-
-/*
- * Calling this without startio set means we are being asked to make a dirty
- * page ready for freeing it's buffers. When called with startio set then
- * we are coming from writepage.
- *
- * When called with startio set it is important that we write the WHOLE
- * page if possible.
- * The bh->b_state's cannot know if any of the blocks or which block for
- * that matter are dirty due to mmap writes, and therefore bh uptodate is
- * only valid if the page itself isn't completely uptodate. Some layers
- * may clear the page dirty flag prior to calling write page, under the
- * assumption the entire page will be written out; by not writing out the
- * whole page the page can be reused before all valid dirty data is
- * written out. Note: in the case of a page that has been dirty'd by
- * mapwrite and but partially setup by block_prepare_write the
- * bh->b_states's will not agree and only ones setup by BPW/BCW will have
- * valid state, thus the whole page must be written out thing.
- */
-
-STATIC int
-xfs_page_state_convert(
- struct inode *inode,
- struct page *page,
- struct writeback_control *wbc,
- int startio,
- int unmapped) /* also implies page uptodate */
-{
- struct buffer_head *bh, *head;
- xfs_iomap_t iomap;
- xfs_ioend_t *ioend = NULL, *iohead = NULL;
- loff_t offset;
- unsigned long p_offset = 0;
- unsigned int type;
- __uint64_t end_offset;
- pgoff_t end_index, last_index, tlast;
- ssize_t size, len;
- int flags, err, iomap_valid = 0, uptodate = 1;
- int page_dirty, count = 0;
- int trylock = 0;
- int all_bh = unmapped;
-
- if (startio) {
- if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
- trylock |= BMAPI_TRYLOCK;
- }
-
- /* Is this page beyond the end of the file? */
- offset = i_size_read(inode);
- end_index = offset >> PAGE_CACHE_SHIFT;
- last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
- if (page->index >= end_index) {
- if ((page->index >= end_index + 1) ||
- !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
- if (startio)
- unlock_page(page);
- return 0;
- }
- }
-
- /*
- * page_dirty is initially a count of buffers on the page before
- * EOF and is decremented as we move each into a cleanable state.
- *
- * Derivation:
- *
- * End offset is the highest offset that this page should represent.
- * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
- * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
- * hence give us the correct page_dirty count. On any other page,
- * it will be zero and in that case we need page_dirty to be the
- * count of buffers on the page.
- */
- end_offset = min_t(unsigned long long,
- (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
- len = 1 << inode->i_blkbits;
- p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
- PAGE_CACHE_SIZE);
- p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
- page_dirty = p_offset / len;
-
- bh = head = page_buffers(page);
- offset = page_offset(page);
- flags = BMAPI_READ;
- type = IOMAP_NEW;
-
- /* TODO: cleanup count and page_dirty */
-
- do {
- if (offset >= end_offset)
- break;
- if (!buffer_uptodate(bh))
- uptodate = 0;
- if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
- /*
- * the iomap is actually still valid, but the ioend
- * isn't. shouldn't happen too often.
- */
- iomap_valid = 0;
- continue;
- }
-
- if (iomap_valid)
- iomap_valid = xfs_iomap_valid(&iomap, offset);
-
- /*
- * First case, map an unwritten extent and prepare for
- * extent state conversion transaction on completion.
- *
- * Second case, allocate space for a delalloc buffer.
- * We can return EAGAIN here in the release page case.
- *
- * Third case, an unmapped buffer was found, and we are
- * in a path where we need to write the whole page out.
- */
- if (buffer_unwritten(bh) || buffer_delay(bh) ||
- ((buffer_uptodate(bh) || PageUptodate(page)) &&
- !buffer_mapped(bh) && (unmapped || startio))) {
- int new_ioend = 0;
-
- /*
- * Make sure we don't use a read-only iomap
- */
- if (flags == BMAPI_READ)
- iomap_valid = 0;
-
- if (buffer_unwritten(bh)) {
- type = IOMAP_UNWRITTEN;
- flags = BMAPI_WRITE | BMAPI_IGNSTATE;
- } else if (buffer_delay(bh)) {
- type = IOMAP_DELAY;
- flags = BMAPI_ALLOCATE | trylock;
- } else {
- type = IOMAP_NEW;
- flags = BMAPI_WRITE | BMAPI_MMAP;
- }
-
- if (!iomap_valid) {
- /*
- * if we didn't have a valid mapping then we
- * need to ensure that we put the new mapping
- * in a new ioend structure. This needs to be
- * done to ensure that the ioends correctly
- * reflect the block mappings at io completion
- * for unwritten extent conversion.
- */
- new_ioend = 1;
- if (type == IOMAP_NEW) {
- size = xfs_probe_cluster(inode,
- page, bh, head, 0);
- } else {
- size = len;
- }
-
- err = xfs_map_blocks(inode, offset, size,
- &iomap, flags);
- if (err)
- goto error;
- iomap_valid = xfs_iomap_valid(&iomap, offset);
- }
- if (iomap_valid) {
- xfs_map_at_offset(bh, offset,
- inode->i_blkbits, &iomap);
- if (startio) {
- xfs_add_to_ioend(inode, bh, offset,
- type, &ioend,
- new_ioend);
- } else {
- set_buffer_dirty(bh);
- unlock_buffer(bh);
- mark_buffer_dirty(bh);
- }
- page_dirty--;
- count++;
- }
- } else if (buffer_uptodate(bh) && startio) {
- /*
- * we got here because the buffer is already mapped.
- * That means it must already have extents allocated
- * underneath it. Map the extent by reading it.
- */
- if (!iomap_valid || flags != BMAPI_READ) {
- flags = BMAPI_READ;
- size = xfs_probe_cluster(inode, page, bh,
- head, 1);
- err = xfs_map_blocks(inode, offset, size,
- &iomap, flags);
- if (err)
- goto error;
- iomap_valid = xfs_iomap_valid(&iomap, offset);
- }
-
- /*
- * We set the type to IOMAP_NEW in case we are doing a
- * small write at EOF that is extending the file but
- * without needing an allocation. We need to update the
- * file size on I/O completion in this case so it is
- * the same case as having just allocated a new extent
- * that we are writing into for the first time.
- */
- type = IOMAP_NEW;
- if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
- ASSERT(buffer_mapped(bh));
- if (iomap_valid)
- all_bh = 1;
- xfs_add_to_ioend(inode, bh, offset, type,
- &ioend, !iomap_valid);
- page_dirty--;
- count++;
- } else {
- iomap_valid = 0;
- }
- } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
- (unmapped || startio)) {
- iomap_valid = 0;
- }
-
- if (!iohead)
- iohead = ioend;
-
- } while (offset += len, ((bh = bh->b_this_page) != head));
-
- if (uptodate && bh == head)
- SetPageUptodate(page);
-
- if (startio)
- xfs_start_page_writeback(page, wbc, 1, count);
-
- if (ioend && iomap_valid) {
- offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
- PAGE_CACHE_SHIFT;
- tlast = min_t(pgoff_t, offset, last_index);
- xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
- wbc, startio, all_bh, tlast);
- }
-
- if (iohead)
- xfs_submit_ioend(iohead);
-
- return page_dirty;
-
-error:
- if (iohead)
- xfs_cancel_ioend(iohead);
-
- /*
- * If it's delalloc and we have nowhere to put it,
- * throw it away, unless the lower layers told
- * us to try again.
- */
- if (err != -EAGAIN) {
- if (!unmapped)
- block_invalidatepage(page, 0);
- ClearPageUptodate(page);
- }
- return err;
-}
-
-/*
- * writepage: Called from one of two places:
- *
- * 1. we are flushing a delalloc buffer head.
- *
- * 2. we are writing out a dirty page. Typically the page dirty
- * state is cleared before we get here. In this case is it
- * conceivable we have no buffer heads.
- *
- * For delalloc space on the page we need to allocate space and
- * flush it. For unmapped buffer heads on the page we should
- * allocate space if the page is uptodate. For any other dirty
- * buffer heads on the page we should flush them.
- *
- * If we detect that a transaction would be required to flush
- * the page, we have to check the process flags first, if we
- * are already in a transaction or disk I/O during allocations
- * is off, we need to fail the writepage and redirty the page.
- */
-
-STATIC int
-xfs_vm_writepage(
- struct page *page,
- struct writeback_control *wbc)
-{
- int error;
- int need_trans;
- int delalloc, unmapped, unwritten;
- struct inode *inode = page->mapping->host;
-
- xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
-
- /*
- * We need a transaction if:
- * 1. There are delalloc buffers on the page
- * 2. The page is uptodate and we have unmapped buffers
- * 3. The page is uptodate and we have no buffers
- * 4. There are unwritten buffers on the page
- */
-
- if (!page_has_buffers(page)) {
- unmapped = 1;
- need_trans = 1;
- } else {
- xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
- if (!PageUptodate(page))
- unmapped = 0;
- need_trans = delalloc + unmapped + unwritten;
- }
-
- /*
- * If we need a transaction and the process flags say
- * we are already in a transaction, or no IO is allowed
- * then mark the page dirty again and leave the page
- * as is.
- */
- if (current_test_flags(PF_FSTRANS) && need_trans)
- goto out_fail;
-
- /*
- * Delay hooking up buffer heads until we have
- * made our go/no-go decision.
- */
- if (!page_has_buffers(page))
- create_empty_buffers(page, 1 << inode->i_blkbits, 0);
-
- /*
- * Convert delayed allocate, unwritten or unmapped space
- * to real space and flush out to disk.
- */
- error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
- if (error == -EAGAIN)
- goto out_fail;
- if (unlikely(error < 0))
- goto out_unlock;
-
- return 0;
-
-out_fail:
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return 0;
-out_unlock:
- unlock_page(page);
- return error;
-}
-
-STATIC int
-xfs_vm_writepages(
- struct address_space *mapping,
- struct writeback_control *wbc)
-{
- xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
- return generic_writepages(mapping, wbc);
-}
-
-/*
- * Called to move a page into cleanable state - and from there
- * to be released. Possibly the page is already clean. We always
- * have buffer heads in this call.
- *
- * Returns non-zero if the buffers were freed and the page is ok to release, 0 otherwise.
- *
- * Possible scenarios are:
- *
- * 1. We are being called to release a page which has been written
- *    to via regular I/O. Buffer heads will be dirty and possibly
- *    delalloc. If there are no delalloc buffer heads in this case then we
- * can just return zero.
- *
- * 2. We are called to release a page which has been written via
- *    mmap; all we need to do is ensure there is no delalloc
- *    state in the buffer heads. If there is none, we can let the caller
- * free them and we should come back later via writepage.
- */
-STATIC int
-xfs_vm_releasepage(
- struct page *page,
- gfp_t gfp_mask)
-{
- struct inode *inode = page->mapping->host;
- int dirty, delalloc, unmapped, unwritten;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = 1,
- };
-
- xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
-
- if (!page_has_buffers(page))
- return 0;
-
- xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
- if (!delalloc && !unwritten)
- goto free_buffers;
-
- if (!(gfp_mask & __GFP_FS))
- return 0;
-
- /* If we are already inside a transaction or the thread cannot
- * do I/O, we cannot release this page.
- */
- if (current_test_flags(PF_FSTRANS))
- return 0;
-
- /*
- * Convert delalloc space to real space, but do not flush the
- * data out to disk; that will be done by the caller.
- * We never need to allocate space here - we will always
- * come back to writepage in that case.
- */
- dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
- if (dirty == 0 && !unwritten)
- goto free_buffers;
- return 0;
-
-free_buffers:
- return try_to_free_buffers(page);
-}
-
-STATIC int
-__xfs_get_blocks(
- struct inode *inode,
- sector_t iblock,
- struct buffer_head *bh_result,
- int create,
- int direct,
- bmapi_flags_t flags)
-{
- xfs_iomap_t iomap;
- xfs_off_t offset;
- ssize_t size;
- int niomap = 1;
- int error;
-
- offset = (xfs_off_t)iblock << inode->i_blkbits;
- ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
- size = bh_result->b_size;
- error = xfs_iomap(XFS_I(inode), offset, size,
- create ? flags : BMAPI_READ, &iomap, &niomap);
- if (error)
- return -error;
- if (niomap == 0)
- return 0;
-
- if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
- /*
- * For unwritten extents do not report a disk address in
- * the read case (treat as if we're reading into a hole).
- */
- if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
- xfs_map_buffer(bh_result, &iomap, offset,
- inode->i_blkbits);
- }
- if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
- if (direct)
- bh_result->b_private = inode;
- set_buffer_unwritten(bh_result);
- }
- }
-
- /*
- * If this is a realtime file, data may be on a different device
- * to that pointed to by the buffer_head's b_bdev currently.
- */
- bh_result->b_bdev = iomap.iomap_target->bt_bdev;
-
- /*
- * If we previously allocated a block out beyond eof and we are now
- * coming back to use it then we will need to flag it as new even if it
- * has a disk address.
- *
- * With sub-block writes into unwritten extents we also need to mark
- * the buffer as new so that the unwritten parts of the buffer get
- * correctly zeroed.
- */
- if (create &&
- ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
- (offset >= i_size_read(inode)) ||
- (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
- set_buffer_new(bh_result);
-
- if (iomap.iomap_flags & IOMAP_DELAY) {
- BUG_ON(direct);
- if (create) {
- set_buffer_uptodate(bh_result);
- set_buffer_mapped(bh_result);
- set_buffer_delay(bh_result);
- }
- }
-
- if (direct || size > (1 << inode->i_blkbits)) {
- ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
- offset = min_t(xfs_off_t,
- iomap.iomap_bsize - iomap.iomap_delta, size);
- bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
- }
-
- return 0;
-}
-
-int
-xfs_get_blocks(
- struct inode *inode,
- sector_t iblock,
- struct buffer_head *bh_result,
- int create)
-{
- return __xfs_get_blocks(inode, iblock,
- bh_result, create, 0, BMAPI_WRITE);
-}
-
-STATIC int
-xfs_get_blocks_direct(
- struct inode *inode,
- sector_t iblock,
- struct buffer_head *bh_result,
- int create)
-{
- return __xfs_get_blocks(inode, iblock,
- bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
-}
-
-STATIC void
-xfs_end_io_direct(
- struct kiocb *iocb,
- loff_t offset,
- ssize_t size,
- void *private)
-{
- xfs_ioend_t *ioend = iocb->private;
-
- /*
- * Non-NULL private data means we need to issue a transaction to
- * convert a range from unwritten to written extents. This needs
- * to happen from process context but aio+dio I/O completion
- * happens from irq context so we need to defer it to a workqueue.
- * This is not necessary for synchronous direct I/O, but we do
- * it anyway to keep the code uniform and simpler.
- *
- * Well, if only it were that simple. Because synchronous direct I/O
- * requires extent conversion to occur *before* we return to userspace,
- * we have to wait for extent conversion to complete. Look at the
- * iocb that has been passed to us to determine if this is AIO or
- * not. If it is synchronous, tell xfs_finish_ioend() to kick the
- * workqueue and wait for it to complete.
- *
- * The core direct I/O code might be changed to always call the
- * completion handler in the future, in which case all this can
- * go away.
- */
- ioend->io_offset = offset;
- ioend->io_size = size;
- if (ioend->io_type == IOMAP_READ) {
- xfs_finish_ioend(ioend, 0);
- } else if (private && size > 0) {
- xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
- } else {
- /*
- * A direct I/O write ioend starts its life in the unwritten
- * state in case it maps an unwritten extent. This write
- * didn't map an unwritten extent, so switch its completion
- * handler.
- */
- INIT_WORK(&ioend->io_work, xfs_end_bio_written);
- xfs_finish_ioend(ioend, 0);
- }
-
- /*
- * blockdev_direct_IO can return an error even after the I/O
- * completion handler was called. Thus we need to protect
- * against double-freeing.
- */
- iocb->private = NULL;
-}
-
-STATIC ssize_t
-xfs_vm_direct_IO(
- int rw,
- struct kiocb *iocb,
- const struct iovec *iov,
- loff_t offset,
- unsigned long nr_segs)
-{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- struct block_device *bdev;
- ssize_t ret;
-
- bdev = xfs_find_bdev_for_inode(XFS_I(inode));
-
- if (rw == WRITE) {
- iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
- ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
- bdev, iov, offset, nr_segs,
- xfs_get_blocks_direct,
- xfs_end_io_direct);
- } else {
- iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
- ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
- bdev, iov, offset, nr_segs,
- xfs_get_blocks_direct,
- xfs_end_io_direct);
- }
-
- if (unlikely(ret != -EIOCBQUEUED && iocb->private))
- xfs_destroy_ioend(iocb->private);
- return ret;
-}
-
-STATIC int
-xfs_vm_write_begin(
- struct file *file,
- struct address_space *mapping,
- loff_t pos,
- unsigned len,
- unsigned flags,
- struct page **pagep,
- void **fsdata)
-{
- *pagep = NULL;
- return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- xfs_get_blocks);
-}
-
-STATIC sector_t
-xfs_vm_bmap(
- struct address_space *mapping,
- sector_t block)
-{
- struct inode *inode = (struct inode *)mapping->host;
- struct xfs_inode *ip = XFS_I(inode);
-
- xfs_itrace_entry(XFS_I(inode));
- xfs_ilock(ip, XFS_IOLOCK_SHARED);
- xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
- xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- return generic_block_bmap(mapping, block, xfs_get_blocks);
-}
-
-STATIC int
-xfs_vm_readpage(
- struct file *unused,
- struct page *page)
-{
- return mpage_readpage(page, xfs_get_blocks);
-}
-
-STATIC int
-xfs_vm_readpages(
- struct file *unused,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned nr_pages)
-{
- return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
-}
-
-STATIC void
-xfs_vm_invalidatepage(
- struct page *page,
- unsigned long offset)
-{
- xfs_page_trace(XFS_INVALIDPAGE_ENTER,
- page->mapping->host, page, offset);
- block_invalidatepage(page, offset);
-}
-
-const struct address_space_operations xfs_address_space_operations = {
- .readpage = xfs_vm_readpage,
- .readpages = xfs_vm_readpages,
- .writepage = xfs_vm_writepage,
- .writepages = xfs_vm_writepages,
- .sync_page = block_sync_page,
- .releasepage = xfs_vm_releasepage,
- .invalidatepage = xfs_vm_invalidatepage,
- .write_begin = xfs_vm_write_begin,
- .write_end = generic_write_end,
- .bmap = xfs_vm_bmap,
- .direct_IO = xfs_vm_direct_IO,
- .migratepage = buffer_migrate_page,
-};
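As an aside, an address_space_operations table such as this one only takes effect once it is installed on an inode's page-cache mapping. A minimal sketch (illustrative, not taken from the file above), assuming an already-initialised XFS inode:

	/* Illustrative only: hook the XFS aops into a freshly set up inode. */
	static void
	example_install_aops(struct inode *inode)
	{
		inode->i_mapping->a_ops = &xfs_address_space_operations;
	}
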
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
deleted file mode 100644
index 98e0e86093b..00000000000
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ /dev/null
@@ -1,1854 +0,0 @@
-/*
- * Copyright (c) 2000-2006 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include <linux/stddef.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/bio.h>
-#include <linux/sysctl.h>
-#include <linux/proc_fs.h>
-#include <linux/workqueue.h>
-#include <linux/percpu.h>
-#include <linux/blkdev.h>
-#include <linux/hash.h>
-#include <linux/kthread.h>
-#include <linux/migrate.h>
-#include <linux/backing-dev.h>
-#include <linux/freezer.h>
-
-static kmem_zone_t *xfs_buf_zone;
-STATIC int xfsbufd(void *);
-STATIC int xfsbufd_wakeup(int, gfp_t);
-STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
-static struct shrinker xfs_buf_shake = {
- .shrink = xfsbufd_wakeup,
- .seeks = DEFAULT_SEEKS,
-};
-
-static struct workqueue_struct *xfslogd_workqueue;
-struct workqueue_struct *xfsdatad_workqueue;
-
-#ifdef XFS_BUF_TRACE
-void
-xfs_buf_trace(
- xfs_buf_t *bp,
- char *id,
- void *data,
- void *ra)
-{
- ktrace_enter(xfs_buf_trace_buf,
- bp, id,
- (void *)(unsigned long)bp->b_flags,
- (void *)(unsigned long)bp->b_hold.counter,
- (void *)(unsigned long)bp->b_sema.count.counter,
- (void *)current,
- data, ra,
- (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
- (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
- (void *)(unsigned long)bp->b_buffer_length,
- NULL, NULL, NULL, NULL, NULL);
-}
-ktrace_t *xfs_buf_trace_buf;
-#define XFS_BUF_TRACE_SIZE 4096
-#define XB_TRACE(bp, id, data) \
- xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
-#else
-#define XB_TRACE(bp, id, data) do { } while (0)
-#endif
-
-#ifdef XFS_BUF_LOCK_TRACKING
-# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
-# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
-# define XB_GET_OWNER(bp) ((bp)->b_last_holder)
-#else
-# define XB_SET_OWNER(bp) do { } while (0)
-# define XB_CLEAR_OWNER(bp) do { } while (0)
-# define XB_GET_OWNER(bp) do { } while (0)
-#endif
-
-#define xb_to_gfp(flags) \
- ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
- ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
-
-#define xb_to_km(flags) \
- (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
-
-#define xfs_buf_allocate(flags) \
- kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
-#define xfs_buf_deallocate(bp) \
- kmem_zone_free(xfs_buf_zone, (bp));
-
-/*
- * Page Region interfaces.
- *
- * For pages in filesystems where the blocksize is smaller than the
- * pagesize, we use the page->private field (long) to hold a bitmap
- * of uptodate regions within the page.
- *
- * Each such region is "bytes per page / bits per long" bytes long.
- *
- * NBPPR == number-of-bytes-per-page-region
- * BTOPR == bytes-to-page-region (rounded up)
- * BTOPRT == bytes-to-page-region-truncated (rounded down)
- */
-#if (BITS_PER_LONG == 32)
-#define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */
-#elif (BITS_PER_LONG == 64)
-#define PRSHIFT (PAGE_CACHE_SHIFT - 6) /* (64 == 1<<6) */
-#else
-#error BITS_PER_LONG must be 32 or 64
-#endif
-#define NBPPR (PAGE_CACHE_SIZE/BITS_PER_LONG)
-#define BTOPR(b) (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
-#define BTOPRT(b) (((unsigned int)(b) >> PRSHIFT))
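To make the granularity concrete, a worked example of these macros under an assumed configuration of 4 KiB pages and 64-bit longs (values are illustrative, not from the patch):

	/*
	 * Worked example, assuming PAGE_CACHE_SIZE == 4096 (PAGE_CACHE_SHIFT == 12)
	 * and BITS_PER_LONG == 64:
	 *
	 *   PRSHIFT     = 12 - 6 = 6            each region covers 1 << 6 = 64 bytes
	 *   NBPPR       = 4096 / 64 = 64        64 regions per page, one bit each in page->private
	 *   BTOPR(100)  = (100 + 63) >> 6 = 2   regions spanned by 100 bytes (rounded up)
	 *   BTOPRT(100) = 100 >> 6 = 1          region index containing byte 100 (rounded down)
	 */
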
-
-STATIC unsigned long
-page_region_mask(
- size_t offset,
- size_t length)
-{
- unsigned long mask;
- int first, final;
-
- first = BTOPR(offset);
- final = BTOPRT(offset + length - 1);
- first = min(first, final);
-
- mask = ~0UL;
- mask <<= BITS_PER_LONG - (final - first);
- mask >>= BITS_PER_LONG - (final);
-
- ASSERT(offset + length <= PAGE_CACHE_SIZE);
- ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
-
- return mask;
-}
-
-STATIC_INLINE void
-set_page_region(
- struct page *page,
- size_t offset,
- size_t length)
-{
- set_page_private(page,
- page_private(page) | page_region_mask(offset, length));
- if (page_private(page) == ~0UL)
- SetPageUptodate(page);
-}
-
-STATIC_INLINE int
-test_page_region(
- struct page *page,
- size_t offset,
- size_t length)
-{
- unsigned long mask = page_region_mask(offset, length);
-
- return (mask && (page_private(page) & mask) == mask);
-}
-
-/*
- * Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
- void *vm_addr;
- struct a_list *next;
-} a_list_t;
-
-static a_list_t *as_free_head;
-static int as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- * Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
- void *addr)
-{
- a_list_t *aentry;
-
-#ifdef CONFIG_XEN
- /*
- * Xen needs to be able to make sure it can get an exclusive
- * RO mapping of pages it wants to turn into a pagetable. If
- * a newly allocated page is also still being vmap()ed by xfs,
- * it will cause pagetable construction to fail. This is a
- * quick workaround to always eagerly unmap pages so that Xen
- * is happy.
- */
- vunmap(addr);
- return;
-#endif
-
- aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
- if (likely(aentry)) {
- spin_lock(&as_lock);
- aentry->next = as_free_head;
- aentry->vm_addr = addr;
- as_free_head = aentry;
- as_list_len++;
- spin_unlock(&as_lock);
- } else {
- vunmap(addr);
- }
-}
-
-STATIC void
-purge_addresses(void)
-{
- a_list_t *aentry, *old;
-
- if (as_free_head == NULL)
- return;
-
- spin_lock(&as_lock);
- aentry = as_free_head;
- as_free_head = NULL;
- as_list_len = 0;
- spin_unlock(&as_lock);
-
- while ((old = aentry) != NULL) {
- vunmap(aentry->vm_addr);
- aentry = aentry->next;
- kfree(old);
- }
-}
-
-/*
- * Internal xfs_buf_t object manipulation
- */
-
-STATIC void
-_xfs_buf_initialize(
- xfs_buf_t *bp,
- xfs_buftarg_t *target,
- xfs_off_t range_base,
- size_t range_length,
- xfs_buf_flags_t flags)
-{
- /*
- * We don't want certain flags to appear in b_flags.
- */
- flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
-
- memset(bp, 0, sizeof(xfs_buf_t));
- atomic_set(&bp->b_hold, 1);
- init_MUTEX_LOCKED(&bp->b_iodonesema);
- INIT_LIST_HEAD(&bp->b_list);
- INIT_LIST_HEAD(&bp->b_hash_list);
- init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
- XB_SET_OWNER(bp);
- bp->b_target = target;
- bp->b_file_offset = range_base;
- /*
- * Set buffer_length and count_desired to the same value initially.
- * I/O routines should use count_desired, which will be the same in
- * most cases but may be reset (e.g. XFS recovery).
- */
- bp->b_buffer_length = bp->b_count_desired = range_length;
- bp->b_flags = flags;
- bp->b_bn = XFS_BUF_DADDR_NULL;
- atomic_set(&bp->b_pin_count, 0);
- init_waitqueue_head(&bp->b_waiters);
-
- XFS_STATS_INC(xb_create);
- XB_TRACE(bp, "initialize", target);
-}
-
-/*
- * Allocate a page array capable of holding a specified number
- * of pages, and point the page buf at it.
- */
-STATIC int
-_xfs_buf_get_pages(
- xfs_buf_t *bp,
- int page_count,
- xfs_buf_flags_t flags)
-{
- /* Make sure that we have a page list */
- if (bp->b_pages == NULL) {
- bp->b_offset = xfs_buf_poff(bp->b_file_offset);
- bp->b_page_count = page_count;
- if (page_count <= XB_PAGES) {
- bp->b_pages = bp->b_page_array;
- } else {
- bp->b_pages = kmem_alloc(sizeof(struct page *) *
- page_count, xb_to_km(flags));
- if (bp->b_pages == NULL)
- return -ENOMEM;
- }
- memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
- }
- return 0;
-}
-
-/*
- * Frees b_pages if it was allocated.
- */
-STATIC void
-_xfs_buf_free_pages(
- xfs_buf_t *bp)
-{
- if (bp->b_pages != bp->b_page_array) {
- kmem_free(bp->b_pages,
- bp->b_page_count * sizeof(struct page *));
- }
-}
-
-/*
- * Releases the specified buffer.
- *
- * The modification state of any associated pages is left unchanged.
- * The buffer must not be on any hash list - use xfs_buf_rele instead for
- * hashed and refcounted buffers.
- */
-void
-xfs_buf_free(
- xfs_buf_t *bp)
-{
- XB_TRACE(bp, "free", 0);
-
- ASSERT(list_empty(&bp->b_hash_list));
-
- if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
- uint i;
-
- if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
- free_address(bp->b_addr - bp->b_offset);
-
- for (i = 0; i < bp->b_page_count; i++) {
- struct page *page = bp->b_pages[i];
-
- if (bp->b_flags & _XBF_PAGE_CACHE)
- ASSERT(!PagePrivate(page));
- page_cache_release(page);
- }
- _xfs_buf_free_pages(bp);
- }
-
- xfs_buf_deallocate(bp);
-}
-
-/*
- * Finds all the pages for the buffer in question and builds its page list.
- */
-STATIC int
-_xfs_buf_lookup_pages(
- xfs_buf_t *bp,
- uint flags)
-{
- struct address_space *mapping = bp->b_target->bt_mapping;
- size_t blocksize = bp->b_target->bt_bsize;
- size_t size = bp->b_count_desired;
- size_t nbytes, offset;
- gfp_t gfp_mask = xb_to_gfp(flags);
- unsigned short page_count, i;
- pgoff_t first;
- xfs_off_t end;
- int error;
-
- end = bp->b_file_offset + bp->b_buffer_length;
- page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
-
- error = _xfs_buf_get_pages(bp, page_count, flags);
- if (unlikely(error))
- return error;
- bp->b_flags |= _XBF_PAGE_CACHE;
-
- offset = bp->b_offset;
- first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
-
- for (i = 0; i < bp->b_page_count; i++) {
- struct page *page;
- uint retries = 0;
-
- retry:
- page = find_or_create_page(mapping, first + i, gfp_mask);
- if (unlikely(page == NULL)) {
- if (flags & XBF_READ_AHEAD) {
- bp->b_page_count = i;
- for (i = 0; i < bp->b_page_count; i++)
- unlock_page(bp->b_pages[i]);
- return -ENOMEM;
- }
-
- /*
- * This could deadlock.
- *
- * But until all the XFS lowlevel code is revamped to
- * handle buffer allocation failures we can't do much.
- */
- if (!(++retries % 100))
- printk(KERN_ERR
- "XFS: possible memory allocation "
- "deadlock in %s (mode:0x%x)\n",
- __func__, gfp_mask);
-
- XFS_STATS_INC(xb_page_retries);
- xfsbufd_wakeup(0, gfp_mask);
- congestion_wait(WRITE, HZ/50);
- goto retry;
- }
-
- XFS_STATS_INC(xb_page_found);
-
- nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
- size -= nbytes;
-
- ASSERT(!PagePrivate(page));
- if (!PageUptodate(page)) {
- page_count--;
- if (blocksize >= PAGE_CACHE_SIZE) {
- if (flags & XBF_READ)
- bp->b_flags |= _XBF_PAGE_LOCKED;
- } else if (!PagePrivate(page)) {
- if (test_page_region(page, offset, nbytes))
- page_count++;
- }
- }
-
- bp->b_pages[i] = page;
- offset = 0;
- }
-
- if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
- for (i = 0; i < bp->b_page_count; i++)
- unlock_page(bp->b_pages[i]);
- }
-
- if (page_count == bp->b_page_count)
- bp->b_flags |= XBF_DONE;
-
- XB_TRACE(bp, "lookup_pages", (long)page_count);
- return error;
-}
-
-/*
- * Map buffer into kernel address-space if necessary.
- */
-STATIC int
-_xfs_buf_map_pages(
- xfs_buf_t *bp,
- uint flags)
-{
- /* A single page buffer is always mappable */
- if (bp->b_page_count == 1) {
- bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
- bp->b_flags |= XBF_MAPPED;
- } else if (flags & XBF_MAPPED) {
- if (as_list_len > 64)
- purge_addresses();
- bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
- VM_MAP, PAGE_KERNEL);
- if (unlikely(bp->b_addr == NULL))
- return -ENOMEM;
- bp->b_addr += bp->b_offset;
- bp->b_flags |= XBF_MAPPED;
- }
-
- return 0;
-}
-
-/*
- * Finding and Reading Buffers
- */
-
-/*
- * Looks up, and creates if absent, a lockable buffer for
- * a given range of an inode. The buffer is returned
- * locked. If other overlapping buffers exist, they are
- * released before the new buffer is created and locked,
- * which may imply that this call will block until those buffers
- * are unlocked. No I/O is implied by this call.
- */
-xfs_buf_t *
-_xfs_buf_find(
- xfs_buftarg_t *btp, /* block device target */
- xfs_off_t ioff, /* starting offset of range */
- size_t isize, /* length of range */
- xfs_buf_flags_t flags,
- xfs_buf_t *new_bp)
-{
- xfs_off_t range_base;
- size_t range_length;
- xfs_bufhash_t *hash;
- xfs_buf_t *bp, *n;
-
- range_base = (ioff << BBSHIFT);
- range_length = (isize << BBSHIFT);
-
- /* Check for IOs smaller than the sector size / not sector aligned */
- ASSERT(!(range_length < (1 << btp->bt_sshift)));
- ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
-
- hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
-
- spin_lock(&hash->bh_lock);
-
- list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
- ASSERT(btp == bp->b_target);
- if (bp->b_file_offset == range_base &&
- bp->b_buffer_length == range_length) {
- /*
- * If we look at something, bring it to the
- * front of the list for next time.
- */
- atomic_inc(&bp->b_hold);
- list_move(&bp->b_hash_list, &hash->bh_list);
- goto found;
- }
- }
-
- /* No match found */
- if (new_bp) {
- _xfs_buf_initialize(new_bp, btp, range_base,
- range_length, flags);
- new_bp->b_hash = hash;
- list_add(&new_bp->b_hash_list, &hash->bh_list);
- } else {
- XFS_STATS_INC(xb_miss_locked);
- }
-
- spin_unlock(&hash->bh_lock);
- return new_bp;
-
-found:
- spin_unlock(&hash->bh_lock);
-
- /* Attempt to get the semaphore without sleeping,
- * if this does not work then we need to drop the
- * spinlock and do a hard attempt on the semaphore.
- */
- if (down_trylock(&bp->b_sema)) {
- if (!(flags & XBF_TRYLOCK)) {
- /* wait for buffer ownership */
- XB_TRACE(bp, "get_lock", 0);
- xfs_buf_lock(bp);
- XFS_STATS_INC(xb_get_locked_waited);
- } else {
- /* We asked for a trylock and failed; no need
- * to look at the file offset and length here - we
- * know that this buffer at least overlaps our
- * buffer and is locked, therefore our buffer
- * either does not exist, or is this buffer.
- */
- xfs_buf_rele(bp);
- XFS_STATS_INC(xb_busy_locked);
- return NULL;
- }
- } else {
- /* trylock worked */
- XB_SET_OWNER(bp);
- }
-
- if (bp->b_flags & XBF_STALE) {
- ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
- bp->b_flags &= XBF_MAPPED;
- }
- XB_TRACE(bp, "got_lock", 0);
- XFS_STATS_INC(xb_get_locked);
- return bp;
-}
-
-/*
- * Assembles a buffer covering the specified range.
- * Storage in memory for all portions of the buffer will be allocated,
- * although backing storage may not be.
- */
-xfs_buf_t *
-xfs_buf_get_flags(
- xfs_buftarg_t *target,/* target for buffer */
- xfs_off_t ioff, /* starting offset of range */
- size_t isize, /* length of range */
- xfs_buf_flags_t flags)
-{
- xfs_buf_t *bp, *new_bp;
- int error = 0, i;
-
- new_bp = xfs_buf_allocate(flags);
- if (unlikely(!new_bp))
- return NULL;
-
- bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
- if (bp == new_bp) {
- error = _xfs_buf_lookup_pages(bp, flags);
- if (error)
- goto no_buffer;
- } else {
- xfs_buf_deallocate(new_bp);
- if (unlikely(bp == NULL))
- return NULL;
- }
-
- for (i = 0; i < bp->b_page_count; i++)
- mark_page_accessed(bp->b_pages[i]);
-
- if (!(bp->b_flags & XBF_MAPPED)) {
- error = _xfs_buf_map_pages(bp, flags);
- if (unlikely(error)) {
- printk(KERN_WARNING "%s: failed to map pages\n",
- __func__);
- goto no_buffer;
- }
- }
-
- XFS_STATS_INC(xb_get);
-
- /*
- * Always fill in the block number now, the mapped cases can do
- * their own overlay of this later.
- */
- bp->b_bn = ioff;
- bp->b_count_desired = bp->b_buffer_length;
-
- XB_TRACE(bp, "get", (unsigned long)flags);
- return bp;
-
- no_buffer:
- if (flags & (XBF_LOCK | XBF_TRYLOCK))
- xfs_buf_unlock(bp);
- xfs_buf_rele(bp);
- return NULL;
-}
-
-xfs_buf_t *
-xfs_buf_read_flags(
- xfs_buftarg_t *target,
- xfs_off_t ioff,
- size_t isize,
- xfs_buf_flags_t flags)
-{
- xfs_buf_t *bp;
-
- flags |= XBF_READ;
-
- bp = xfs_buf_get_flags(target, ioff, isize, flags);
- if (bp) {
- if (!XFS_BUF_ISDONE(bp)) {
- XB_TRACE(bp, "read", (unsigned long)flags);
- XFS_STATS_INC(xb_get_read);
- xfs_buf_iostart(bp, flags);
- } else if (flags & XBF_ASYNC) {
- XB_TRACE(bp, "read_async", (unsigned long)flags);
- /*
- * Read ahead call which is already satisfied,
- * drop the buffer
- */
- goto no_buffer;
- } else {
- XB_TRACE(bp, "read_done", (unsigned long)flags);
- /* We do not want read in the flags */
- bp->b_flags &= ~XBF_READ;
- }
- }
-
- return bp;
-
- no_buffer:
- if (flags & (XBF_LOCK | XBF_TRYLOCK))
- xfs_buf_unlock(bp);
- xfs_buf_rele(bp);
- return NULL;
-}
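A minimal usage sketch of the read path above (illustrative only; btp, blkno and numblks are assumed to come from the caller):

	/* Illustrative only: synchronous metadata read followed by release. */
	static int
	example_read_block(xfs_buftarg_t *btp, xfs_off_t blkno, size_t numblks)
	{
		xfs_buf_t	*bp;
		int		error;

		bp = xfs_buf_read_flags(btp, blkno, numblks, XBF_LOCK | XBF_MAPPED);
		if (!bp)
			return ENOMEM;
		error = bp->b_error;	/* set by the I/O completion path on failure */
		if (!error) {
			/* contents are valid and mapped at XFS_BUF_PTR(bp) while held */
		}
		xfs_buf_relse(bp);	/* unlock and drop the hold taken by the lookup */
		return error;
	}
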
-
-/*
- * If we are not low on memory then do the readahead in a deadlock
- * safe manner.
- */
-void
-xfs_buf_readahead(
- xfs_buftarg_t *target,
- xfs_off_t ioff,
- size_t isize,
- xfs_buf_flags_t flags)
-{
- struct backing_dev_info *bdi;
-
- bdi = target->bt_mapping->backing_dev_info;
- if (bdi_read_congested(bdi))
- return;
-
- flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
- xfs_buf_read_flags(target, ioff, isize, flags);
-}
-
-xfs_buf_t *
-xfs_buf_get_empty(
- size_t len,
- xfs_buftarg_t *target)
-{
- xfs_buf_t *bp;
-
- bp = xfs_buf_allocate(0);
- if (bp)
- _xfs_buf_initialize(bp, target, 0, len, 0);
- return bp;
-}
-
-static inline struct page *
-mem_to_page(
- void *addr)
-{
- if ((!is_vmalloc_addr(addr))) {
- return virt_to_page(addr);
- } else {
- return vmalloc_to_page(addr);
- }
-}
-
-int
-xfs_buf_associate_memory(
- xfs_buf_t *bp,
- void *mem,
- size_t len)
-{
- int rval;
- int i = 0;
- unsigned long pageaddr;
- unsigned long offset;
- size_t buflen;
- int page_count;
-
- pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
- offset = (unsigned long)mem - pageaddr;
- buflen = PAGE_CACHE_ALIGN(len + offset);
- page_count = buflen >> PAGE_CACHE_SHIFT;
-
- /* Free any previous set of page pointers */
- if (bp->b_pages)
- _xfs_buf_free_pages(bp);
-
- bp->b_pages = NULL;
- bp->b_addr = mem;
-
- rval = _xfs_buf_get_pages(bp, page_count, 0);
- if (rval)
- return rval;
-
- bp->b_offset = offset;
-
- for (i = 0; i < bp->b_page_count; i++) {
- bp->b_pages[i] = mem_to_page((void *)pageaddr);
- pageaddr += PAGE_CACHE_SIZE;
- }
-
- bp->b_count_desired = len;
- bp->b_buffer_length = buflen;
- bp->b_flags |= XBF_MAPPED;
- bp->b_flags &= ~_XBF_PAGE_LOCKED;
-
- return 0;
-}
-
-xfs_buf_t *
-xfs_buf_get_noaddr(
- size_t len,
- xfs_buftarg_t *target)
-{
- unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
- int error, i;
- xfs_buf_t *bp;
-
- bp = xfs_buf_allocate(0);
- if (unlikely(bp == NULL))
- goto fail;
- _xfs_buf_initialize(bp, target, 0, len, 0);
-
- error = _xfs_buf_get_pages(bp, page_count, 0);
- if (error)
- goto fail_free_buf;
-
- for (i = 0; i < page_count; i++) {
- bp->b_pages[i] = alloc_page(GFP_KERNEL);
- if (!bp->b_pages[i])
- goto fail_free_mem;
- }
- bp->b_flags |= _XBF_PAGES;
-
- error = _xfs_buf_map_pages(bp, XBF_MAPPED);
- if (unlikely(error)) {
- printk(KERN_WARNING "%s: failed to map pages\n",
- __func__);
- goto fail_free_mem;
- }
-
- xfs_buf_unlock(bp);
-
- XB_TRACE(bp, "no_daddr", len);
- return bp;
-
- fail_free_mem:
- while (--i >= 0)
- __free_page(bp->b_pages[i]);
- _xfs_buf_free_pages(bp);
- fail_free_buf:
- xfs_buf_deallocate(bp);
- fail:
- return NULL;
-}
-
-/*
- * Increment reference count on buffer, to hold the buffer concurrently
- * with another thread which may release (free) the buffer asynchronously.
- * Must hold the buffer already to call this function.
- */
-void
-xfs_buf_hold(
- xfs_buf_t *bp)
-{
- atomic_inc(&bp->b_hold);
- XB_TRACE(bp, "hold", 0);
-}
-
-/*
- * Releases a hold on the specified buffer. If the
- * hold count is 1, calls xfs_buf_free.
- */
-void
-xfs_buf_rele(
- xfs_buf_t *bp)
-{
- xfs_bufhash_t *hash = bp->b_hash;
-
- XB_TRACE(bp, "rele", bp->b_relse);
-
- if (unlikely(!hash)) {
- ASSERT(!bp->b_relse);
- if (atomic_dec_and_test(&bp->b_hold))
- xfs_buf_free(bp);
- return;
- }
-
- if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
- if (bp->b_relse) {
- atomic_inc(&bp->b_hold);
- spin_unlock(&hash->bh_lock);
- (*(bp->b_relse)) (bp);
- } else if (bp->b_flags & XBF_FS_MANAGED) {
- spin_unlock(&hash->bh_lock);
- } else {
- ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
- list_del_init(&bp->b_hash_list);
- spin_unlock(&hash->bh_lock);
- xfs_buf_free(bp);
- }
- } else {
- /*
- * Catch reference count leaks
- */
- ASSERT(atomic_read(&bp->b_hold) >= 0);
- }
-}
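Hold and release always pair up; a small illustrative sketch of keeping a buffer alive while handing it to another context:

	/* Illustrative only: take an extra reference before handing bp off. */
	xfs_buf_hold(bp);
	/* ... pass bp to an asynchronous consumer; it calls xfs_buf_rele(bp) when done ... */
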
-
-
-/*
- * Mutual exclusion on buffers. Locking model:
- *
- * Buffers associated with inodes for which buffer locking
- * is not enabled are not protected by semaphores, and are
- * assumed to be exclusively owned by the caller. There is a
- * spinlock in the buffer, used by the caller when concurrent
- * access is possible.
- */
-
-/*
- * Locks a buffer object, if it is not already locked.
- * Note that this in no way locks the underlying pages, so it is only
- * useful for synchronizing concurrent use of buffer objects, not for
- * synchronizing independent access to the underlying pages.
- */
-int
-xfs_buf_cond_lock(
- xfs_buf_t *bp)
-{
- int locked;
-
- locked = down_trylock(&bp->b_sema) == 0;
- if (locked) {
- XB_SET_OWNER(bp);
- }
- XB_TRACE(bp, "cond_lock", (long)locked);
- return locked ? 0 : -EBUSY;
-}
-
-#if defined(DEBUG) || defined(XFS_BLI_TRACE)
-int
-xfs_buf_lock_value(
- xfs_buf_t *bp)
-{
- return bp->b_sema.count;
-}
-#endif
-
-/*
- * Locks a buffer object.
- * Note that this in no way locks the underlying pages, so it is only
- * useful for synchronizing concurrent use of buffer objects, not for
- * synchronizing independent access to the underlying pages.
- */
-void
-xfs_buf_lock(
- xfs_buf_t *bp)
-{
- XB_TRACE(bp, "lock", 0);
- if (atomic_read(&bp->b_io_remaining))
- blk_run_address_space(bp->b_target->bt_mapping);
- down(&bp->b_sema);
- XB_SET_OWNER(bp);
- XB_TRACE(bp, "locked", 0);
-}
-
-/*
- * Releases the lock on the buffer object.
- * If the buffer is marked delwri but is not queued, do so before we
- * unlock the buffer as we need to set flags correctly. We also need to
- * take a reference for the delwri queue because the unlocker is going to
- * drop theirs and they don't know we just queued it.
- */
-void
-xfs_buf_unlock(
- xfs_buf_t *bp)
-{
- if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
- atomic_inc(&bp->b_hold);
- bp->b_flags |= XBF_ASYNC;
- xfs_buf_delwri_queue(bp, 0);
- }
-
- XB_CLEAR_OWNER(bp);
- up(&bp->b_sema);
- XB_TRACE(bp, "unlock", 0);
-}
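A small illustrative sketch of the two locking entry points, relying on the -EBUSY convention of xfs_buf_cond_lock shown above:

	/* Illustrative only: try-lock first, fall back to a blocking lock. */
	if (xfs_buf_cond_lock(bp)) {	/* non-zero (-EBUSY) means it is already locked */
		xfs_buf_lock(bp);	/* sleeps until the owner calls xfs_buf_unlock() */
	}
	/* ... buffer is exclusively owned here ... */
	xfs_buf_unlock(bp);
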
-
-
-/*
- * Pinning Buffer Storage in Memory
- * Ensure that no attempt to force a buffer to disk will succeed.
- */
-void
-xfs_buf_pin(
- xfs_buf_t *bp)
-{
- atomic_inc(&bp->b_pin_count);
- XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
-}
-
-void
-xfs_buf_unpin(
- xfs_buf_t *bp)
-{
- if (atomic_dec_and_test(&bp->b_pin_count))
- wake_up_all(&bp->b_waiters);
- XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
-}
-
-int
-xfs_buf_ispin(
- xfs_buf_t *bp)
-{
- return atomic_read(&bp->b_pin_count);
-}
-
-STATIC void
-xfs_buf_wait_unpin(
- xfs_buf_t *bp)
-{
- DECLARE_WAITQUEUE (wait, current);
-
- if (atomic_read(&bp->b_pin_count) == 0)
- return;
-
- add_wait_queue(&bp->b_waiters, &wait);
- for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (atomic_read(&bp->b_pin_count) == 0)
- break;
- if (atomic_read(&bp->b_io_remaining))
- blk_run_address_space(bp->b_target->bt_mapping);
- schedule();
- }
- remove_wait_queue(&bp->b_waiters, &wait);
- set_current_state(TASK_RUNNING);
-}
-
-/*
- * Buffer Utility Routines
- */
-
-STATIC void
-xfs_buf_iodone_work(
- struct work_struct *work)
-{
- xfs_buf_t *bp =
- container_of(work, xfs_buf_t, b_iodone_work);
-
- /*
- * We can get an EOPNOTSUPP for ordered writes. Here we clear the
- * ordered flag and reissue them. Because we can't tell the higher
- * layers directly that they should not issue ordered I/O anymore, they
- * need to check if the ordered flag was cleared during I/O completion.
- */
- if ((bp->b_error == EOPNOTSUPP) &&
- (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
- XB_TRACE(bp, "ordered_retry", bp->b_iodone);
- bp->b_flags &= ~XBF_ORDERED;
- xfs_buf_iorequest(bp);
- } else if (bp->b_iodone)
- (*(bp->b_iodone))(bp);
- else if (bp->b_flags & XBF_ASYNC)
- xfs_buf_relse(bp);
-}
-
-void
-xfs_buf_ioend(
- xfs_buf_t *bp,
- int schedule)
-{
- bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
- if (bp->b_error == 0)
- bp->b_flags |= XBF_DONE;
-
- XB_TRACE(bp, "iodone", bp->b_iodone);
-
- if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
- if (schedule) {
- INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
- queue_work(xfslogd_workqueue, &bp->b_iodone_work);
- } else {
- xfs_buf_iodone_work(&bp->b_iodone_work);
- }
- } else {
- up(&bp->b_iodonesema);
- }
-}
-
-void
-xfs_buf_ioerror(
- xfs_buf_t *bp,
- int error)
-{
- ASSERT(error >= 0 && error <= 0xffff);
- bp->b_error = (unsigned short)error;
- XB_TRACE(bp, "ioerror", (unsigned long)error);
-}
-
-/*
- * Initiate I/O on a buffer, based on the flags supplied.
- * The b_iodone routine in the buffer supplied will only be called
- * when all of the subsidiary I/O requests, if any, have been completed.
- */
-int
-xfs_buf_iostart(
- xfs_buf_t *bp,
- xfs_buf_flags_t flags)
-{
- int status = 0;
-
- XB_TRACE(bp, "iostart", (unsigned long)flags);
-
- if (flags & XBF_DELWRI) {
- bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
- bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
- xfs_buf_delwri_queue(bp, 1);
- return 0;
- }
-
- bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
- XBF_READ_AHEAD | _XBF_RUN_QUEUES);
- bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
- XBF_READ_AHEAD | _XBF_RUN_QUEUES);
-
- BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
-
- /* For writes allow an alternate strategy routine to precede
- * the actual I/O request (which may not be issued at all in
- * a shutdown situation, for example).
- */
- status = (flags & XBF_WRITE) ?
- xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
-
- /* Wait for I/O if we are not an async request.
- * Note: async I/O request completion will release the buffer,
- * and that may already have happened by this point. So using the
- * buffer pointer from here on, after async I/O, is invalid.
- */
- if (!status && !(flags & XBF_ASYNC))
- status = xfs_buf_iowait(bp);
-
- return status;
-}
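Because the routine waits itself when XBF_ASYNC is not set, a synchronous write reduces to a single call; an illustrative sketch, assuming the caller already owns the locked buffer bp:

	/* Illustrative only: start a synchronous write and wait for completion. */
	static int
	example_write_sync(xfs_buf_t *bp)
	{
		int	error;

		error = xfs_buf_iostart(bp, XBF_WRITE);	/* no XBF_ASYNC, so this waits */
		return error;				/* caller still holds the buffer */
	}
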
-
-STATIC_INLINE void
-_xfs_buf_ioend(
- xfs_buf_t *bp,
- int schedule)
-{
- if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
- bp->b_flags &= ~_XBF_PAGE_LOCKED;
- xfs_buf_ioend(bp, schedule);
- }
-}
-
-STATIC void
-xfs_buf_bio_end_io(
- struct bio *bio,
- int error)
-{
- xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
- unsigned int blocksize = bp->b_target->bt_bsize;
- struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
- bp->b_error = EIO;
-
- do {
- struct page *page = bvec->bv_page;
-
- ASSERT(!PagePrivate(page));
- if (unlikely(bp->b_error)) {
- if (bp->b_flags & XBF_READ)
- ClearPageUptodate(page);
- } else if (blocksize >= PAGE_CACHE_SIZE) {
- SetPageUptodate(page);
- } else if (!PagePrivate(page) &&
- (bp->b_flags & _XBF_PAGE_CACHE)) {
- set_page_region(page, bvec->bv_offset, bvec->bv_len);
- }
-
- if (--bvec >= bio->bi_io_vec)
- prefetchw(&bvec->bv_page->flags);
-
- if (bp->b_flags & _XBF_PAGE_LOCKED)
- unlock_page(page);
- } while (bvec >= bio->bi_io_vec);
-
- _xfs_buf_ioend(bp, 1);
- bio_put(bio);
-}
-
-STATIC void
-_xfs_buf_ioapply(
- xfs_buf_t *bp)
-{
- int rw, map_i, total_nr_pages, nr_pages;
- struct bio *bio;
- int offset = bp->b_offset;
- int size = bp->b_count_desired;
- sector_t sector = bp->b_bn;
- unsigned int blocksize = bp->b_target->bt_bsize;
-
- total_nr_pages = bp->b_page_count;
- map_i = 0;
-
- if (bp->b_flags & XBF_ORDERED) {
- ASSERT(!(bp->b_flags & XBF_READ));
- rw = WRITE_BARRIER;
- } else if (bp->b_flags & _XBF_RUN_QUEUES) {
- ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
- bp->b_flags &= ~_XBF_RUN_QUEUES;
- rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
- } else {
- rw = (bp->b_flags & XBF_WRITE) ? WRITE :
- (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
- }
-
- /* Special code path for reading in a sub-page-size buffer --
- * we populate the whole page, and hence the other metadata
- * in the same page. This optimization is only valid when the
- * filesystem block size is not smaller than the page size.
- */
- if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
- ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
- (XBF_READ|_XBF_PAGE_LOCKED)) &&
- (blocksize >= PAGE_CACHE_SIZE)) {
- bio = bio_alloc(GFP_NOIO, 1);
-
- bio->bi_bdev = bp->b_target->bt_bdev;
- bio->bi_sector = sector - (offset >> BBSHIFT);
- bio->bi_end_io = xfs_buf_bio_end_io;
- bio->bi_private = bp;
-
- bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
- size = 0;
-
- atomic_inc(&bp->b_io_remaining);
-
- goto submit_io;
- }
-
-next_chunk:
- atomic_inc(&bp->b_io_remaining);
- nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
- if (nr_pages > total_nr_pages)
- nr_pages = total_nr_pages;
-
- bio = bio_alloc(GFP_NOIO, nr_pages);
- bio->bi_bdev = bp->b_target->bt_bdev;
- bio->bi_sector = sector;
- bio->bi_end_io = xfs_buf_bio_end_io;
- bio->bi_private = bp;
-
- for (; size && nr_pages; nr_pages--, map_i++) {
- int rbytes, nbytes = PAGE_CACHE_SIZE - offset;
-
- if (nbytes > size)
- nbytes = size;
-
- rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
- if (rbytes < nbytes)
- break;
-
- offset = 0;
- sector += nbytes >> BBSHIFT;
- size -= nbytes;
- total_nr_pages--;
- }
-
-submit_io:
- if (likely(bio->bi_size)) {
- submit_bio(rw, bio);
- if (size)
- goto next_chunk;
- } else {
- bio_put(bio);
- xfs_buf_ioerror(bp, EIO);
- }
-}
-
-int
-xfs_buf_iorequest(
- xfs_buf_t *bp)
-{
- XB_TRACE(bp, "iorequest", 0);
-
- if (bp->b_flags & XBF_DELWRI) {
- xfs_buf_delwri_queue(bp, 1);
- return 0;
- }
-
- if (bp->b_flags & XBF_WRITE) {
- xfs_buf_wait_unpin(bp);
- }
-
- xfs_buf_hold(bp);
-
- /* Set the count to 1 initially; this will stop an I/O
- * completion callout which happens before we have started
- * all the I/O from calling xfs_buf_ioend too early.
- */
- atomic_set(&bp->b_io_remaining, 1);
- _xfs_buf_ioapply(bp);
- _xfs_buf_ioend(bp, 0);
-
- xfs_buf_rele(bp);
- return 0;
-}
-
-/*
- * Waits for I/O to complete on the buffer supplied.
- * It returns immediately if no I/O is pending.
- * It returns the I/O error code, if any, or 0 if there was no error.
- */
-int
-xfs_buf_iowait(
- xfs_buf_t *bp)
-{
- XB_TRACE(bp, "iowait", 0);
- if (atomic_read(&bp->b_io_remaining))
- blk_run_address_space(bp->b_target->bt_mapping);
- down(&bp->b_iodonesema);
- XB_TRACE(bp, "iowaited", (long)bp->b_error);
- return bp->b_error;
-}
-
-xfs_caddr_t
-xfs_buf_offset(
- xfs_buf_t *bp,
- size_t offset)
-{
- struct page *page;
-
- if (bp->b_flags & XBF_MAPPED)
- return XFS_BUF_PTR(bp) + offset;
-
- offset += bp->b_offset;
- page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
- return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
-}
-
-/*
- * Move data into or out of a buffer.
- */
-void
-xfs_buf_iomove(
- xfs_buf_t *bp, /* buffer to process */
- size_t boff, /* starting buffer offset */
- size_t bsize, /* length to copy */
- caddr_t data, /* data address */
- xfs_buf_rw_t mode) /* read/write/zero flag */
-{
- size_t bend, cpoff, csize;
- struct page *page;
-
- bend = boff + bsize;
- while (boff < bend) {
- page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
- cpoff = xfs_buf_poff(boff + bp->b_offset);
- csize = min_t(size_t,
- PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
-
- ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
-
- switch (mode) {
- case XBRW_ZERO:
- memset(page_address(page) + cpoff, 0, csize);
- break;
- case XBRW_READ:
- memcpy(data, page_address(page) + cpoff, csize);
- break;
- case XBRW_WRITE:
- memcpy(page_address(page) + cpoff, data, csize);
- }
-
- boff += csize;
- data += csize;
- }
-}
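An illustrative sketch of the copy routine above; rec is a hypothetical caller-side structure:

	/* Illustrative only: copy a small record into a held buffer at offset 0. */
	xfs_buf_iomove(bp, 0, sizeof(rec), (caddr_t)&rec, XBRW_WRITE);
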
-
-/*
- * Handling of buffer targets (buftargs).
- */
-
-/*
- * Wait for any bufs with callbacks that have been submitted but
- * have not yet returned... walk the hash list for the target.
- */
-void
-xfs_wait_buftarg(
- xfs_buftarg_t *btp)
-{
- xfs_buf_t *bp, *n;
- xfs_bufhash_t *hash;
- uint i;
-
- for (i = 0; i < (1 << btp->bt_hashshift); i++) {
- hash = &btp->bt_hash[i];
-again:
- spin_lock(&hash->bh_lock);
- list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
- ASSERT(btp == bp->b_target);
- if (!(bp->b_flags & XBF_FS_MANAGED)) {
- spin_unlock(&hash->bh_lock);
- /*
- * Catch superblock reference count leaks
- * immediately
- */
- BUG_ON(bp->b_bn == 0);
- delay(100);
- goto again;
- }
- }
- spin_unlock(&hash->bh_lock);
- }
-}
-
-/*
- * Allocate buffer hash table for a given target.
- * For devices containing metadata (i.e. not the log/realtime devices)
- * we need to allocate a much larger hash table.
- */
-STATIC void
-xfs_alloc_bufhash(
- xfs_buftarg_t *btp,
- int external)
-{
- unsigned int i;
-
- btp->bt_hashshift = external ? 3 : 8; /* 8 or 256 buckets */
- btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
- btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
- sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
- for (i = 0; i < (1 << btp->bt_hashshift); i++) {
- spin_lock_init(&btp->bt_hash[i].bh_lock);
- INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
- }
-}
-
-STATIC void
-xfs_free_bufhash(
- xfs_buftarg_t *btp)
-{
- kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
- btp->bt_hash = NULL;
-}
-
-/*
- * buftarg list for delwrite queue processing
- */
-static LIST_HEAD(xfs_buftarg_list);
-static DEFINE_SPINLOCK(xfs_buftarg_lock);
-
-STATIC void
-xfs_register_buftarg(
- xfs_buftarg_t *btp)
-{
- spin_lock(&xfs_buftarg_lock);
- list_add(&btp->bt_list, &xfs_buftarg_list);
- spin_unlock(&xfs_buftarg_lock);
-}
-
-STATIC void
-xfs_unregister_buftarg(
- xfs_buftarg_t *btp)
-{
- spin_lock(&xfs_buftarg_lock);
- list_del(&btp->bt_list);
- spin_unlock(&xfs_buftarg_lock);
-}
-
-void
-xfs_free_buftarg(
- xfs_buftarg_t *btp,
- int external)
-{
- xfs_flush_buftarg(btp, 1);
- xfs_blkdev_issue_flush(btp);
- if (external)
- xfs_blkdev_put(btp->bt_bdev);
- xfs_free_bufhash(btp);
- iput(btp->bt_mapping->host);
-
- /* Unregister the buftarg first so that we don't get a
- * wakeup finding a non-existent task
- */
- xfs_unregister_buftarg(btp);
- kthread_stop(btp->bt_task);
-
- kmem_free(btp, sizeof(*btp));
-}
-
-STATIC int
-xfs_setsize_buftarg_flags(
- xfs_buftarg_t *btp,
- unsigned int blocksize,
- unsigned int sectorsize,
- int verbose)
-{
- btp->bt_bsize = blocksize;
- btp->bt_sshift = ffs(sectorsize) - 1;
- btp->bt_smask = sectorsize - 1;
-
- if (set_blocksize(btp->bt_bdev, sectorsize)) {
- printk(KERN_WARNING
- "XFS: Cannot set_blocksize to %u on device %s\n",
- sectorsize, XFS_BUFTARG_NAME(btp));
- return EINVAL;
- }
-
- if (verbose &&
- (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
- printk(KERN_WARNING
- "XFS: %u byte sectors in use on device %s. "
- "This is suboptimal; %u or greater is ideal.\n",
- sectorsize, XFS_BUFTARG_NAME(btp),
- (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
- }
-
- return 0;
-}
-
-/*
- * When allocating the initial buffer target we have not yet
- * read in the superblock, so we don't know what size sectors
- * are being used at this early stage. Play safe.
- */
-STATIC int
-xfs_setsize_buftarg_early(
- xfs_buftarg_t *btp,
- struct block_device *bdev)
-{
- return xfs_setsize_buftarg_flags(btp,
- PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
-}
-
-int
-xfs_setsize_buftarg(
- xfs_buftarg_t *btp,
- unsigned int blocksize,
- unsigned int sectorsize)
-{
- return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
-}
-
-STATIC int
-xfs_mapping_buftarg(
- xfs_buftarg_t *btp,
- struct block_device *bdev)
-{
- struct backing_dev_info *bdi;
- struct inode *inode;
- struct address_space *mapping;
- static const struct address_space_operations mapping_aops = {
- .sync_page = block_sync_page,
- .migratepage = fail_migrate_page,
- };
-
- inode = new_inode(bdev->bd_inode->i_sb);
- if (!inode) {
- printk(KERN_WARNING
- "XFS: Cannot allocate mapping inode for device %s\n",
- XFS_BUFTARG_NAME(btp));
- return ENOMEM;
- }
- inode->i_mode = S_IFBLK;
- inode->i_bdev = bdev;
- inode->i_rdev = bdev->bd_dev;
- bdi = blk_get_backing_dev_info(bdev);
- if (!bdi)
- bdi = &default_backing_dev_info;
- mapping = &inode->i_data;
- mapping->a_ops = &mapping_aops;
- mapping->backing_dev_info = bdi;
- mapping_set_gfp_mask(mapping, GFP_NOFS);
- btp->bt_mapping = mapping;
- return 0;
-}
-
-STATIC int
-xfs_alloc_delwrite_queue(
- xfs_buftarg_t *btp)
-{
- int error = 0;
-
- INIT_LIST_HEAD(&btp->bt_list);
- INIT_LIST_HEAD(&btp->bt_delwrite_queue);
- spin_lock_init(&btp->bt_delwrite_lock);
- btp->bt_flags = 0;
- btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
- if (IS_ERR(btp->bt_task)) {
- error = PTR_ERR(btp->bt_task);
- goto out_error;
- }
- xfs_register_buftarg(btp);
-out_error:
- return error;
-}
-
-xfs_buftarg_t *
-xfs_alloc_buftarg(
- struct block_device *bdev,
- int external)
-{
- xfs_buftarg_t *btp;
-
- btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
-
- btp->bt_dev = bdev->bd_dev;
- btp->bt_bdev = bdev;
- if (xfs_setsize_buftarg_early(btp, bdev))
- goto error;
- if (xfs_mapping_buftarg(btp, bdev))
- goto error;
- if (xfs_alloc_delwrite_queue(btp))
- goto error;
- xfs_alloc_bufhash(btp, external);
- return btp;
-
-error:
- kmem_free(btp, sizeof(*btp));
- return NULL;
-}
-
-
-/*
- * Delayed write buffer handling
- */
-STATIC void
-xfs_buf_delwri_queue(
- xfs_buf_t *bp,
- int unlock)
-{
- struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
- spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
-
- XB_TRACE(bp, "delwri_q", (long)unlock);
- ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
-
- spin_lock(dwlk);
- /* If already in the queue, dequeue and place at tail */
- if (!list_empty(&bp->b_list)) {
- ASSERT(bp->b_flags & _XBF_DELWRI_Q);
- if (unlock)
- atomic_dec(&bp->b_hold);
- list_del(&bp->b_list);
- }
-
- bp->b_flags |= _XBF_DELWRI_Q;
- list_add_tail(&bp->b_list, dwq);
- bp->b_queuetime = jiffies;
- spin_unlock(dwlk);
-
- if (unlock)
- xfs_buf_unlock(bp);
-}
-
-void
-xfs_buf_delwri_dequeue(
- xfs_buf_t *bp)
-{
- spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
- int dequeued = 0;
-
- spin_lock(dwlk);
- if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
- ASSERT(bp->b_flags & _XBF_DELWRI_Q);
- list_del_init(&bp->b_list);
- dequeued = 1;
- }
- bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
- spin_unlock(dwlk);
-
- if (dequeued)
- xfs_buf_rele(bp);
-
- XB_TRACE(bp, "delwri_dq", (long)dequeued);
-}
-
-STATIC void
-xfs_buf_runall_queues(
- struct workqueue_struct *queue)
-{
- flush_workqueue(queue);
-}
-
-STATIC int
-xfsbufd_wakeup(
- int priority,
- gfp_t mask)
-{
- xfs_buftarg_t *btp;
-
- spin_lock(&xfs_buftarg_lock);
- list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
- if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
- continue;
- set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
- wake_up_process(btp->bt_task);
- }
- spin_unlock(&xfs_buftarg_lock);
- return 0;
-}
-
-/*
- * Move as many buffers as specified to the supplied list
- * indicating if we skipped any buffers to prevent deadlocks.
- */
-STATIC int
-xfs_buf_delwri_split(
- xfs_buftarg_t *target,
- struct list_head *list,
- unsigned long age)
-{
- xfs_buf_t *bp, *n;
- struct list_head *dwq = &target->bt_delwrite_queue;
- spinlock_t *dwlk = &target->bt_delwrite_lock;
- int skipped = 0;
- int force;
-
- force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
- INIT_LIST_HEAD(list);
- spin_lock(dwlk);
- list_for_each_entry_safe(bp, n, dwq, b_list) {
- XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
- ASSERT(bp->b_flags & XBF_DELWRI);
-
- if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
- if (!force &&
- time_before(jiffies, bp->b_queuetime + age)) {
- xfs_buf_unlock(bp);
- break;
- }
-
- bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
- _XBF_RUN_QUEUES);
- bp->b_flags |= XBF_WRITE;
- list_move_tail(&bp->b_list, list);
- } else
- skipped++;
- }
- spin_unlock(dwlk);
-
- return skipped;
-
-}
-
-STATIC int
-xfsbufd(
- void *data)
-{
- struct list_head tmp;
- xfs_buftarg_t *target = (xfs_buftarg_t *)data;
- int count;
- xfs_buf_t *bp;
-
- current->flags |= PF_MEMALLOC;
-
- set_freezable();
-
- do {
- if (unlikely(freezing(current))) {
- set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
- refrigerator();
- } else {
- clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
- }
-
- schedule_timeout_interruptible(
- xfs_buf_timer_centisecs * msecs_to_jiffies(10));
-
- xfs_buf_delwri_split(target, &tmp,
- xfs_buf_age_centisecs * msecs_to_jiffies(10));
-
- count = 0;
- while (!list_empty(&tmp)) {
- bp = list_entry(tmp.next, xfs_buf_t, b_list);
- ASSERT(target == bp->b_target);
-
- list_del_init(&bp->b_list);
- xfs_buf_iostrategy(bp);
- count++;
- }
-
- if (as_list_len > 0)
- purge_addresses();
- if (count)
- blk_run_address_space(target->bt_mapping);
-
- } while (!kthread_should_stop());
-
- return 0;
-}
-
-/*
- * Go through all incore buffers, and release buffers if they belong to
- * the given device. This is used in filesystem error handling to
- * preserve the consistency of its metadata.
- */
-int
-xfs_flush_buftarg(
- xfs_buftarg_t *target,
- int wait)
-{
- struct list_head tmp;
- xfs_buf_t *bp, *n;
- int pincount = 0;
-
- xfs_buf_runall_queues(xfsdatad_workqueue);
- xfs_buf_runall_queues(xfslogd_workqueue);
-
- set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
- pincount = xfs_buf_delwri_split(target, &tmp, 0);
-
- /*
- * Dropped the delayed write list lock, now walk the temporary list
- */
- list_for_each_entry_safe(bp, n, &tmp, b_list) {
- ASSERT(target == bp->b_target);
- if (wait)
- bp->b_flags &= ~XBF_ASYNC;
- else
- list_del_init(&bp->b_list);
-
- xfs_buf_iostrategy(bp);
- }
-
- if (wait)
- blk_run_address_space(target->bt_mapping);
-
- /*
- * Remaining list items must be flushed before returning
- */
- while (!list_empty(&tmp)) {
- bp = list_entry(tmp.next, xfs_buf_t, b_list);
-
- list_del_init(&bp->b_list);
- xfs_iowait(bp);
- xfs_buf_relse(bp);
- }
-
- return pincount;
-}
-
-int __init
-xfs_buf_init(void)
-{
-#ifdef XFS_BUF_TRACE
- xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
-#endif
-
- xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
- KM_ZONE_HWALIGN, NULL);
- if (!xfs_buf_zone)
- goto out_free_trace_buf;
-
- xfslogd_workqueue = create_workqueue("xfslogd");
- if (!xfslogd_workqueue)
- goto out_free_buf_zone;
-
- xfsdatad_workqueue = create_workqueue("xfsdatad");
- if (!xfsdatad_workqueue)
- goto out_destroy_xfslogd_workqueue;
-
- register_shrinker(&xfs_buf_shake);
- return 0;
-
- out_destroy_xfslogd_workqueue:
- destroy_workqueue(xfslogd_workqueue);
- out_free_buf_zone:
- kmem_zone_destroy(xfs_buf_zone);
- out_free_trace_buf:
-#ifdef XFS_BUF_TRACE
- ktrace_free(xfs_buf_trace_buf);
-#endif
- return -ENOMEM;
-}
-
-void
-xfs_buf_terminate(void)
-{
- unregister_shrinker(&xfs_buf_shake);
- destroy_workqueue(xfsdatad_workqueue);
- destroy_workqueue(xfslogd_workqueue);
- kmem_zone_destroy(xfs_buf_zone);
-#ifdef XFS_BUF_TRACE
- ktrace_free(xfs_buf_trace_buf);
-#endif
-}
-
-#ifdef CONFIG_KDB_MODULES
-struct list_head *
-xfs_get_buftarg_list(void)
-{
- return &xfs_buftarg_list;
-}
-#endif
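Taken together, the buftarg routines above pair up into a simple lifecycle; an illustrative sketch, assuming bdev is a block device opened by the mount path:

	/* Illustrative only: buffer-target setup and teardown. */
	static xfs_buftarg_t *
	example_setup_buftarg(struct block_device *bdev)
	{
		/* sets up the hash table, delwri queue and xfsbufd thread; NULL on failure */
		return xfs_alloc_buftarg(bdev, 0);
	}

	static void
	example_teardown_buftarg(xfs_buftarg_t *btp)
	{
		/* flushes delwri buffers, stops xfsbufd and frees the hash table */
		xfs_free_buftarg(btp, 0);
	}
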
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
deleted file mode 100644
index f948ec7ba9a..00000000000
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ /dev/null
@@ -1,446 +0,0 @@
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_BUF_H__
-#define __XFS_BUF_H__
-
-#include <linux/list.h>
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <asm/system.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/buffer_head.h>
-#include <linux/uio.h>
-
-/*
- * Base types
- */
-
-#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
-
-#define xfs_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE)
-#define xfs_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT)
-#define xfs_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT)
-#define xfs_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK)
-
-typedef enum {
- XBRW_READ = 1, /* transfer into target memory */
- XBRW_WRITE = 2, /* transfer from target memory */
- XBRW_ZERO = 3, /* Zero target memory */
-} xfs_buf_rw_t;
-
-typedef enum {
- XBF_READ = (1 << 0), /* buffer intended for reading from device */
- XBF_WRITE = (1 << 1), /* buffer intended for writing to device */
- XBF_MAPPED = (1 << 2), /* buffer mapped (b_addr valid) */
- XBF_ASYNC = (1 << 4), /* initiator will not wait for completion */
- XBF_DONE = (1 << 5), /* all pages in the buffer uptodate */
- XBF_DELWRI = (1 << 6), /* buffer has dirty pages */
- XBF_STALE = (1 << 7), /* buffer has been staled, do not find it */
- XBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */
- XBF_ORDERED = (1 << 11), /* use ordered writes */
- XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
-
- /* flags used only as arguments to access routines */
- XBF_LOCK = (1 << 14), /* lock requested */
- XBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */
- XBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */
-
- /* flags used only internally */
- _XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
- _XBF_PAGES = (1 << 18), /* backed by refcounted pages */
- _XBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
- _XBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */
-
- /*
- * Special flag for supporting metadata blocks smaller than a FSB.
- *
- * In this case we can have multiple xfs_buf_t on a single page and
- * need to lock out concurrent xfs_buf_t readers as they only
- * serialise access to the buffer.
- *
- * In the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation
- * between reads of the page. Hence we can have one thread read the
- * page and modify it, but then race with another thread that thinks
- * the page is not up-to-date and hence reads it again.
- *
- * The result is that the first modification to the page is lost.
- * This sort of AGF/AGI reading race can happen when unlinking inodes
- * that require truncation and results in the AGI unlinked list
- * modifications being lost.
- */
- _XBF_PAGE_LOCKED = (1 << 22),
-} xfs_buf_flags_t;
-
-typedef enum {
- XBT_FORCE_SLEEP = 0,
- XBT_FORCE_FLUSH = 1,
-} xfs_buftarg_flags_t;
-
-typedef struct xfs_bufhash {
- struct list_head bh_list;
- spinlock_t bh_lock;
-} xfs_bufhash_t;
-
-typedef struct xfs_buftarg {
- dev_t bt_dev;
- struct block_device *bt_bdev;
- struct address_space *bt_mapping;
- unsigned int bt_bsize;
- unsigned int bt_sshift;
- size_t bt_smask;
-
- /* per device buffer hash table */
- uint bt_hashmask;
- uint bt_hashshift;
- xfs_bufhash_t *bt_hash;
-
- /* per device delwri queue */
- struct task_struct *bt_task;
- struct list_head bt_list;
- struct list_head bt_delwrite_queue;
- spinlock_t bt_delwrite_lock;
- unsigned long bt_flags;
-} xfs_buftarg_t;
-
-/*
- * xfs_buf_t: Buffer structure for pagecache-based buffers
- *
- * This buffer structure is used by the pagecache buffer management routines
- * to refer to an assembly of pages forming a logical buffer.
- *
- * The buffer structure is used on a temporary basis only, and discarded when
- * released. The real data storage is recorded in the pagecache. Buffers are
- * hashed to the block device on which the file system resides.
- */
-
-struct xfs_buf;
-typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
-typedef void (*xfs_buf_relse_t)(struct xfs_buf *);
-typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *);
-
-#define XB_PAGES 2
-
-typedef struct xfs_buf {
- struct semaphore b_sema; /* semaphore for lockables */
- unsigned long b_queuetime; /* time buffer was queued */
- atomic_t b_pin_count; /* pin count */
- wait_queue_head_t b_waiters; /* unpin waiters */
- struct list_head b_list;
- xfs_buf_flags_t b_flags; /* status flags */
- struct list_head b_hash_list; /* hash table list */
- xfs_bufhash_t *b_hash; /* hash table list start */
- xfs_buftarg_t *b_target; /* buffer target (device) */
- atomic_t b_hold; /* reference count */
- xfs_daddr_t b_bn; /* block number for I/O */
- xfs_off_t b_file_offset; /* offset in file */
- size_t b_buffer_length;/* size of buffer in bytes */
- size_t b_count_desired;/* desired transfer size */
- void *b_addr; /* virtual address of buffer */
- struct work_struct b_iodone_work;
- atomic_t b_io_remaining; /* #outstanding I/O requests */
- xfs_buf_iodone_t b_iodone; /* I/O completion function */
- xfs_buf_relse_t b_relse; /* releasing function */
- xfs_buf_bdstrat_t b_strat; /* pre-write function */
- struct semaphore b_iodonesema; /* Semaphore for I/O waiters */
- void *b_fspriv;
- void *b_fspriv2;
- void *b_fspriv3;
- unsigned short b_error; /* error code on I/O */
- unsigned int b_page_count; /* size of page array */
- unsigned int b_offset; /* page offset in first page */
- struct page **b_pages; /* array of page pointers */
- struct page *b_page_array[XB_PAGES]; /* inline pages */
-#ifdef XFS_BUF_LOCK_TRACKING
- int b_last_holder;
-#endif
-} xfs_buf_t;
-
-
-/* Finding and Reading Buffers */
-extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t,
- xfs_buf_flags_t, xfs_buf_t *);
-#define xfs_incore(buftarg,blkno,len,lockit) \
- _xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
-
-extern xfs_buf_t *xfs_buf_get_flags(xfs_buftarg_t *, xfs_off_t, size_t,
- xfs_buf_flags_t);
-#define xfs_buf_get(target, blkno, len, flags) \
- xfs_buf_get_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
-
-extern xfs_buf_t *xfs_buf_read_flags(xfs_buftarg_t *, xfs_off_t, size_t,
- xfs_buf_flags_t);
-#define xfs_buf_read(target, blkno, len, flags) \
- xfs_buf_read_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
-
-extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
-extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *);
-extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
-extern void xfs_buf_hold(xfs_buf_t *);
-extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t,
- xfs_buf_flags_t);
-
-/* Releasing Buffers */
-extern void xfs_buf_free(xfs_buf_t *);
-extern void xfs_buf_rele(xfs_buf_t *);
-
-/* Locking and Unlocking Buffers */
-extern int xfs_buf_cond_lock(xfs_buf_t *);
-extern int xfs_buf_lock_value(xfs_buf_t *);
-extern void xfs_buf_lock(xfs_buf_t *);
-extern void xfs_buf_unlock(xfs_buf_t *);
-
-/* Buffer Read and Write Routines */
-extern void xfs_buf_ioend(xfs_buf_t *, int);
-extern void xfs_buf_ioerror(xfs_buf_t *, int);
-extern int xfs_buf_iostart(xfs_buf_t *, xfs_buf_flags_t);
-extern int xfs_buf_iorequest(xfs_buf_t *);
-extern int xfs_buf_iowait(xfs_buf_t *);
-extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t,
- xfs_buf_rw_t);
-
-static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
-{
- return bp->b_strat ? bp->b_strat(bp) : xfs_buf_iorequest(bp);
-}
-
-static inline int xfs_buf_geterror(xfs_buf_t *bp)
-{
- return bp ? bp->b_error : ENOMEM;
-}
-
-/* Buffer Utility Routines */
-extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
-
-/* Pinning Buffer Storage in Memory */
-extern void xfs_buf_pin(xfs_buf_t *);
-extern void xfs_buf_unpin(xfs_buf_t *);
-extern int xfs_buf_ispin(xfs_buf_t *);
-
-/* Delayed Write Buffer Routines */
-extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
-
-/* Buffer Daemon Setup Routines */
-extern int xfs_buf_init(void);
-extern void xfs_buf_terminate(void);
-
-#ifdef XFS_BUF_TRACE
-extern ktrace_t *xfs_buf_trace_buf;
-extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
-#else
-#define xfs_buf_trace(bp,id,ptr,ra) do { } while (0)
-#endif
-
-#define xfs_buf_target_name(target) \
- ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
-
-
-#define XFS_B_ASYNC XBF_ASYNC
-#define XFS_B_DELWRI XBF_DELWRI
-#define XFS_B_READ XBF_READ
-#define XFS_B_WRITE XBF_WRITE
-#define XFS_B_STALE XBF_STALE
-
-#define XFS_BUF_TRYLOCK XBF_TRYLOCK
-#define XFS_INCORE_TRYLOCK XBF_TRYLOCK
-#define XFS_BUF_LOCK XBF_LOCK
-#define XFS_BUF_MAPPED XBF_MAPPED
-
-#define BUF_BUSY XBF_DONT_BLOCK
-
-#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags)
-#define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \
- ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
-
-#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE)
-#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE)
-#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XFS_B_STALE)
-#define XFS_BUF_SUPER_STALE(bp) do { \
- XFS_BUF_STALE(bp); \
- xfs_buf_delwri_dequeue(bp); \
- XFS_BUF_DONE(bp); \
- } while (0)
-
-#define XFS_BUF_MANAGE XBF_FS_MANAGED
-#define XFS_BUF_UNMANAGE(bp) ((bp)->b_flags &= ~XBF_FS_MANAGED)
-
-#define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI)
-#define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp)
-#define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI)
-
-#define XFS_BUF_ERROR(bp,no) xfs_buf_ioerror(bp,no)
-#define XFS_BUF_GETERROR(bp) xfs_buf_geterror(bp)
-#define XFS_BUF_ISERROR(bp) (xfs_buf_geterror(bp) ? 1 : 0)
-
-#define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE)
-#define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE)
-#define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE)
-
-#define XFS_BUF_BUSY(bp) do { } while (0)
-#define XFS_BUF_UNBUSY(bp) do { } while (0)
-#define XFS_BUF_ISBUSY(bp) (1)
-
-#define XFS_BUF_ASYNC(bp) ((bp)->b_flags |= XBF_ASYNC)
-#define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC)
-#define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC)
-
-#define XFS_BUF_ORDERED(bp) ((bp)->b_flags |= XBF_ORDERED)
-#define XFS_BUF_UNORDERED(bp) ((bp)->b_flags &= ~XBF_ORDERED)
-#define XFS_BUF_ISORDERED(bp) ((bp)->b_flags & XBF_ORDERED)
-
-#define XFS_BUF_SHUT(bp) do { } while (0)
-#define XFS_BUF_UNSHUT(bp) do { } while (0)
-#define XFS_BUF_ISSHUT(bp) (0)
-
-#define XFS_BUF_HOLD(bp) xfs_buf_hold(bp)
-#define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ)
-#define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ)
-#define XFS_BUF_ISREAD(bp) ((bp)->b_flags & XBF_READ)
-
-#define XFS_BUF_WRITE(bp) ((bp)->b_flags |= XBF_WRITE)
-#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE)
-#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE)
-
-#define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone)
-#define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func))
-#define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL)
-#define XFS_BUF_SET_BDSTRAT_FUNC(bp, func) ((bp)->b_strat = (func))
-#define XFS_BUF_CLR_BDSTRAT_FUNC(bp) ((bp)->b_strat = NULL)
-
-#define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv)
-#define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val))
-#define XFS_BUF_FSPRIVATE2(bp, type) ((type)(bp)->b_fspriv2)
-#define XFS_BUF_SET_FSPRIVATE2(bp, val) ((bp)->b_fspriv2 = (void*)(val))
-#define XFS_BUF_FSPRIVATE3(bp, type) ((type)(bp)->b_fspriv3)
-#define XFS_BUF_SET_FSPRIVATE3(bp, val) ((bp)->b_fspriv3 = (void*)(val))
-#define XFS_BUF_SET_START(bp) do { } while (0)
-#define XFS_BUF_SET_BRELSE_FUNC(bp, func) ((bp)->b_relse = (func))
-
-#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr)
-#define XFS_BUF_SET_PTR(bp, val, cnt) xfs_buf_associate_memory(bp, val, cnt)
-#define XFS_BUF_ADDR(bp) ((bp)->b_bn)
-#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno))
-#define XFS_BUF_OFFSET(bp) ((bp)->b_file_offset)
-#define XFS_BUF_SET_OFFSET(bp, off) ((bp)->b_file_offset = (off))
-#define XFS_BUF_COUNT(bp) ((bp)->b_count_desired)
-#define XFS_BUF_SET_COUNT(bp, cnt) ((bp)->b_count_desired = (cnt))
-#define XFS_BUF_SIZE(bp) ((bp)->b_buffer_length)
-#define XFS_BUF_SET_SIZE(bp, cnt) ((bp)->b_buffer_length = (cnt))
-
-#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) do { } while (0)
-#define XFS_BUF_SET_VTYPE(bp, type) do { } while (0)
-#define XFS_BUF_SET_REF(bp, ref) do { } while (0)
-
-#define XFS_BUF_ISPINNED(bp) xfs_buf_ispin(bp)
-
-#define XFS_BUF_VALUSEMA(bp) xfs_buf_lock_value(bp)
-#define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0)
-#define XFS_BUF_VSEMA(bp) xfs_buf_unlock(bp)
-#define XFS_BUF_PSEMA(bp,x) xfs_buf_lock(bp)
-#define XFS_BUF_V_IODONESEMA(bp) up(&bp->b_iodonesema);
-
-#define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target))
-#define XFS_BUF_TARGET(bp) ((bp)->b_target)
-#define XFS_BUFTARG_NAME(target) xfs_buf_target_name(target)
-
-static inline int xfs_bawrite(void *mp, xfs_buf_t *bp)
-{
- bp->b_fspriv3 = mp;
- bp->b_strat = xfs_bdstrat_cb;
- xfs_buf_delwri_dequeue(bp);
- return xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
-}
-
-static inline void xfs_buf_relse(xfs_buf_t *bp)
-{
- if (!bp->b_relse)
- xfs_buf_unlock(bp);
- xfs_buf_rele(bp);
-}
-
-#define xfs_bpin(bp) xfs_buf_pin(bp)
-#define xfs_bunpin(bp) xfs_buf_unpin(bp)
-
-#define xfs_buftrace(id, bp) \
- xfs_buf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
-
-#define xfs_biodone(bp) xfs_buf_ioend(bp, 0)
-
-#define xfs_biomove(bp, off, len, data, rw) \
- xfs_buf_iomove((bp), (off), (len), (data), \
- ((rw) == XFS_B_WRITE) ? XBRW_WRITE : XBRW_READ)
-
-#define xfs_biozero(bp, off, len) \
- xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
-
-
-static inline int XFS_bwrite(xfs_buf_t *bp)
-{
- int iowait = (bp->b_flags & XBF_ASYNC) == 0;
- int error = 0;
-
- if (!iowait)
- bp->b_flags |= _XBF_RUN_QUEUES;
-
- xfs_buf_delwri_dequeue(bp);
- xfs_buf_iostrategy(bp);
- if (iowait) {
- error = xfs_buf_iowait(bp);
- xfs_buf_relse(bp);
- }
- return error;
-}
-
-/*
- * No error can be returned from xfs_buf_iostart for delwri
- * buffers as they are queued and no I/O is issued.
- */
-static inline void xfs_bdwrite(void *mp, xfs_buf_t *bp)
-{
- bp->b_strat = xfs_bdstrat_cb;
- bp->b_fspriv3 = mp;
- (void)xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);
-}
-
-#define XFS_bdstrat(bp) xfs_buf_iorequest(bp)
-
-#define xfs_iowait(bp) xfs_buf_iowait(bp)
-
-#define xfs_baread(target, rablkno, ralen) \
- xfs_buf_readahead((target), (rablkno), (ralen), XBF_DONT_BLOCK)
-
-
-/*
- * Handling of buftargs.
- */
-extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
-extern void xfs_free_buftarg(xfs_buftarg_t *, int);
-extern void xfs_wait_buftarg(xfs_buftarg_t *);
-extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
-extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
-#ifdef CONFIG_KDB_MODULES
-extern struct list_head *xfs_get_buftarg_list(void);
-#endif
-
-#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
-#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
-
-#define xfs_binval(buftarg) xfs_flush_buftarg(buftarg, 1)
-#define XFS_bflush(buftarg) xfs_flush_buftarg(buftarg, 1)
-
-#endif /* __XFS_BUF_H__ */
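
Taken together, the declarations above define the lifecycle of a pagecache buffer: obtain it locked and mapped, check for I/O errors, modify the mapped data, then write it back and release it. A minimal sketch of that read-modify-write flow, using only interfaces declared in this header (the helper name, block number and length are illustrative assumptions, not a real caller):

static int
example_buf_rmw(
	xfs_buftarg_t	*target,
	xfs_daddr_t	blkno,
	size_t		len)
{
	xfs_buf_t	*bp;
	int		error;

	/* xfs_buf_read returns the buffer locked and mapped
	 * (XBF_LOCK | XBF_MAPPED). */
	bp = xfs_buf_read(target, blkno, len, 0);
	error = xfs_buf_geterror(bp);		/* ENOMEM if bp is NULL */
	if (error) {
		if (bp)
			xfs_buf_relse(bp);	/* unlock, drop reference */
		return error;
	}

	*(char *)XFS_BUF_PTR(bp) ^= 0xff;	/* modify the mapped data */

	XFS_BUF_UNREAD(bp);
	XFS_BUF_WRITE(bp);
	/* XBF_ASYNC is clear, so XFS_bwrite waits for completion and
	 * releases the buffer itself. */
	return XFS_bwrite(bp);
}
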
diff --git a/fs/xfs/linux-2.6/xfs_dmapi_priv.h b/fs/xfs/linux-2.6/xfs_dmapi_priv.h
deleted file mode 100644
index a8b0b1685ee..00000000000
--- a/fs/xfs/linux-2.6/xfs_dmapi_priv.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2000-2006 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_DMAPI_PRIV_H__
-#define __XFS_DMAPI_PRIV_H__
-
-/*
- * Based on IO_ISDIRECT, decide which i_ flag is set.
- */
-#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
- DM_FLAGS_IMUX : 0)
-#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX)
-
-#endif /*__XFS_DMAPI_PRIV_H__*/
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
deleted file mode 100644
index 5f60363b934..00000000000
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ /dev/null
@@ -1,488 +0,0 @@
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_trans.h"
-#include "xfs_dmapi.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_error.h"
-#include "xfs_rw.h"
-#include "xfs_ioctl32.h"
-#include "xfs_vnodeops.h"
-
-#include <linux/dcache.h>
-#include <linux/smp_lock.h>
-
-static struct vm_operations_struct xfs_file_vm_ops;
-
-STATIC_INLINE ssize_t
-__xfs_file_read(
- struct kiocb *iocb,
- const struct iovec *iov,
- unsigned long nr_segs,
- int ioflags,
- loff_t pos)
-{
- struct file *file = iocb->ki_filp;
-
- BUG_ON(iocb->ki_pos != pos);
- if (unlikely(file->f_flags & O_DIRECT))
- ioflags |= IO_ISDIRECT;
- return xfs_read(XFS_I(file->f_path.dentry->d_inode), iocb, iov,
- nr_segs, &iocb->ki_pos, ioflags);
-}
-
-STATIC ssize_t
-xfs_file_aio_read(
- struct kiocb *iocb,
- const struct iovec *iov,
- unsigned long nr_segs,
- loff_t pos)
-{
- return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO, pos);
-}
-
-STATIC ssize_t
-xfs_file_aio_read_invis(
- struct kiocb *iocb,
- const struct iovec *iov,
- unsigned long nr_segs,
- loff_t pos)
-{
- return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos);
-}
-
-STATIC_INLINE ssize_t
-__xfs_file_write(
- struct kiocb *iocb,
- const struct iovec *iov,
- unsigned long nr_segs,
- int ioflags,
- loff_t pos)
-{
- struct file *file = iocb->ki_filp;
-
- BUG_ON(iocb->ki_pos != pos);
- if (unlikely(file->f_flags & O_DIRECT))
- ioflags |= IO_ISDIRECT;
- return xfs_write(XFS_I(file->f_mapping->host), iocb, iov, nr_segs,
- &iocb->ki_pos, ioflags);
-}
-
-STATIC ssize_t
-xfs_file_aio_write(
- struct kiocb *iocb,
- const struct iovec *iov,
- unsigned long nr_segs,
- loff_t pos)
-{
- return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO, pos);
-}
-
-STATIC ssize_t
-xfs_file_aio_write_invis(
- struct kiocb *iocb,
- const struct iovec *iov,
- unsigned long nr_segs,
- loff_t pos)
-{
- return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos);
-}
-
-STATIC ssize_t
-xfs_file_splice_read(
- struct file *infilp,
- loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t len,
- unsigned int flags)
-{
- return xfs_splice_read(XFS_I(infilp->f_path.dentry->d_inode),
- infilp, ppos, pipe, len, flags, 0);
-}
-
-STATIC ssize_t
-xfs_file_splice_read_invis(
- struct file *infilp,
- loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t len,
- unsigned int flags)
-{
- return xfs_splice_read(XFS_I(infilp->f_path.dentry->d_inode),
- infilp, ppos, pipe, len, flags, IO_INVIS);
-}
-
-STATIC ssize_t
-xfs_file_splice_write(
- struct pipe_inode_info *pipe,
- struct file *outfilp,
- loff_t *ppos,
- size_t len,
- unsigned int flags)
-{
- return xfs_splice_write(XFS_I(outfilp->f_path.dentry->d_inode),
- pipe, outfilp, ppos, len, flags, 0);
-}
-
-STATIC ssize_t
-xfs_file_splice_write_invis(
- struct pipe_inode_info *pipe,
- struct file *outfilp,
- loff_t *ppos,
- size_t len,
- unsigned int flags)
-{
- return xfs_splice_write(XFS_I(outfilp->f_path.dentry->d_inode),
- pipe, outfilp, ppos, len, flags, IO_INVIS);
-}
-
-STATIC int
-xfs_file_open(
- struct inode *inode,
- struct file *filp)
-{
- if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
- return -EFBIG;
- return -xfs_open(XFS_I(inode));
-}
-
-STATIC int
-xfs_file_release(
- struct inode *inode,
- struct file *filp)
-{
- return -xfs_release(XFS_I(inode));
-}
-
-/*
- * We ignore the datasync flag here because a datasync is effectively
- * identical to an fsync. That is, datasync implies that we need to write
- * only the metadata needed to be able to access the data that is written
- * if we crash after the call completes. Hence if we are writing beyond
- * EOF we have to log the inode size change as well, which makes it a
- * full fsync. If we don't write beyond EOF, the inode core will be
- * clean in memory and so we don't need to log the inode, just like
- * fsync.
- */
-STATIC int
-xfs_file_fsync(
- struct file *filp,
- struct dentry *dentry,
- int datasync)
-{
- xfs_iflags_clear(XFS_I(dentry->d_inode), XFS_ITRUNCATED);
- return -xfs_fsync(XFS_I(dentry->d_inode));
-}
-
-/*
- * Unfortunately we can't just use the clean and simple readdir implementation
- * below, because nfs might call back into ->lookup from the filldir callback
- * and that will deadlock the low-level btree code.
- *
- * Hopefully we'll find a better workaround that allows us to use the optimal
- * version at least for local readdirs for 2.6.25.
- */
-#if 0
-STATIC int
-xfs_file_readdir(
- struct file *filp,
- void *dirent,
- filldir_t filldir)
-{
- struct inode *inode = filp->f_path.dentry->d_inode;
- xfs_inode_t *ip = XFS_I(inode);
- int error;
- size_t bufsize;
-
- /*
-	 * The Linux API doesn't pass the total size of the buffer
- * we read into down to the filesystem. With the filldir concept
- * it's not needed for correct information, but the XFS dir2 leaf
-	 * code wants an estimate of the buffer size to calculate its
- * readahead window and size the buffers used for mapping to
- * physical blocks.
- *
-	 * Try to give it an estimate that's good enough; maybe at some
- * point we can change the ->readdir prototype to include the
- * buffer size.
- */
- bufsize = (size_t)min_t(loff_t, PAGE_SIZE, inode->i_size);
-
- error = xfs_readdir(ip, dirent, bufsize,
- (xfs_off_t *)&filp->f_pos, filldir);
- if (error)
- return -error;
- return 0;
-}
-#else
-
-struct hack_dirent {
- u64 ino;
- loff_t offset;
- int namlen;
- unsigned int d_type;
- char name[];
-};
-
-struct hack_callback {
- char *dirent;
- size_t len;
- size_t used;
-};
-
-STATIC int
-xfs_hack_filldir(
- void *__buf,
- const char *name,
- int namlen,
- loff_t offset,
- u64 ino,
- unsigned int d_type)
-{
- struct hack_callback *buf = __buf;
- struct hack_dirent *de = (struct hack_dirent *)(buf->dirent + buf->used);
- unsigned int reclen;
-
- reclen = ALIGN(sizeof(struct hack_dirent) + namlen, sizeof(u64));
- if (buf->used + reclen > buf->len)
- return -EINVAL;
-
- de->namlen = namlen;
- de->offset = offset;
- de->ino = ino;
- de->d_type = d_type;
- memcpy(de->name, name, namlen);
- buf->used += reclen;
- return 0;
-}
-
-STATIC int
-xfs_file_readdir(
- struct file *filp,
- void *dirent,
- filldir_t filldir)
-{
- struct inode *inode = filp->f_path.dentry->d_inode;
- xfs_inode_t *ip = XFS_I(inode);
- struct hack_callback buf;
- struct hack_dirent *de;
- int error;
- loff_t size;
- int eof = 0;
- xfs_off_t start_offset, curr_offset, offset;
-
- /*
- * Try fairly hard to get memory
- */
- buf.len = PAGE_CACHE_SIZE;
- do {
- buf.dirent = kmalloc(buf.len, GFP_KERNEL);
- if (buf.dirent)
- break;
- buf.len >>= 1;
- } while (buf.len >= 1024);
-
- if (!buf.dirent)
- return -ENOMEM;
-
- curr_offset = filp->f_pos;
- if (curr_offset == 0x7fffffff)
- offset = 0xffffffff;
- else
- offset = filp->f_pos;
-
- while (!eof) {
- unsigned int reclen;
-
- start_offset = offset;
-
- buf.used = 0;
- error = -xfs_readdir(ip, &buf, buf.len, &offset,
- xfs_hack_filldir);
- if (error || offset == start_offset) {
- size = 0;
- break;
- }
-
- size = buf.used;
- de = (struct hack_dirent *)buf.dirent;
- while (size > 0) {
- curr_offset = de->offset /* & 0x7fffffff */;
- if (filldir(dirent, de->name, de->namlen,
- curr_offset & 0x7fffffff,
- de->ino, de->d_type)) {
- goto done;
- }
-
- reclen = ALIGN(sizeof(struct hack_dirent) + de->namlen,
- sizeof(u64));
- size -= reclen;
- de = (struct hack_dirent *)((char *)de + reclen);
- }
- }
-
- done:
- if (!error) {
- if (size == 0)
- filp->f_pos = offset & 0x7fffffff;
- else if (de)
- filp->f_pos = curr_offset;
- }
-
- kfree(buf.dirent);
- return error;
-}
-#endif
-
-STATIC int
-xfs_file_mmap(
- struct file *filp,
- struct vm_area_struct *vma)
-{
- vma->vm_ops = &xfs_file_vm_ops;
- vma->vm_flags |= VM_CAN_NONLINEAR;
-
- file_accessed(filp);
- return 0;
-}
-
-STATIC long
-xfs_file_ioctl(
- struct file *filp,
- unsigned int cmd,
- unsigned long p)
-{
- int error;
- struct inode *inode = filp->f_path.dentry->d_inode;
-
- error = xfs_ioctl(XFS_I(inode), filp, 0, cmd, (void __user *)p);
- xfs_iflags_set(XFS_I(inode), XFS_IMODIFIED);
-
-	/* NOTE: some of the ioctls return positive #'s as a
- * byte count indicating success, such as
- * readlink_by_handle. So we don't "sign flip"
- * like most other routines. This means true
- * errors need to be returned as a negative value.
- */
- return error;
-}
-
-STATIC long
-xfs_file_ioctl_invis(
- struct file *filp,
- unsigned int cmd,
- unsigned long p)
-{
- int error;
- struct inode *inode = filp->f_path.dentry->d_inode;
-
- error = xfs_ioctl(XFS_I(inode), filp, IO_INVIS, cmd, (void __user *)p);
- xfs_iflags_set(XFS_I(inode), XFS_IMODIFIED);
-
-	/* NOTE: some of the ioctls return positive #'s as a
- * byte count indicating success, such as
- * readlink_by_handle. So we don't "sign flip"
- * like most other routines. This means true
- * errors need to be returned as a negative value.
- */
- return error;
-}
-
-/*
- * mmap()d file has taken write protection fault and is being made
- * writable. We can set the page state up correctly for a writable
- * page, which means we can do correct delalloc accounting (ENOSPC
- * checking!) and unwritten extent mapping.
- */
-STATIC int
-xfs_vm_page_mkwrite(
- struct vm_area_struct *vma,
- struct page *page)
-{
- return block_page_mkwrite(vma, page, xfs_get_blocks);
-}
-
-const struct file_operations xfs_file_operations = {
- .llseek = generic_file_llseek,
- .read = do_sync_read,
- .write = do_sync_write,
- .aio_read = xfs_file_aio_read,
- .aio_write = xfs_file_aio_write,
- .splice_read = xfs_file_splice_read,
- .splice_write = xfs_file_splice_write,
- .unlocked_ioctl = xfs_file_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = xfs_file_compat_ioctl,
-#endif
- .mmap = xfs_file_mmap,
- .open = xfs_file_open,
- .release = xfs_file_release,
- .fsync = xfs_file_fsync,
-#ifdef HAVE_FOP_OPEN_EXEC
- .open_exec = xfs_file_open_exec,
-#endif
-};
-
-const struct file_operations xfs_invis_file_operations = {
- .llseek = generic_file_llseek,
- .read = do_sync_read,
- .write = do_sync_write,
- .aio_read = xfs_file_aio_read_invis,
- .aio_write = xfs_file_aio_write_invis,
- .splice_read = xfs_file_splice_read_invis,
- .splice_write = xfs_file_splice_write_invis,
- .unlocked_ioctl = xfs_file_ioctl_invis,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = xfs_file_compat_invis_ioctl,
-#endif
- .mmap = xfs_file_mmap,
- .open = xfs_file_open,
- .release = xfs_file_release,
- .fsync = xfs_file_fsync,
-};
-
-
-const struct file_operations xfs_dir_file_operations = {
- .read = generic_read_dir,
- .readdir = xfs_file_readdir,
- .unlocked_ioctl = xfs_file_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = xfs_file_compat_ioctl,
-#endif
- .fsync = xfs_file_fsync,
-};
-
-static struct vm_operations_struct xfs_file_vm_ops = {
- .fault = filemap_fault,
- .page_mkwrite = xfs_vm_page_mkwrite,
-};
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
deleted file mode 100644
index 1eefe61f0e1..00000000000
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005-2006 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_vnodeops.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_inode.h"
-
-int fs_noerr(void) { return 0; }
-int fs_nosys(void) { return ENOSYS; }
-void fs_noval(void) { return; }
-
-void
-xfs_tosspages(
- xfs_inode_t *ip,
- xfs_off_t first,
- xfs_off_t last,
- int fiopt)
-{
- struct address_space *mapping = ip->i_vnode->i_mapping;
-
- if (mapping->nrpages)
- truncate_inode_pages(mapping, first);
-}
-
-int
-xfs_flushinval_pages(
- xfs_inode_t *ip,
- xfs_off_t first,
- xfs_off_t last,
- int fiopt)
-{
- struct address_space *mapping = ip->i_vnode->i_mapping;
- int ret = 0;
-
- if (mapping->nrpages) {
- xfs_iflags_clear(ip, XFS_ITRUNCATED);
- ret = filemap_write_and_wait(mapping);
- if (!ret)
- truncate_inode_pages(mapping, first);
- }
- return ret;
-}
-
-int
-xfs_flush_pages(
- xfs_inode_t *ip,
- xfs_off_t first,
- xfs_off_t last,
- uint64_t flags,
- int fiopt)
-{
- struct address_space *mapping = ip->i_vnode->i_mapping;
- int ret = 0;
- int ret2;
-
- if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
- xfs_iflags_clear(ip, XFS_ITRUNCATED);
- ret = filemap_fdatawrite(mapping);
- if (flags & XFS_B_ASYNC)
- return ret;
- ret2 = filemap_fdatawait(mapping);
- if (!ret)
- ret = ret2;
- }
- return ret;
-}
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.h b/fs/xfs/linux-2.6/xfs_fs_subr.h
deleted file mode 100644
index 82bb19b2599..00000000000
--- a/fs/xfs/linux-2.6/xfs_fs_subr.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2000,2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_FS_SUBR_H__
-#define __XFS_FS_SUBR_H__
-
-extern int fs_noerr(void);
-extern int fs_nosys(void);
-extern void fs_noval(void);
-
-#endif /* __XFS_FS_SUBR_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_globals.h b/fs/xfs/linux-2.6/xfs_globals.h
deleted file mode 100644
index 2770b0085ee..00000000000
--- a/fs/xfs/linux-2.6/xfs_globals.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_GLOBALS_H__
-#define __XFS_GLOBALS_H__
-
-extern uint64_t xfs_panic_mask; /* set to cause more panics */
-extern struct cred *sys_cred;
-
-#endif /* __XFS_GLOBALS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
deleted file mode 100644
index 01939ba2d8d..00000000000
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ /dev/null
@@ -1,1285 +0,0 @@
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
-#include "xfs_rtalloc.h"
-#include "xfs_itable.h"
-#include "xfs_error.h"
-#include "xfs_rw.h"
-#include "xfs_acl.h"
-#include "xfs_attr.h"
-#include "xfs_bmap.h"
-#include "xfs_buf_item.h"
-#include "xfs_utils.h"
-#include "xfs_dfrag.h"
-#include "xfs_fsops.h"
-#include "xfs_vnodeops.h"
-
-#include <linux/capability.h>
-#include <linux/dcache.h>
-#include <linux/mount.h>
-#include <linux/namei.h>
-#include <linux/pagemap.h>
-
-/*
- * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
- * a file or fs handle.
- *
- * XFS_IOC_PATH_TO_FSHANDLE
- * returns fs handle for a mount point or path within that mount point
- * XFS_IOC_FD_TO_HANDLE
- * returns full handle for a FD opened in user space
- * XFS_IOC_PATH_TO_HANDLE
- * returns full handle for a path
- */
-STATIC int
-xfs_find_handle(
- unsigned int cmd,
- void __user *arg)
-{
- int hsize;
- xfs_handle_t handle;
- xfs_fsop_handlereq_t hreq;
- struct inode *inode;
-
- if (copy_from_user(&hreq, arg, sizeof(hreq)))
- return -XFS_ERROR(EFAULT);
-
- memset((char *)&handle, 0, sizeof(handle));
-
- switch (cmd) {
- case XFS_IOC_PATH_TO_FSHANDLE:
- case XFS_IOC_PATH_TO_HANDLE: {
- struct path path;
- int error = user_lpath((const char __user *)hreq.path, &path);
- if (error)
- return error;
-
- ASSERT(path.dentry);
- ASSERT(path.dentry->d_inode);
- inode = igrab(path.dentry->d_inode);
- path_put(&path);
- break;
- }
-
- case XFS_IOC_FD_TO_HANDLE: {
- struct file *file;
-
- file = fget(hreq.fd);
- if (!file)
- return -EBADF;
-
- ASSERT(file->f_path.dentry);
- ASSERT(file->f_path.dentry->d_inode);
- inode = igrab(file->f_path.dentry->d_inode);
- fput(file);
- break;
- }
-
- default:
- ASSERT(0);
- return -XFS_ERROR(EINVAL);
- }
-
- if (inode->i_sb->s_magic != XFS_SB_MAGIC) {
- /* we're not in XFS anymore, Toto */
- iput(inode);
- return -XFS_ERROR(EINVAL);
- }
-
- switch (inode->i_mode & S_IFMT) {
- case S_IFREG:
- case S_IFDIR:
- case S_IFLNK:
- break;
- default:
- iput(inode);
- return -XFS_ERROR(EBADF);
- }
-
- /* now we can grab the fsid */
- memcpy(&handle.ha_fsid, XFS_I(inode)->i_mount->m_fixedfsid,
- sizeof(xfs_fsid_t));
- hsize = sizeof(xfs_fsid_t);
-
- if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
- xfs_inode_t *ip = XFS_I(inode);
- int lock_mode;
-
- /* need to get access to the xfs_inode to read the generation */
- lock_mode = xfs_ilock_map_shared(ip);
-
- /* fill in fid section of handle from inode */
- handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
- sizeof(handle.ha_fid.fid_len);
- handle.ha_fid.fid_pad = 0;
- handle.ha_fid.fid_gen = ip->i_d.di_gen;
- handle.ha_fid.fid_ino = ip->i_ino;
-
- xfs_iunlock_map_shared(ip, lock_mode);
-
- hsize = XFS_HSIZE(handle);
- }
-
- /* now copy our handle into the user buffer & write out the size */
- if (copy_to_user(hreq.ohandle, &handle, hsize) ||
- copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) {
- iput(inode);
- return -XFS_ERROR(EFAULT);
- }
-
- iput(inode);
- return 0;
-}
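
From userspace all three handle ioctls described above are driven through the same xfs_fsop_handlereq structure. A hedged sketch of XFS_IOC_PATH_TO_FSHANDLE (hypothetical helper, assuming the xfs_fs.h definitions shipped with xfsprogs; the output buffer must be large enough for a full xfs_handle_t, and for a filesystem handle only the fsid portion is filled in, with *hlen set accordingly):

#include <xfs/xfs.h>		/* xfs_fsop_handlereq_t, XFS_IOC_* */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Hypothetical helper: fetch the filesystem handle for an XFS mount point. */
static int
get_fshandle(const char *mntpt, void *hbuf, __u32 *hlen)
{
	xfs_fsop_handlereq_t	hreq;
	int			fd, ret;

	fd = open(mntpt, O_RDONLY);
	if (fd < 0)
		return -1;

	memset(&hreq, 0, sizeof(hreq));
	hreq.path = (void *)mntpt;	/* path within the XFS mount */
	hreq.ohandle = hbuf;		/* kernel writes the handle here... */
	hreq.ohandlen = hlen;		/* ...and its size here */

	ret = ioctl(fd, XFS_IOC_PATH_TO_FSHANDLE, &hreq);
	close(fd);
	return ret;
}
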
-
-
-/*
- * Convert userspace handle data into inode.
- *
- * We use the fact that all the fsop_handlereq ioctl calls have a data
- * structure argument whose first component is always an xfs_fsop_handlereq_t,
- * so we can pass that sub structure into this handy, shared routine.
- *
- * If no error, caller must always iput the returned inode.
- */
-STATIC int
-xfs_vget_fsop_handlereq(
- xfs_mount_t *mp,
- struct inode *parinode, /* parent inode pointer */
- xfs_fsop_handlereq_t *hreq,
- struct inode **inode)
-{
- void __user *hanp;
- size_t hlen;
- xfs_fid_t *xfid;
- xfs_handle_t *handlep;
- xfs_handle_t handle;
- xfs_inode_t *ip;
- xfs_ino_t ino;
- __u32 igen;
- int error;
-
- /*
- * Only allow handle opens under a directory.
- */
- if (!S_ISDIR(parinode->i_mode))
- return XFS_ERROR(ENOTDIR);
-
- hanp = hreq->ihandle;
- hlen = hreq->ihandlen;
- handlep = &handle;
-
- if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
- return XFS_ERROR(EINVAL);
- if (copy_from_user(handlep, hanp, hlen))
- return XFS_ERROR(EFAULT);
- if (hlen < sizeof(*handlep))
- memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
- if (hlen > sizeof(handlep->ha_fsid)) {
- if (handlep->ha_fid.fid_len !=
- (hlen - sizeof(handlep->ha_fsid) -
- sizeof(handlep->ha_fid.fid_len)) ||
- handlep->ha_fid.fid_pad)
- return XFS_ERROR(EINVAL);
- }
-
- /*
- * Crack the handle, obtain the inode # & generation #
- */
- xfid = (struct xfs_fid *)&handlep->ha_fid;
- if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) {
- ino = xfid->fid_ino;
- igen = xfid->fid_gen;
- } else {
- return XFS_ERROR(EINVAL);
- }
-
- /*
- * Get the XFS inode, building a Linux inode to go with it.
- */
- error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
- if (error)
- return error;
- if (ip == NULL)
- return XFS_ERROR(EIO);
- if (ip->i_d.di_gen != igen) {
- xfs_iput_new(ip, XFS_ILOCK_SHARED);
- return XFS_ERROR(ENOENT);
- }
-
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
- *inode = XFS_ITOV(ip);
- return 0;
-}
-
-STATIC int
-xfs_open_by_handle(
- xfs_mount_t *mp,
- void __user *arg,
- struct file *parfilp,
- struct inode *parinode)
-{
- int error;
- int new_fd;
- int permflag;
- struct file *filp;
- struct inode *inode;
- struct dentry *dentry;
- xfs_fsop_handlereq_t hreq;
-
- if (!capable(CAP_SYS_ADMIN))
- return -XFS_ERROR(EPERM);
- if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
- return -XFS_ERROR(EFAULT);
-
- error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode);
- if (error)
- return -error;
-
- /* Restrict xfs_open_by_handle to directories & regular files. */
- if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
- iput(inode);
- return -XFS_ERROR(EINVAL);
- }
-
-#if BITS_PER_LONG != 32
- hreq.oflags |= O_LARGEFILE;
-#endif
- /* Put open permission in namei format. */
- permflag = hreq.oflags;
- if ((permflag+1) & O_ACCMODE)
- permflag++;
- if (permflag & O_TRUNC)
- permflag |= 2;
-
- if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
- (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
- iput(inode);
- return -XFS_ERROR(EPERM);
- }
-
- if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
- iput(inode);
- return -XFS_ERROR(EACCES);
- }
-
- /* Can't write directories. */
- if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
- iput(inode);
- return -XFS_ERROR(EISDIR);
- }
-
- if ((new_fd = get_unused_fd()) < 0) {
- iput(inode);
- return new_fd;
- }
-
- dentry = d_alloc_anon(inode);
- if (dentry == NULL) {
- iput(inode);
- put_unused_fd(new_fd);
- return -XFS_ERROR(ENOMEM);
- }
-
- /* Ensure umount returns EBUSY on umounts while this file is open. */
- mntget(parfilp->f_path.mnt);
-
- /* Create file pointer. */
- filp = dentry_open(dentry, parfilp->f_path.mnt, hreq.oflags);
- if (IS_ERR(filp)) {
- put_unused_fd(new_fd);
- return -XFS_ERROR(-PTR_ERR(filp));
- }
- if (inode->i_mode & S_IFREG) {
- /* invisible operation should not change atime */
- filp->f_flags |= O_NOATIME;
- filp->f_op = &xfs_invis_file_operations;
- }
-
- fd_install(new_fd, filp);
- return new_fd;
-}
-
-/*
- * This is a copy from fs/namei.c:vfs_readlink(), except for removing its
- * unused first argument.
- */
-STATIC int
-do_readlink(
- char __user *buffer,
- int buflen,
- const char *link)
-{
- int len;
-
- len = PTR_ERR(link);
- if (IS_ERR(link))
- goto out;
-
- len = strlen(link);
- if (len > (unsigned) buflen)
- len = buflen;
- if (copy_to_user(buffer, link, len))
- len = -EFAULT;
- out:
- return len;
-}
-
-
-STATIC int
-xfs_readlink_by_handle(
- xfs_mount_t *mp,
- void __user *arg,
- struct inode *parinode)
-{
- struct inode *inode;
- xfs_fsop_handlereq_t hreq;
- __u32 olen;
- void *link;
- int error;
-
- if (!capable(CAP_SYS_ADMIN))
- return -XFS_ERROR(EPERM);
- if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
- return -XFS_ERROR(EFAULT);
-
- error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode);
- if (error)
- return -error;
-
- /* Restrict this handle operation to symlinks only. */
- if (!S_ISLNK(inode->i_mode)) {
- error = -XFS_ERROR(EINVAL);
- goto out_iput;
- }
-
- if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) {
- error = -XFS_ERROR(EFAULT);
- goto out_iput;
- }
-
- link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);
- if (!link)
- goto out_iput;
-
- error = -xfs_readlink(XFS_I(inode), link);
- if (error)
- goto out_kfree;
- error = do_readlink(hreq.ohandle, olen, link);
- if (error)
- goto out_kfree;
-
- out_kfree:
- kfree(link);
- out_iput:
- iput(inode);
- return error;
-}
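
A userspace sketch of the matching call (hypothetical helper, xfsprogs headers assumed): mnt_fd must be an open directory on the same filesystem, typically the mount point, CAP_SYS_ADMIN is required per the capable() check above, and a successful ioctl returns the number of link-target bytes copied rather than zero, which is the positive-return convention noted in xfs_file_ioctl().

#include <xfs/xfs.h>		/* xfs_fsop_handlereq_t, XFS_IOC_* */
#include <string.h>
#include <sys/ioctl.h>

/* Hypothetical helper: read a symlink target given a full file handle
 * previously obtained with XFS_IOC_PATH_TO_HANDLE or FD_TO_HANDLE. */
static int
readlink_by_handle(int mnt_fd, void *handle, __u32 hlen,
		   char *buf, __u32 bufsize)
{
	xfs_fsop_handlereq_t	hreq;

	memset(&hreq, 0, sizeof(hreq));
	hreq.ihandle = handle;		/* input: the file handle */
	hreq.ihandlen = hlen;
	hreq.ohandle = buf;		/* output: link target bytes */
	hreq.ohandlen = &bufsize;	/* input: size of buf */

	return ioctl(mnt_fd, XFS_IOC_READLINK_BY_HANDLE, &hreq);
}
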
-
-STATIC int
-xfs_fssetdm_by_handle(
- xfs_mount_t *mp,
- void __user *arg,
- struct inode *parinode)
-{
- int error;
- struct fsdmidata fsd;
- xfs_fsop_setdm_handlereq_t dmhreq;
- struct inode *inode;
-
- if (!capable(CAP_MKNOD))
- return -XFS_ERROR(EPERM);
- if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
- return -XFS_ERROR(EFAULT);
-
- error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &inode);
- if (error)
- return -error;
-
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
- error = -XFS_ERROR(EPERM);
- goto out;
- }
-
- if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
- error = -XFS_ERROR(EFAULT);
- goto out;
- }
-
- error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask,
- fsd.fsd_dmstate);
-
- out:
- iput(inode);
- return error;
-}
-
-STATIC int
-xfs_attrlist_by_handle(
- xfs_mount_t *mp,
- void __user *arg,
- struct inode *parinode)
-{
- int error;
- attrlist_cursor_kern_t *cursor;
- xfs_fsop_attrlist_handlereq_t al_hreq;
- struct inode *inode;
- char *kbuf;
-
- if (!capable(CAP_SYS_ADMIN))
- return -XFS_ERROR(EPERM);
- if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
- return -XFS_ERROR(EFAULT);
- if (al_hreq.buflen > XATTR_LIST_MAX)
- return -XFS_ERROR(EINVAL);
-
- error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq, &inode);
- if (error)
- goto out;
-
- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
- if (!kbuf)
- goto out_vn_rele;
-
- cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
- error = xfs_attr_list(XFS_I(inode), kbuf, al_hreq.buflen,
- al_hreq.flags, cursor);
- if (error)
- goto out_kfree;
-
- if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
- error = -EFAULT;
-
- out_kfree:
- kfree(kbuf);
- out_vn_rele:
- iput(inode);
- out:
- return -error;
-}
-
-STATIC int
-xfs_attrmulti_attr_get(
- struct inode *inode,
- char *name,
- char __user *ubuf,
- __uint32_t *len,
- __uint32_t flags)
-{
- char *kbuf;
- int error = EFAULT;
-
- if (*len > XATTR_SIZE_MAX)
- return EINVAL;
- kbuf = kmalloc(*len, GFP_KERNEL);
- if (!kbuf)
- return ENOMEM;
-
- error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags);
- if (error)
- goto out_kfree;
-
- if (copy_to_user(ubuf, kbuf, *len))
- error = EFAULT;
-
- out_kfree:
- kfree(kbuf);
- return error;
-}
-
-STATIC int
-xfs_attrmulti_attr_set(
- struct inode *inode,
- char *name,
- const char __user *ubuf,
- __uint32_t len,
- __uint32_t flags)
-{
- char *kbuf;
- int error = EFAULT;
-
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- return EPERM;
- if (len > XATTR_SIZE_MAX)
- return EINVAL;
-
- kbuf = kmalloc(len, GFP_KERNEL);
- if (!kbuf)
- return ENOMEM;
-
- if (copy_from_user(kbuf, ubuf, len))
- goto out_kfree;
-
- error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);
-
- out_kfree:
- kfree(kbuf);
- return error;
-}
-
-STATIC int
-xfs_attrmulti_attr_remove(
- struct inode *inode,
- char *name,
- __uint32_t flags)
-{
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- return EPERM;
- return xfs_attr_remove(XFS_I(inode), name, flags);
-}
-
-STATIC int
-xfs_attrmulti_by_handle(
- xfs_mount_t *mp,
- void __user *arg,
- struct file *parfilp,
- struct inode *parinode)
-{
- int error;
- xfs_attr_multiop_t *ops;
- xfs_fsop_attrmulti_handlereq_t am_hreq;
- struct inode *inode;
- unsigned int i, size;
- char *attr_name;
-
- if (!capable(CAP_SYS_ADMIN))
- return -XFS_ERROR(EPERM);
- if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
- return -XFS_ERROR(EFAULT);
-
- error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &inode);
- if (error)
- goto out;
-
- error = E2BIG;
- size = am_hreq.opcount * sizeof(attr_multiop_t);
- if (!size || size > 16 * PAGE_SIZE)
- goto out_vn_rele;
-
- error = ENOMEM;
- ops = kmalloc(size, GFP_KERNEL);
- if (!ops)
- goto out_vn_rele;
-
- error = EFAULT;
- if (copy_from_user(ops, am_hreq.ops, size))
- goto out_kfree_ops;
-
- attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
- if (!attr_name)
- goto out_kfree_ops;
-
-
- error = 0;
- for (i = 0; i < am_hreq.opcount; i++) {
- ops[i].am_error = strncpy_from_user(attr_name,
- ops[i].am_attrname, MAXNAMELEN);
- if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
- error = -ERANGE;
- if (ops[i].am_error < 0)
- break;
-
- switch (ops[i].am_opcode) {
- case ATTR_OP_GET:
- ops[i].am_error = xfs_attrmulti_attr_get(inode,
- attr_name, ops[i].am_attrvalue,
- &ops[i].am_length, ops[i].am_flags);
- break;
- case ATTR_OP_SET:
- ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
- if (ops[i].am_error)
- break;
- ops[i].am_error = xfs_attrmulti_attr_set(inode,
- attr_name, ops[i].am_attrvalue,
- ops[i].am_length, ops[i].am_flags);
- mnt_drop_write(parfilp->f_path.mnt);
- break;
- case ATTR_OP_REMOVE:
- ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
- if (ops[i].am_error)
- break;
- ops[i].am_error = xfs_attrmulti_attr_remove(inode,
- attr_name, ops[i].am_flags);
- mnt_drop_write(parfilp->f_path.mnt);
- break;
- default:
- ops[i].am_error = EINVAL;
- }
- }
-
- if (copy_to_user(am_hreq.ops, ops, size))
- error = XFS_ERROR(EFAULT);
-
- kfree(attr_name);
- out_kfree_ops:
- kfree(ops);
- out_vn_rele:
- iput(inode);
- out:
- return -error;
-}
-
-STATIC int
-xfs_ioc_space(
- struct xfs_inode *ip,
- struct inode *inode,
- struct file *filp,
- int ioflags,
- unsigned int cmd,
- void __user *arg)
-{
- xfs_flock64_t bf;
- int attr_flags = 0;
- int error;
-
- if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
- return -XFS_ERROR(EPERM);
-
- if (!(filp->f_mode & FMODE_WRITE))
- return -XFS_ERROR(EBADF);
-
- if (!S_ISREG(inode->i_mode))
- return -XFS_ERROR(EINVAL);
-
- if (copy_from_user(&bf, arg, sizeof(bf)))
- return -XFS_ERROR(EFAULT);
-
- if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
- attr_flags |= ATTR_NONBLOCK;
- if (ioflags & IO_INVIS)
- attr_flags |= ATTR_DMI;
-
- error = xfs_change_file_space(ip, cmd, &bf, filp->f_pos,
- NULL, attr_flags);
- return -error;
-}
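
xfs_ioc_space() serves the preallocation ioctls with an xfs_flock64 describing the byte range, and requires the descriptor to be open for writing. A hedged userspace sketch of XFS_IOC_RESVSP64, which reserves space without changing the file size (hypothetical helper, xfsprogs headers assumed; the 32-bit layout of the same structure appears in xfs_ioctl32.c below):

#include <xfs/xfs.h>		/* xfs_flock64_t, XFS_IOC_RESVSP64 */
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Hypothetical helper: preallocate len bytes at offset off on an fd that
 * is open for writing.  The file size is left unchanged. */
static int
reserve_space(int fd, off_t off, off_t len)
{
	xfs_flock64_t	fl;

	memset(&fl, 0, sizeof(fl));
	fl.l_whence = SEEK_SET;		/* l_start is an absolute offset */
	fl.l_start = off;
	fl.l_len = len;			/* len == 0 means to end of file */

	return ioctl(fd, XFS_IOC_RESVSP64, &fl);
}
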
-
-STATIC int
-xfs_ioc_bulkstat(
- xfs_mount_t *mp,
- unsigned int cmd,
- void __user *arg)
-{
- xfs_fsop_bulkreq_t bulkreq;
- int count; /* # of records returned */
- xfs_ino_t inlast; /* last inode number */
- int done;
- int error;
-
- /* done = 1 if there are more stats to get and if bulkstat */
- /* should be called again (unused here, but used in dmapi) */
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return -XFS_ERROR(EIO);
-
- if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
- return -XFS_ERROR(EFAULT);
-
- if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
- return -XFS_ERROR(EFAULT);
-
- if ((count = bulkreq.icount) <= 0)
- return -XFS_ERROR(EINVAL);
-
- if (bulkreq.ubuffer == NULL)
- return -XFS_ERROR(EINVAL);
-
- if (cmd == XFS_IOC_FSINUMBERS)
- error = xfs_inumbers(mp, &inlast, &count,
- bulkreq.ubuffer, xfs_inumbers_fmt);
- else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
- error = xfs_bulkstat_single(mp, &inlast,
- bulkreq.ubuffer, &done);
- else /* XFS_IOC_FSBULKSTAT */
- error = xfs_bulkstat(mp, &inlast, &count,
- (bulkstat_one_pf)xfs_bulkstat_one, NULL,
- sizeof(xfs_bstat_t), bulkreq.ubuffer,
- BULKSTAT_FG_QUICK, &done);
-
- if (error)
- return -error;
-
- if (bulkreq.ocount != NULL) {
- if (copy_to_user(bulkreq.lastip, &inlast,
- sizeof(xfs_ino_t)))
- return -XFS_ERROR(EFAULT);
-
- if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
- return -XFS_ERROR(EFAULT);
- }
-
- return 0;
-}
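
xfs_ioc_bulkstat() is driven by an xfs_fsop_bulkreq carrying a user buffer of xfs_bstat_t records plus the resume cursor and output count seen above. A sketch of iterating a whole filesystem with XFS_IOC_FSBULKSTAT (hypothetical helper, xfsprogs headers assumed; CAP_SYS_ADMIN is required, and the loop assumes the call returns zero records once the inode space is exhausted):

#include <xfs/xfs.h>		/* xfs_fsop_bulkreq_t, xfs_bstat_t */
#include <string.h>
#include <sys/ioctl.h>

#define NBSTAT	64

/* Hypothetical helper: walk every inode in the filesystem containing
 * fs_fd, calling cb() for each xfs_bstat_t returned by the kernel. */
static int
bulkstat_all(int fs_fd, void (*cb)(const xfs_bstat_t *))
{
	xfs_bstat_t		buf[NBSTAT];
	xfs_fsop_bulkreq_t	breq;
	__u64			lastino = 0;
	__s32			ocount = 0;
	int			i;

	memset(&breq, 0, sizeof(breq));
	breq.lastip = &lastino;		/* in/out: resume cursor */
	breq.icount = NBSTAT;		/* capacity of ubuffer, in records */
	breq.ubuffer = buf;
	breq.ocount = &ocount;		/* out: records actually returned */

	for (;;) {
		if (ioctl(fs_fd, XFS_IOC_FSBULKSTAT, &breq) < 0)
			return -1;
		if (ocount == 0)
			break;			/* no more inodes */
		for (i = 0; i < ocount; i++)
			cb(&buf[i]);
	}
	return 0;
}
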
-
-STATIC int
-xfs_ioc_fsgeometry_v1(
- xfs_mount_t *mp,
- void __user *arg)
-{
- xfs_fsop_geom_v1_t fsgeo;
- int error;
-
- error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
- if (error)
- return -error;
-
- if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
- return -XFS_ERROR(EFAULT);
- return 0;
-}
-
-STATIC int
-xfs_ioc_fsgeometry(
- xfs_mount_t *mp,
- void __user *arg)
-{
- xfs_fsop_geom_t fsgeo;
- int error;
-
- error = xfs_fs_geometry(mp, &fsgeo, 4);
- if (error)
- return -error;
-
- if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
- return -XFS_ERROR(EFAULT);
- return 0;
-}
-
-/*
- * Linux extended inode flags interface.
- */
-
-STATIC unsigned int
-xfs_merge_ioc_xflags(
- unsigned int flags,
- unsigned int start)
-{
- unsigned int xflags = start;
-
- if (flags & FS_IMMUTABLE_FL)
- xflags |= XFS_XFLAG_IMMUTABLE;
- else
- xflags &= ~XFS_XFLAG_IMMUTABLE;
- if (flags & FS_APPEND_FL)
- xflags |= XFS_XFLAG_APPEND;
- else
- xflags &= ~XFS_XFLAG_APPEND;
- if (flags & FS_SYNC_FL)
- xflags |= XFS_XFLAG_SYNC;
- else
- xflags &= ~XFS_XFLAG_SYNC;
- if (flags & FS_NOATIME_FL)
- xflags |= XFS_XFLAG_NOATIME;
- else
- xflags &= ~XFS_XFLAG_NOATIME;
- if (flags & FS_NODUMP_FL)
- xflags |= XFS_XFLAG_NODUMP;
- else
- xflags &= ~XFS_XFLAG_NODUMP;
-
- return xflags;
-}
-
-STATIC unsigned int
-xfs_di2lxflags(
- __uint16_t di_flags)
-{
- unsigned int flags = 0;
-
- if (di_flags & XFS_DIFLAG_IMMUTABLE)
- flags |= FS_IMMUTABLE_FL;
- if (di_flags & XFS_DIFLAG_APPEND)
- flags |= FS_APPEND_FL;
- if (di_flags & XFS_DIFLAG_SYNC)
- flags |= FS_SYNC_FL;
- if (di_flags & XFS_DIFLAG_NOATIME)
- flags |= FS_NOATIME_FL;
- if (di_flags & XFS_DIFLAG_NODUMP)
- flags |= FS_NODUMP_FL;
- return flags;
-}
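
xfs_merge_ioc_xflags() and xfs_di2lxflags() translate between the generic FS_*_FL bits and the on-disk XFS_XFLAG_* bits, so the standard chattr-style flags interface works unchanged on XFS (XFS_IOC_GETXFLAGS/SETXFLAGS are aliases of FS_IOC_GETFLAGS/SETFLAGS). A hedged userspace sketch (hypothetical helper):

#include <fcntl.h>
#include <linux/fs.h>		/* FS_IOC_GETFLAGS, FS_NOATIME_FL, ... */
#include <sys/ioctl.h>
#include <unistd.h>

/* Hypothetical helper: set the noatime attribute on a file; this is the
 * userspace side of xfs_ioc_getxflags()/xfs_ioc_setxflags() above. */
static int
set_noatime(const char *path)
{
	unsigned int	flags;
	int		fd, ret = -1;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
		flags |= FS_NOATIME_FL;	/* becomes XFS_XFLAG_NOATIME on disk */
		ret = ioctl(fd, FS_IOC_SETFLAGS, &flags);
	}
	close(fd);
	return ret;
}
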
-
-STATIC int
-xfs_ioc_fsgetxattr(
- xfs_inode_t *ip,
- int attr,
- void __user *arg)
-{
- struct fsxattr fa;
-
- xfs_ilock(ip, XFS_ILOCK_SHARED);
- fa.fsx_xflags = xfs_ip2xflags(ip);
- fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
- fa.fsx_projid = ip->i_d.di_projid;
-
- if (attr) {
- if (ip->i_afp) {
- if (ip->i_afp->if_flags & XFS_IFEXTENTS)
- fa.fsx_nextents = ip->i_afp->if_bytes /
- sizeof(xfs_bmbt_rec_t);
- else
- fa.fsx_nextents = ip->i_d.di_anextents;
- } else
- fa.fsx_nextents = 0;
- } else {
- if (ip->i_df.if_flags & XFS_IFEXTENTS)
- fa.fsx_nextents = ip->i_df.if_bytes /
- sizeof(xfs_bmbt_rec_t);
- else
- fa.fsx_nextents = ip->i_d.di_nextents;
- }
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
- if (copy_to_user(arg, &fa, sizeof(fa)))
- return -EFAULT;
- return 0;
-}
-
-STATIC int
-xfs_ioc_fssetxattr(
- xfs_inode_t *ip,
- struct file *filp,
- void __user *arg)
-{
- struct fsxattr fa;
- struct bhv_vattr *vattr;
- int error;
- int attr_flags;
-
- if (copy_from_user(&fa, arg, sizeof(fa)))
- return -EFAULT;
-
- vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
- if (unlikely(!vattr))
- return -ENOMEM;
-
- attr_flags = 0;
- if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
- attr_flags |= ATTR_NONBLOCK;
-
- vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
- vattr->va_xflags = fa.fsx_xflags;
- vattr->va_extsize = fa.fsx_extsize;
- vattr->va_projid = fa.fsx_projid;
-
- error = -xfs_setattr(ip, vattr, attr_flags, NULL);
- if (!error)
- vn_revalidate(XFS_ITOV(ip)); /* update flags */
- kfree(vattr);
- return 0;
-}
-
-STATIC int
-xfs_ioc_getxflags(
- xfs_inode_t *ip,
- void __user *arg)
-{
- unsigned int flags;
-
- flags = xfs_di2lxflags(ip->i_d.di_flags);
- if (copy_to_user(arg, &flags, sizeof(flags)))
- return -EFAULT;
- return 0;
-}
-
-STATIC int
-xfs_ioc_setxflags(
- xfs_inode_t *ip,
- struct file *filp,
- void __user *arg)
-{
- struct bhv_vattr *vattr;
- unsigned int flags;
- int attr_flags;
- int error;
-
- if (copy_from_user(&flags, arg, sizeof(flags)))
- return -EFAULT;
-
- if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
- FS_NOATIME_FL | FS_NODUMP_FL | \
- FS_SYNC_FL))
- return -EOPNOTSUPP;
-
- vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
- if (unlikely(!vattr))
- return -ENOMEM;
-
- attr_flags = 0;
- if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
- attr_flags |= ATTR_NONBLOCK;
-
- vattr->va_mask = XFS_AT_XFLAGS;
- vattr->va_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));
-
- error = -xfs_setattr(ip, vattr, attr_flags, NULL);
- if (likely(!error))
- vn_revalidate(XFS_ITOV(ip)); /* update flags */
- kfree(vattr);
- return error;
-}
-
-STATIC int
-xfs_ioc_getbmap(
- struct xfs_inode *ip,
- int ioflags,
- unsigned int cmd,
- void __user *arg)
-{
- struct getbmap bm;
- int iflags;
- int error;
-
- if (copy_from_user(&bm, arg, sizeof(bm)))
- return -XFS_ERROR(EFAULT);
-
- if (bm.bmv_count < 2)
- return -XFS_ERROR(EINVAL);
-
- iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
- if (ioflags & IO_INVIS)
- iflags |= BMV_IF_NO_DMAPI_READ;
-
- error = xfs_getbmap(ip, &bm, (struct getbmap __user *)arg+1, iflags);
- if (error)
- return -error;
-
- if (copy_to_user(arg, &bm, sizeof(bm)))
- return -XFS_ERROR(EFAULT);
- return 0;
-}
-
-STATIC int
-xfs_ioc_getbmapx(
- struct xfs_inode *ip,
- void __user *arg)
-{
- struct getbmapx bmx;
- struct getbmap bm;
- int iflags;
- int error;
-
- if (copy_from_user(&bmx, arg, sizeof(bmx)))
- return -XFS_ERROR(EFAULT);
-
- if (bmx.bmv_count < 2)
- return -XFS_ERROR(EINVAL);
-
- /*
- * Map input getbmapx structure to a getbmap
- * structure for xfs_getbmap.
- */
- GETBMAP_CONVERT(bmx, bm);
-
- iflags = bmx.bmv_iflags;
-
- if (iflags & (~BMV_IF_VALID))
- return -XFS_ERROR(EINVAL);
-
- iflags |= BMV_IF_EXTENDED;
-
- error = xfs_getbmap(ip, &bm, (struct getbmapx __user *)arg+1, iflags);
- if (error)
- return -error;
-
- GETBMAP_CONVERT(bm, bmx);
-
- if (copy_to_user(arg, &bmx, sizeof(bmx)))
- return -XFS_ERROR(EFAULT);
-
- return 0;
-}
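
The getbmapx array convention is what the bmv_count < 2 check above enforces: element zero is the request header (offset, length, count, flags) and the remaining bmv_count - 1 elements receive extent descriptors, with the header updated so the call can be repeated. A hedged sketch of walking a file's extents (hypothetical helper, xfsprogs headers assumed):

#include <xfs/xfs.h>		/* struct getbmapx, XFS_IOC_GETBMAPX */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

#define NEXTENTS	32

/* Hypothetical helper: print the extent map of the open file fd. */
static int
print_extents(int fd)
{
	struct getbmapx	bmv[NEXTENTS + 1];	/* header + result slots */
	int		i;

	memset(bmv, 0, sizeof(bmv));
	bmv[0].bmv_offset = 0;			/* start of file */
	bmv[0].bmv_length = -1;			/* -1 means to end of file */
	bmv[0].bmv_count = NEXTENTS + 1;	/* includes the header */

	for (;;) {
		if (ioctl(fd, XFS_IOC_GETBMAPX, bmv) < 0)
			return -1;
		if (bmv[0].bmv_entries == 0)
			break;			/* no extents left */
		for (i = 1; i <= bmv[0].bmv_entries; i++)
			printf("offset %lld block %lld length %lld\n",
			       (long long)bmv[i].bmv_offset,
			       (long long)bmv[i].bmv_block,
			       (long long)bmv[i].bmv_length);
		/* bmv[0] now points past the last extent returned */
	}
	return 0;
}
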
-
-int
-xfs_ioctl(
- xfs_inode_t *ip,
- struct file *filp,
- int ioflags,
- unsigned int cmd,
- void __user *arg)
-{
- struct inode *inode = filp->f_path.dentry->d_inode;
- xfs_mount_t *mp = ip->i_mount;
- int error;
-
- xfs_itrace_entry(XFS_I(inode));
- switch (cmd) {
-
- case XFS_IOC_ALLOCSP:
- case XFS_IOC_FREESP:
- case XFS_IOC_RESVSP:
- case XFS_IOC_UNRESVSP:
- case XFS_IOC_ALLOCSP64:
- case XFS_IOC_FREESP64:
- case XFS_IOC_RESVSP64:
- case XFS_IOC_UNRESVSP64:
- /*
- * Only allow the sys admin to reserve space unless
- * unwritten extents are enabled.
- */
- if (!xfs_sb_version_hasextflgbit(&mp->m_sb) &&
- !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- return xfs_ioc_space(ip, inode, filp, ioflags, cmd, arg);
-
- case XFS_IOC_DIOINFO: {
- struct dioattr da;
- xfs_buftarg_t *target =
- XFS_IS_REALTIME_INODE(ip) ?
- mp->m_rtdev_targp : mp->m_ddev_targp;
-
- da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
- da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
-
- if (copy_to_user(arg, &da, sizeof(da)))
- return -XFS_ERROR(EFAULT);
- return 0;
- }
-
- case XFS_IOC_FSBULKSTAT_SINGLE:
- case XFS_IOC_FSBULKSTAT:
- case XFS_IOC_FSINUMBERS:
- return xfs_ioc_bulkstat(mp, cmd, arg);
-
- case XFS_IOC_FSGEOMETRY_V1:
- return xfs_ioc_fsgeometry_v1(mp, arg);
-
- case XFS_IOC_FSGEOMETRY:
- return xfs_ioc_fsgeometry(mp, arg);
-
- case XFS_IOC_GETVERSION:
- return put_user(inode->i_generation, (int __user *)arg);
-
- case XFS_IOC_FSGETXATTR:
- return xfs_ioc_fsgetxattr(ip, 0, arg);
- case XFS_IOC_FSGETXATTRA:
- return xfs_ioc_fsgetxattr(ip, 1, arg);
- case XFS_IOC_FSSETXATTR:
- return xfs_ioc_fssetxattr(ip, filp, arg);
- case XFS_IOC_GETXFLAGS:
- return xfs_ioc_getxflags(ip, arg);
- case XFS_IOC_SETXFLAGS:
- return xfs_ioc_setxflags(ip, filp, arg);
-
- case XFS_IOC_FSSETDM: {
- struct fsdmidata dmi;
-
- if (copy_from_user(&dmi, arg, sizeof(dmi)))
- return -XFS_ERROR(EFAULT);
-
- error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
- dmi.fsd_dmstate);
- return -error;
- }
-
- case XFS_IOC_GETBMAP:
- case XFS_IOC_GETBMAPA:
- return xfs_ioc_getbmap(ip, ioflags, cmd, arg);
-
- case XFS_IOC_GETBMAPX:
- return xfs_ioc_getbmapx(ip, arg);
-
- case XFS_IOC_FD_TO_HANDLE:
- case XFS_IOC_PATH_TO_HANDLE:
- case XFS_IOC_PATH_TO_FSHANDLE:
- return xfs_find_handle(cmd, arg);
-
- case XFS_IOC_OPEN_BY_HANDLE:
- return xfs_open_by_handle(mp, arg, filp, inode);
-
- case XFS_IOC_FSSETDM_BY_HANDLE:
- return xfs_fssetdm_by_handle(mp, arg, inode);
-
- case XFS_IOC_READLINK_BY_HANDLE:
- return xfs_readlink_by_handle(mp, arg, inode);
-
- case XFS_IOC_ATTRLIST_BY_HANDLE:
- return xfs_attrlist_by_handle(mp, arg, inode);
-
- case XFS_IOC_ATTRMULTI_BY_HANDLE:
- return xfs_attrmulti_by_handle(mp, arg, filp, inode);
-
- case XFS_IOC_SWAPEXT: {
- error = xfs_swapext((struct xfs_swapext __user *)arg);
- return -error;
- }
-
- case XFS_IOC_FSCOUNTS: {
- xfs_fsop_counts_t out;
-
- error = xfs_fs_counts(mp, &out);
- if (error)
- return -error;
-
- if (copy_to_user(arg, &out, sizeof(out)))
- return -XFS_ERROR(EFAULT);
- return 0;
- }
-
- case XFS_IOC_SET_RESBLKS: {
- xfs_fsop_resblks_t inout;
- __uint64_t in;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&inout, arg, sizeof(inout)))
- return -XFS_ERROR(EFAULT);
-
- /* input parameter is passed in resblks field of structure */
- in = inout.resblks;
- error = xfs_reserve_blocks(mp, &in, &inout);
- if (error)
- return -error;
-
- if (copy_to_user(arg, &inout, sizeof(inout)))
- return -XFS_ERROR(EFAULT);
- return 0;
- }
-
- case XFS_IOC_GET_RESBLKS: {
- xfs_fsop_resblks_t out;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- error = xfs_reserve_blocks(mp, NULL, &out);
- if (error)
- return -error;
-
- if (copy_to_user(arg, &out, sizeof(out)))
- return -XFS_ERROR(EFAULT);
-
- return 0;
- }
-
- case XFS_IOC_FSGROWFSDATA: {
- xfs_growfs_data_t in;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&in, arg, sizeof(in)))
- return -XFS_ERROR(EFAULT);
-
- error = xfs_growfs_data(mp, &in);
- return -error;
- }
-
- case XFS_IOC_FSGROWFSLOG: {
- xfs_growfs_log_t in;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&in, arg, sizeof(in)))
- return -XFS_ERROR(EFAULT);
-
- error = xfs_growfs_log(mp, &in);
- return -error;
- }
-
- case XFS_IOC_FSGROWFSRT: {
- xfs_growfs_rt_t in;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&in, arg, sizeof(in)))
- return -XFS_ERROR(EFAULT);
-
- error = xfs_growfs_rt(mp, &in);
- return -error;
- }
-
- case XFS_IOC_FREEZE:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (inode->i_sb->s_frozen == SB_UNFROZEN)
- freeze_bdev(inode->i_sb->s_bdev);
- return 0;
-
- case XFS_IOC_THAW:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (inode->i_sb->s_frozen != SB_UNFROZEN)
- thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);
- return 0;
-
- case XFS_IOC_GOINGDOWN: {
- __uint32_t in;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (get_user(in, (__uint32_t __user *)arg))
- return -XFS_ERROR(EFAULT);
-
- error = xfs_fs_goingdown(mp, in);
- return -error;
- }
-
- case XFS_IOC_ERROR_INJECTION: {
- xfs_error_injection_t in;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&in, arg, sizeof(in)))
- return -XFS_ERROR(EFAULT);
-
- error = xfs_errortag_add(in.errtag, mp);
- return -error;
- }
-
- case XFS_IOC_ERROR_CLEARALL:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- error = xfs_errortag_clearall(mp, 1);
- return -error;
-
- default:
- return -ENOTTY;
- }
-}
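The dispatch above hands fixed-layout structures back and forth across the user/kernel boundary with copy_from_user()/copy_to_user(). As a rough user-space sketch of one of the cases handled there (XFS_IOC_FSGEOMETRY_V1), the following assumes the xfsprogs header <xfs/xfs_fs.h>, which carries the same XFS_IOC_FSGEOMETRY_V1 command and struct xfs_fsop_geom_v1 definition; it is illustrative only, not part of the deleted file.

/* Minimal sketch: query XFS geometry from user space.
 * Assumes xfsprogs' <xfs/xfs_fs.h>; the field names match the v1
 * geometry layout also visible in the compat structure further down.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>

int main(int argc, char **argv)
{
	struct xfs_fsop_geom_v1 geo;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file-on-xfs>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Mirrors the XFS_IOC_FSGEOMETRY_V1 case handled above. */
	if (ioctl(fd, XFS_IOC_FSGEOMETRY_V1, &geo) < 0) {
		perror("XFS_IOC_FSGEOMETRY_V1");
		close(fd);
		return 1;
	}
	printf("blocksize=%u agcount=%u agblocks=%u datablocks=%llu\n",
	       geo.blocksize, geo.agcount, geo.agblocks,
	       (unsigned long long)geo.datablocks);
	close(fd);
	return 0;
}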
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
deleted file mode 100644
index a4b254eb43b..00000000000
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ /dev/null
@@ -1,488 +0,0 @@
-/*
- * Copyright (c) 2004-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include <linux/compat.h>
-#include <linux/init.h>
-#include <linux/ioctl.h>
-#include <linux/syscalls.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <asm/uaccess.h>
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_vfs.h"
-#include "xfs_vnode.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_itable.h"
-#include "xfs_error.h"
-#include "xfs_dfrag.h"
-#include "xfs_vnodeops.h"
-#include "xfs_ioctl32.h"
-
-#define _NATIVE_IOC(cmd, type) \
- _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
-
-#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
-#define BROKEN_X86_ALIGNMENT
-#define _PACKED __attribute__((packed))
-/* on ia32 l_start is on a 32-bit boundary */
-typedef struct xfs_flock64_32 {
- __s16 l_type;
- __s16 l_whence;
- __s64 l_start __attribute__((packed));
- /* len == 0 means until end of file */
- __s64 l_len __attribute__((packed));
- __s32 l_sysid;
- __u32 l_pid;
- __s32 l_pad[4]; /* reserve area */
-} xfs_flock64_32_t;
-
-#define XFS_IOC_ALLOCSP_32 _IOW ('X', 10, struct xfs_flock64_32)
-#define XFS_IOC_FREESP_32 _IOW ('X', 11, struct xfs_flock64_32)
-#define XFS_IOC_ALLOCSP64_32 _IOW ('X', 36, struct xfs_flock64_32)
-#define XFS_IOC_FREESP64_32 _IOW ('X', 37, struct xfs_flock64_32)
-#define XFS_IOC_RESVSP_32 _IOW ('X', 40, struct xfs_flock64_32)
-#define XFS_IOC_UNRESVSP_32 _IOW ('X', 41, struct xfs_flock64_32)
-#define XFS_IOC_RESVSP64_32 _IOW ('X', 42, struct xfs_flock64_32)
-#define XFS_IOC_UNRESVSP64_32 _IOW ('X', 43, struct xfs_flock64_32)
-
-/* just account for different alignment */
-STATIC unsigned long
-xfs_ioctl32_flock(
- unsigned long arg)
-{
- xfs_flock64_32_t __user *p32 = (void __user *)arg;
- xfs_flock64_t __user *p = compat_alloc_user_space(sizeof(*p));
-
- if (copy_in_user(&p->l_type, &p32->l_type, sizeof(s16)) ||
- copy_in_user(&p->l_whence, &p32->l_whence, sizeof(s16)) ||
- copy_in_user(&p->l_start, &p32->l_start, sizeof(s64)) ||
- copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
- copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
- copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
- return -EFAULT;
-
- return (unsigned long)p;
-}
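The reason xfs_ioctl32_flock() has to repack the structure is the comment above: on ia32, 64-bit members are only 4-byte aligned, so l_start lands at a different offset than in the native 64-bit layout. A small stand-alone sketch (the struct names demo-only, not the kernel's) makes the difference visible:

/* Stand-alone illustration of the alignment mismatch worked around above:
 * with the member-level packed attribute (the i386-style layout) l_start
 * sits at offset 4; with natural 64-bit alignment it sits at offset 8.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct flock64_ia32 {			/* i386-style layout */
	int16_t  l_type;
	int16_t  l_whence;
	int64_t  l_start __attribute__((packed));
	int64_t  l_len   __attribute__((packed));
	int32_t  l_sysid;
	uint32_t l_pid;
	int32_t  l_pad[4];
};

struct flock64_native {			/* natural 64-bit layout */
	int16_t  l_type;
	int16_t  l_whence;
	int64_t  l_start;
	int64_t  l_len;
	int32_t  l_sysid;
	uint32_t l_pid;
	int32_t  l_pad[4];
};

int main(void)
{
	printf("ia32:   sizeof=%zu offsetof(l_start)=%zu\n",
	       sizeof(struct flock64_ia32),
	       offsetof(struct flock64_ia32, l_start));
	printf("native: sizeof=%zu offsetof(l_start)=%zu\n",
	       sizeof(struct flock64_native),
	       offsetof(struct flock64_native, l_start));
	return 0;
}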
-
-typedef struct compat_xfs_fsop_geom_v1 {
- __u32 blocksize; /* filesystem (data) block size */
- __u32 rtextsize; /* realtime extent size */
- __u32 agblocks; /* fsblocks in an AG */
- __u32 agcount; /* number of allocation groups */
- __u32 logblocks; /* fsblocks in the log */
- __u32 sectsize; /* (data) sector size, bytes */
- __u32 inodesize; /* inode size in bytes */
- __u32 imaxpct; /* max allowed inode space(%) */
- __u64 datablocks; /* fsblocks in data subvolume */
- __u64 rtblocks; /* fsblocks in realtime subvol */
- __u64 rtextents; /* rt extents in realtime subvol*/
- __u64 logstart; /* starting fsblock of the log */
- unsigned char uuid[16]; /* unique id of the filesystem */
- __u32 sunit; /* stripe unit, fsblocks */
- __u32 swidth; /* stripe width, fsblocks */
- __s32 version; /* structure version */
- __u32 flags; /* superblock version flags */
- __u32 logsectsize; /* log sector size, bytes */
- __u32 rtsectsize; /* realtime sector size, bytes */
- __u32 dirblocksize; /* directory block size, bytes */
-} __attribute__((packed)) compat_xfs_fsop_geom_v1_t;
-
-#define XFS_IOC_FSGEOMETRY_V1_32 \
- _IOR ('X', 100, struct compat_xfs_fsop_geom_v1)
-
-STATIC unsigned long xfs_ioctl32_geom_v1(unsigned long arg)
-{
- compat_xfs_fsop_geom_v1_t __user *p32 = (void __user *)arg;
- xfs_fsop_geom_v1_t __user *p = compat_alloc_user_space(sizeof(*p));
-
- if (copy_in_user(p, p32, sizeof(*p32)))
- return -EFAULT;
- return (unsigned long)p;
-}
-
-typedef struct compat_xfs_inogrp {
- __u64 xi_startino; /* starting inode number */
- __s32 xi_alloccount; /* # bits set in allocmask */
- __u64 xi_allocmask; /* mask of allocated inodes */
-} __attribute__((packed)) compat_xfs_inogrp_t;
-
-STATIC int xfs_inumbers_fmt_compat(
- void __user *ubuffer,
- const xfs_inogrp_t *buffer,
- long count,
- long *written)
-{
- compat_xfs_inogrp_t __user *p32 = ubuffer;
- long i;
-
- for (i = 0; i < count; i++) {
- if (put_user(buffer[i].xi_startino, &p32[i].xi_startino) ||
- put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) ||
- put_user(buffer[i].xi_allocmask, &p32[i].xi_allocmask))
- return -EFAULT;
- }
- *written = count * sizeof(*p32);
- return 0;
-}
-
-#else
-
-#define xfs_inumbers_fmt_compat xfs_inumbers_fmt
-#define _PACKED
-
-#endif
-
-/* XFS_IOC_FSBULKSTAT and friends */
-
-typedef struct compat_xfs_bstime {
- __s32 tv_sec; /* seconds */
- __s32 tv_nsec; /* and nanoseconds */
-} compat_xfs_bstime_t;
-
-STATIC int xfs_bstime_store_compat(
- compat_xfs_bstime_t __user *p32,
- const xfs_bstime_t *p)
-{
- __s32 sec32;
-
- sec32 = p->tv_sec;
- if (put_user(sec32, &p32->tv_sec) ||
- put_user(p->tv_nsec, &p32->tv_nsec))
- return -EFAULT;
- return 0;
-}
-
-typedef struct compat_xfs_bstat {
- __u64 bs_ino; /* inode number */
- __u16 bs_mode; /* type and mode */
- __u16 bs_nlink; /* number of links */
- __u32 bs_uid; /* user id */
- __u32 bs_gid; /* group id */
- __u32 bs_rdev; /* device value */
- __s32 bs_blksize; /* block size */
- __s64 bs_size; /* file size */
- compat_xfs_bstime_t bs_atime; /* access time */
- compat_xfs_bstime_t bs_mtime; /* modify time */
- compat_xfs_bstime_t bs_ctime; /* inode change time */
- int64_t bs_blocks; /* number of blocks */
- __u32 bs_xflags; /* extended flags */
- __s32 bs_extsize; /* extent size */
- __s32 bs_extents; /* number of extents */
- __u32 bs_gen; /* generation count */
- __u16 bs_projid; /* project id */
- unsigned char bs_pad[14]; /* pad space, unused */
- __u32 bs_dmevmask; /* DMIG event mask */
- __u16 bs_dmstate; /* DMIG state info */
- __u16 bs_aextents; /* attribute number of extents */
-} _PACKED compat_xfs_bstat_t;
-
-STATIC int xfs_bulkstat_one_fmt_compat(
- void __user *ubuffer,
- const xfs_bstat_t *buffer)
-{
- compat_xfs_bstat_t __user *p32 = ubuffer;
-
- if (put_user(buffer->bs_ino, &p32->bs_ino) ||
- put_user(buffer->bs_mode, &p32->bs_mode) ||
- put_user(buffer->bs_nlink, &p32->bs_nlink) ||
- put_user(buffer->bs_uid, &p32->bs_uid) ||
- put_user(buffer->bs_gid, &p32->bs_gid) ||
- put_user(buffer->bs_rdev, &p32->bs_rdev) ||
- put_user(buffer->bs_blksize, &p32->bs_blksize) ||
- put_user(buffer->bs_size, &p32->bs_size) ||
- xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
- xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
- xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
- put_user(buffer->bs_blocks, &p32->bs_blocks) ||
- put_user(buffer->bs_xflags, &p32->bs_xflags) ||
- put_user(buffer->bs_extsize, &p32->bs_extsize) ||
- put_user(buffer->bs_extents, &p32->bs_extents) ||
- put_user(buffer->bs_gen, &p32->bs_gen) ||
- put_user(buffer->bs_projid, &p32->bs_projid) ||
- put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) ||
- put_user(buffer->bs_dmstate, &p32->bs_dmstate) ||
- put_user(buffer->bs_aextents, &p32->bs_aextents))
- return -EFAULT;
- return sizeof(*p32);
-}
-
-
-
-typedef struct compat_xfs_fsop_bulkreq {
- compat_uptr_t lastip; /* last inode # pointer */
- __s32 icount; /* count of entries in buffer */
- compat_uptr_t ubuffer; /* user buffer for inode desc. */
- compat_uptr_t ocount; /* output count pointer */
-} compat_xfs_fsop_bulkreq_t;
-
-#define XFS_IOC_FSBULKSTAT_32 \
- _IOWR('X', 101, struct compat_xfs_fsop_bulkreq)
-#define XFS_IOC_FSBULKSTAT_SINGLE_32 \
- _IOWR('X', 102, struct compat_xfs_fsop_bulkreq)
-#define XFS_IOC_FSINUMBERS_32 \
- _IOWR('X', 103, struct compat_xfs_fsop_bulkreq)
-
-/* copied from xfs_ioctl.c */
-STATIC int
-xfs_ioc_bulkstat_compat(
- xfs_mount_t *mp,
- unsigned int cmd,
- void __user *arg)
-{
- compat_xfs_fsop_bulkreq_t __user *p32 = (void __user *)arg;
- u32 addr;
- xfs_fsop_bulkreq_t bulkreq;
- int count; /* # of records returned */
- xfs_ino_t inlast; /* last inode number */
- int done;
- int error;
-
- /* done = 1 if there are more stats to get and if bulkstat */
- /* should be called again (unused here, but used in dmapi) */
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return -XFS_ERROR(EIO);
-
- if (get_user(addr, &p32->lastip))
- return -EFAULT;
- bulkreq.lastip = compat_ptr(addr);
- if (get_user(bulkreq.icount, &p32->icount) ||
- get_user(addr, &p32->ubuffer))
- return -EFAULT;
- bulkreq.ubuffer = compat_ptr(addr);
- if (get_user(addr, &p32->ocount))
- return -EFAULT;
- bulkreq.ocount = compat_ptr(addr);
-
- if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
- return -XFS_ERROR(EFAULT);
-
- if ((count = bulkreq.icount) <= 0)
- return -XFS_ERROR(EINVAL);
-
- if (bulkreq.ubuffer == NULL)
- return -XFS_ERROR(EINVAL);
-
- if (cmd == XFS_IOC_FSINUMBERS)
- error = xfs_inumbers(mp, &inlast, &count,
- bulkreq.ubuffer, xfs_inumbers_fmt_compat);
- else {
- /* declare a var to get a warning in case the type changes */
- bulkstat_one_fmt_pf formatter = xfs_bulkstat_one_fmt_compat;
- error = xfs_bulkstat(mp, &inlast, &count,
- xfs_bulkstat_one, formatter,
- sizeof(compat_xfs_bstat_t), bulkreq.ubuffer,
- BULKSTAT_FG_QUICK, &done);
- }
- if (error)
- return -error;
-
- if (bulkreq.ocount != NULL) {
- if (copy_to_user(bulkreq.lastip, &inlast,
- sizeof(xfs_ino_t)))
- return -XFS_ERROR(EFAULT);
-
- if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
- return -XFS_ERROR(EFAULT);
- }
-
- return 0;
-}
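For context, this is the interface xfs_ioc_bulkstat_compat() is translating for 32-bit callers. A hedged user-space sketch of the native call follows; it assumes xfsprogs' <xfs/xfs_fs.h> for XFS_IOC_FSBULKSTAT, struct xfs_fsop_bulkreq and struct xfs_bstat, and, as in the handler above, needs CAP_SYS_ADMIN.

/* User-space sketch of XFS_IOC_FSBULKSTAT: iterate all inodes on the
 * filesystem backing the given path, resuming from the lastip cursor
 * that the kernel writes back after each call (see the handler above).
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>

int main(int argc, char **argv)
{
	struct xfs_bstat buf[64];
	struct xfs_fsop_bulkreq req;
	__u64 lastino = 0;
	__s32 count = 0;
	int fd, i;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);	/* any file or dir on the fs */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	req.lastip  = &lastino;		/* resume cursor, starts at 0 */
	req.icount  = 64;		/* capacity of buf[] */
	req.ubuffer = buf;
	req.ocount  = &count;

	while (ioctl(fd, XFS_IOC_FSBULKSTAT, &req) == 0 && count > 0) {
		for (i = 0; i < count; i++)
			printf("ino %llu mode %o size %lld\n",
			       (unsigned long long)buf[i].bs_ino,
			       buf[i].bs_mode,
			       (long long)buf[i].bs_size);
	}
	close(fd);
	return 0;
}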
-
-
-
-typedef struct compat_xfs_fsop_handlereq {
- __u32 fd; /* fd for FD_TO_HANDLE */
- compat_uptr_t path; /* user pathname */
- __u32 oflags; /* open flags */
- compat_uptr_t ihandle; /* user supplied handle */
- __u32 ihandlen; /* user supplied length */
- compat_uptr_t ohandle; /* user buffer for handle */
- compat_uptr_t ohandlen; /* user buffer length */
-} compat_xfs_fsop_handlereq_t;
-
-#define XFS_IOC_PATH_TO_FSHANDLE_32 \
- _IOWR('X', 104, struct compat_xfs_fsop_handlereq)
-#define XFS_IOC_PATH_TO_HANDLE_32 \
- _IOWR('X', 105, struct compat_xfs_fsop_handlereq)
-#define XFS_IOC_FD_TO_HANDLE_32 \
- _IOWR('X', 106, struct compat_xfs_fsop_handlereq)
-#define XFS_IOC_OPEN_BY_HANDLE_32 \
- _IOWR('X', 107, struct compat_xfs_fsop_handlereq)
-#define XFS_IOC_READLINK_BY_HANDLE_32 \
- _IOWR('X', 108, struct compat_xfs_fsop_handlereq)
-
-STATIC unsigned long xfs_ioctl32_fshandle(unsigned long arg)
-{
- compat_xfs_fsop_handlereq_t __user *p32 = (void __user *)arg;
- xfs_fsop_handlereq_t __user *p = compat_alloc_user_space(sizeof(*p));
- u32 addr;
-
- if (copy_in_user(&p->fd, &p32->fd, sizeof(__u32)) ||
- get_user(addr, &p32->path) ||
- put_user(compat_ptr(addr), &p->path) ||
- copy_in_user(&p->oflags, &p32->oflags, sizeof(__u32)) ||
- get_user(addr, &p32->ihandle) ||
- put_user(compat_ptr(addr), &p->ihandle) ||
- copy_in_user(&p->ihandlen, &p32->ihandlen, sizeof(__u32)) ||
- get_user(addr, &p32->ohandle) ||
- put_user(compat_ptr(addr), &p->ohandle) ||
- get_user(addr, &p32->ohandlen) ||
- put_user(compat_ptr(addr), &p->ohandlen))
- return -EFAULT;
-
- return (unsigned long)p;
-}
-
-
-STATIC long
-xfs_compat_ioctl(
- int mode,
- struct file *file,
- unsigned cmd,
- unsigned long arg)
-{
- struct inode *inode = file->f_path.dentry->d_inode;
- int error;
-
- switch (cmd) {
- case XFS_IOC_DIOINFO:
- case XFS_IOC_FSGEOMETRY:
- case XFS_IOC_FSGETXATTR:
- case XFS_IOC_FSSETXATTR:
- case XFS_IOC_FSGETXATTRA:
- case XFS_IOC_FSSETDM:
- case XFS_IOC_GETBMAP:
- case XFS_IOC_GETBMAPA:
- case XFS_IOC_GETBMAPX:
-/* not handled
- case XFS_IOC_FSSETDM_BY_HANDLE:
- case XFS_IOC_ATTRLIST_BY_HANDLE:
- case XFS_IOC_ATTRMULTI_BY_HANDLE:
-*/
- case XFS_IOC_FSCOUNTS:
- case XFS_IOC_SET_RESBLKS:
- case XFS_IOC_GET_RESBLKS:
- case XFS_IOC_FSGROWFSDATA:
- case XFS_IOC_FSGROWFSLOG:
- case XFS_IOC_FSGROWFSRT:
- case XFS_IOC_FREEZE:
- case XFS_IOC_THAW:
- case XFS_IOC_GOINGDOWN:
- case XFS_IOC_ERROR_INJECTION:
- case XFS_IOC_ERROR_CLEARALL:
- break;
-
- case XFS_IOC32_GETXFLAGS:
- case XFS_IOC32_SETXFLAGS:
- case XFS_IOC32_GETVERSION:
- cmd = _NATIVE_IOC(cmd, long);
- break;
-#ifdef BROKEN_X86_ALIGNMENT
- /* xfs_flock_t has wrong u32 vs u64 alignment */
- case XFS_IOC_ALLOCSP_32:
- case XFS_IOC_FREESP_32:
- case XFS_IOC_ALLOCSP64_32:
- case XFS_IOC_FREESP64_32:
- case XFS_IOC_RESVSP_32:
- case XFS_IOC_UNRESVSP_32:
- case XFS_IOC_RESVSP64_32:
- case XFS_IOC_UNRESVSP64_32:
- arg = xfs_ioctl32_flock(arg);
- cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
- break;
- case XFS_IOC_FSGEOMETRY_V1_32:
- arg = xfs_ioctl32_geom_v1(arg);
- cmd = _NATIVE_IOC(cmd, struct xfs_fsop_geom_v1);
- break;
-
-#else /* These are handled fine if no alignment issues */
- case XFS_IOC_ALLOCSP:
- case XFS_IOC_FREESP:
- case XFS_IOC_RESVSP:
- case XFS_IOC_UNRESVSP:
- case XFS_IOC_ALLOCSP64:
- case XFS_IOC_FREESP64:
- case XFS_IOC_RESVSP64:
- case XFS_IOC_UNRESVSP64:
- case XFS_IOC_FSGEOMETRY_V1:
- break;
-
- /* xfs_bstat_t still has wrong u32 vs u64 alignment */
- case XFS_IOC_SWAPEXT:
- break;
-
-#endif
- case XFS_IOC_FSBULKSTAT_32:
- case XFS_IOC_FSBULKSTAT_SINGLE_32:
- case XFS_IOC_FSINUMBERS_32:
- cmd = _NATIVE_IOC(cmd, struct xfs_fsop_bulkreq);
- return xfs_ioc_bulkstat_compat(XFS_I(inode)->i_mount,
- cmd, (void __user*)arg);
- case XFS_IOC_FD_TO_HANDLE_32:
- case XFS_IOC_PATH_TO_HANDLE_32:
- case XFS_IOC_PATH_TO_FSHANDLE_32:
- case XFS_IOC_OPEN_BY_HANDLE_32:
- case XFS_IOC_READLINK_BY_HANDLE_32:
- arg = xfs_ioctl32_fshandle(arg);
- cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq);
- break;
- default:
- return -ENOIOCTLCMD;
- }
-
- error = xfs_ioctl(XFS_I(inode), file, mode, cmd, (void __user *)arg);
- xfs_iflags_set(XFS_I(inode), XFS_IMODIFIED);
-
- return error;
-}
-
-long
-xfs_file_compat_ioctl(
- struct file *file,
- unsigned cmd,
- unsigned long arg)
-{
- return xfs_compat_ioctl(0, file, cmd, arg);
-}
-
-long
-xfs_file_compat_invis_ioctl(
- struct file *file,
- unsigned cmd,
- unsigned long arg)
-{
- return xfs_compat_ioctl(IO_INVIS, file, cmd, arg);
-}
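A detail worth spelling out about the dispatch above: _NATIVE_IOC has to rebuild the command word because the argument size is encoded in the ioctl number, so a 32-bit caller's command differs from the native one whenever the struct layouts differ. A tiny stand-alone sketch using only the standard <linux/ioctl.h> encoding macros (the demo structs are hypothetical):

/* Why _NATIVE_IOC exists: the struct size is baked into the command word,
 * so the same 'X'/10 request gets a different number for each layout.
 */
#include <stdio.h>
#include <linux/ioctl.h>

struct demo32 { int a; long long b __attribute__((packed)); };
struct demo64 { int a; long long b; };

#define DEMO_CMD_32 _IOW('X', 10, struct demo32)
#define DEMO_CMD_64 _IOW('X', 10, struct demo64)

int main(void)
{
	printf("32-bit cmd 0x%x (size %u)\n", DEMO_CMD_32,
	       (unsigned)_IOC_SIZE(DEMO_CMD_32));
	printf("native cmd 0x%x (size %u)\n", DEMO_CMD_64,
	       (unsigned)_IOC_SIZE(DEMO_CMD_64));
	return 0;
}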
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
deleted file mode 100644
index 5fc61c824bb..00000000000
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ /dev/null
@@ -1,917 +0,0 @@
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
-#include "xfs_quota.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_bmap.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
-#include "xfs_rtalloc.h"
-#include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_rw.h"
-#include "xfs_acl.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
-#include "xfs_utils.h"
-#include "xfs_vnodeops.h"
-
-#include <linux/capability.h>
-#include <linux/xattr.h>
-#include <linux/namei.h>
-#include <linux/security.h>
-#include <linux/falloc.h>
-
-/*
- * Bring the atime in the XFS inode uptodate.
- * Used before logging the inode to disk or when the Linux inode goes away.
- */
-void
-xfs_synchronize_atime(
- xfs_inode_t *ip)
-{
- struct inode *inode = ip->i_vnode;
-
- if (inode) {
- ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
- ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
- }
-}
-
-/*
- * If the linux inode exists, mark it dirty.
- * Used when committing a dirty inode into a transaction so that
- * the inode will get written back by the linux code
- */
-void
-xfs_mark_inode_dirty_sync(
- xfs_inode_t *ip)
-{
- struct inode *inode = ip->i_vnode;
-
- if (inode)
- mark_inode_dirty_sync(inode);
-}
-
-/*
- * Change the requested timestamp in the given inode.
- * We don't lock across timestamp updates, and we don't log them but
- * we do record the fact that there is dirty information in core.
- *
- * NOTE -- callers MUST combine XFS_ICHGTIME_MOD or XFS_ICHGTIME_CHG
- * with XFS_ICHGTIME_ACC to be sure that access time
- * update will take. Calling first with XFS_ICHGTIME_ACC
- * and then XFS_ICHGTIME_MOD may fail to modify the access
- * timestamp if the filesystem is mounted noacctm.
- */
-void
-xfs_ichgtime(
- xfs_inode_t *ip,
- int flags)
-{
- struct inode *inode = vn_to_inode(XFS_ITOV(ip));
- timespec_t tv;
-
- nanotime(&tv);
- if (flags & XFS_ICHGTIME_MOD) {
- inode->i_mtime = tv;
- ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
- ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
- }
- if (flags & XFS_ICHGTIME_ACC) {
- inode->i_atime = tv;
- ip->i_d.di_atime.t_sec = (__int32_t)tv.tv_sec;
- ip->i_d.di_atime.t_nsec = (__int32_t)tv.tv_nsec;
- }
- if (flags & XFS_ICHGTIME_CHG) {
- inode->i_ctime = tv;
- ip->i_d.di_ctime.t_sec = (__int32_t)tv.tv_sec;
- ip->i_d.di_ctime.t_nsec = (__int32_t)tv.tv_nsec;
- }
-
- /*
- * We update the i_update_core field _after_ changing
- * the timestamps in order to coordinate properly with
- * xfs_iflush() so that we don't lose timestamp updates.
- * This keeps us from having to hold the inode lock
- * while doing this. We use the SYNCHRONIZE macro to
- * ensure that the compiler does not reorder the update
- * of i_update_core above the timestamp updates above.
- */
- SYNCHRONIZE();
- ip->i_update_core = 1;
- if (!(inode->i_state & I_NEW))
- mark_inode_dirty_sync(inode);
-}
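The comment above asks for one thing: the i_update_core store must not be reordered in front of the timestamp stores, which the kernel enforces with its SYNCHRONIZE() barrier macro. As a portable stand-in, a C11 release fence expresses the same publish-data-then-set-flag pattern; this is an illustrative sketch with made-up type names, not the kernel code.

/* Stand-alone illustration of the ordering requirement described above:
 * write the data (timestamps) first, fence, then set the dirty flag that
 * a flusher may poll.  The C11 fence stands in for SYNCHRONIZE().
 */
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

struct demo_inode {
	struct timespec mtime;		/* the data being updated */
	atomic_int update_core;		/* the flag a flusher checks */
};

static void demo_touch(struct demo_inode *ip)
{
	clock_gettime(CLOCK_REALTIME, &ip->mtime);	/* update data first */
	atomic_thread_fence(memory_order_release);	/* ~ SYNCHRONIZE()  */
	atomic_store_explicit(&ip->update_core, 1,
			      memory_order_relaxed);	/* then set the flag */
}

int main(void)
{
	struct demo_inode ino = { .update_core = 0 };

	demo_touch(&ino);
	printf("mtime=%ld flag=%d\n", (long)ino.mtime.tv_sec,
	       atomic_load(&ino.update_core));
	return 0;
}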
-
-/*
- * Variant on the above which avoids querying the system clock
- * in situations where we know the Linux inode timestamps have
- * just been updated (and so we can update our inode cheaply).
- */
-void
-xfs_ichgtime_fast(
- xfs_inode_t *ip,
- struct inode *inode,
- int flags)
-{
- timespec_t *tvp;
-
- /*
- * Atime updates for read() & friends are handled lazily now, and
- * explicit updates must go through xfs_ichgtime()
- */
- ASSERT((flags & XFS_ICHGTIME_ACC) == 0);
-
- if (flags & XFS_ICHGTIME_MOD) {
- tvp = &inode->i_mtime;
- ip->i_d.di_mtime.t_sec = (__int32_t)tvp->tv_sec;
- ip->i_d.di_mtime.t_nsec = (__int32_t)tvp->tv_nsec;
- }
- if (flags & XFS_ICHGTIME_CHG) {
- tvp = &inode->i_ctime;
- ip->i_d.di_ctime.t_sec = (__int32_t)tvp->tv_sec;
- ip->i_d.di_ctime.t_nsec = (__int32_t)tvp->tv_nsec;
- }
-
- /*
- * We update the i_update_core field _after_ changing
- * the timestamps in order to coordinate properly with
- * xfs_iflush() so that we don't lose timestamp updates.
- * This keeps us from having to hold the inode lock
- * while doing this. We use the SYNCHRONIZE macro to
- * ensure that the compiler does not reorder the update
- * of i_update_core above the timestamp updates above.
- */
- SYNCHRONIZE();
- ip->i_update_core = 1;
- if (!(inode->i_state & I_NEW))
- mark_inode_dirty_sync(inode);
-}
-
-
-/*
- * Pull the link count and size up from the xfs inode to the linux inode
- */
-STATIC void
-xfs_validate_fields(
- struct inode *inode)
-{
- struct xfs_inode *ip = XFS_I(inode);
- loff_t size;
-
- /* we're under i_sem so i_size can't change under us */
- size = XFS_ISIZE(ip);
- if (i_size_read(inode) != size)
- i_size_write(inode, size);
-}
-
-/*
- * Hook in SELinux. This is not quite correct yet; what we really need
- * here (as we do for default ACLs) is a mechanism by which creation of
- * these attrs can be journalled at inode creation time (along with the
- * inode, of course, such that log replay can't cause these to be lost).
- */
-STATIC int
-xfs_init_security(
- struct inode *inode,
- struct inode *dir)
-{
- struct xfs_inode *ip = XFS_I(inode);
- size_t length;
- void *value;
- char *name;
- int error;
-
- error = security_inode_init_security(inode, dir, &name,
- &value, &length);
- if (error) {
- if (error == -EOPNOTSUPP)
- return 0;
- return -error;
- }
-
- error = xfs_attr_set(ip, name, value, length, ATTR_SECURE);
- if (!error)
- xfs_iflags_set(ip, XFS_IMODIFIED);
-
- kfree(name);
- kfree(value);
- return error;
-}
-
-static void
-xfs_dentry_to_name(
- struct xfs_name *namep,
- struct dentry *dentry)
-{
- namep->name = dentry->d_name.name;
- namep->len = dentry->d_name.len;
-}
-
-STATIC void
-xfs_cleanup_inode(
- struct inode *dir,
- struct inode *inode,
- struct dentry *dentry,
- int mode)
-{
- struct xfs_name teardown;
-
- /* Oh, the horror.
- * If we can't add the ACL or we fail in
- * xfs_init_security we must back out.
- * ENOSPC can hit here, among other things.
- */
- xfs_dentry_to_name(&teardown, dentry);
-
- if (S_ISDIR(mode))
- xfs_rmdir(XFS_I(dir), &teardown, XFS_I(inode));
- else
- xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
- iput(inode);
-}
-
-STATIC int
-xfs_vn_mknod(
- struct inode *dir,
- struct dentry *dentry,
- int mode,
- dev_t rdev)
-{
- struct inode *inode;
- struct xfs_inode *ip = NULL;
- xfs_acl_t *default_acl = NULL;
- struct xfs_name name;
- attrexists_t test_default_acl = _ACL_DEFAULT_EXISTS;
- int error;
-
- /*
- * Irix uses Missed'em'V split, but doesn't want to see
- * the upper 5 bits of (14bit) major.
- */
- if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
- return -EINVAL;
-
- if (test_default_acl && test_default_acl(dir)) {
- if (!_ACL_ALLOC(default_acl)) {
- return -ENOMEM;
- }
- if (!_ACL_GET_DEFAULT(dir, default_acl)) {
- _ACL_FREE(default_acl);
- default_acl = NULL;
- }
- }
-
- xfs_dentry_to_name(&name, dentry);
-
- if (IS_POSIXACL(dir) && !default_acl)
- mode &= ~current->fs->umask;
-
- switch (mode & S_IFMT) {
- case S_IFCHR:
- case S_IFBLK:
- case S_IFIFO:
- case S_IFSOCK:
- rdev = sysv_encode_dev(rdev);
- case S_IFREG:
- error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL);
- break;
- case S_IFDIR:
- error = xfs_mkdir(XFS_I(dir), &name, mode, &ip, NULL);
- break;
- default:
- error = EINVAL;
- break;
- }
-
- if (unlikely(error))
- goto out_free_acl;
-
- inode = ip->i_vnode;
-
- error = xfs_init_security(inode, dir);
- if (unlikely(error))
- goto out_cleanup_inode;
-
- if (default_acl) {
- error = _ACL_INHERIT(inode, mode, default_acl);
- if (unlikely(error))
- goto out_cleanup_inode;
- xfs_iflags_set(ip, XFS_IMODIFIED);
- _ACL_FREE(default_acl);
- }
-
-
- if (S_ISDIR(mode))
- xfs_validate_fields(inode);
- d_instantiate(dentry, inode);
- xfs_validate_fields(dir);
- return -error;
-
- out_cleanup_inode:
- xfs_cleanup_inode(dir, inode, dentry, mode);
- out_free_acl:
- if (default_acl)
- _ACL_FREE(default_acl);
- return -error;
-}
-
-STATIC int
-xfs_vn_create(
- struct inode *dir,
- struct dentry *dentry,
- int mode,
- struct nameidata *nd)
-{
- return xfs_vn_mknod(dir, dentry, mode, 0);
-}
-
-STATIC int
-xfs_vn_mkdir(
- struct inode *dir,
- struct dentry *dentry,
- int mode)
-{
- return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0);
-}
-
-STATIC struct dentry *
-xfs_vn_lookup(
- struct inode *dir,
- struct dentry *dentry,
- struct nameidata *nd)
-{
- struct xfs_inode *cip;
- struct xfs_name name;
- int error;
-
- if (dentry->d_name.len >= MAXNAMELEN)
- return ERR_PTR(-ENAMETOOLONG);
-
- xfs_dentry_to_name(&name, dentry);
- error = xfs_lookup(XFS_I(dir), &name, &cip);
- if (unlikely(error)) {
- if (unlikely(error != ENOENT))
- return ERR_PTR(-error);
- d_add(dentry, NULL);
- return NULL;
- }
-
- return d_splice_alias(cip->i_vnode, dentry);
-}
-
-STATIC int
-xfs_vn_link(
- struct dentry *old_dentry,
- struct inode *dir,
- struct dentry *dentry)
-{
- struct inode *inode; /* inode of guy being linked to */
- struct xfs_name name;
- int error;
-
- inode = old_dentry->d_inode;
- xfs_dentry_to_name(&name, dentry);
-
- igrab(inode);
- error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
- if (unlikely(error)) {
- iput(inode);
- return -error;
- }
-
- xfs_iflags_set(XFS_I(dir), XFS_IMODIFIED);
- xfs_validate_fields(inode);
- d_instantiate(dentry, inode);
- return 0;
-}
-
-STATIC int
-xfs_vn_unlink(
- struct inode *dir,
- struct dentry *dentry)
-{
- struct inode *inode;
- struct xfs_name name;
- int error;
-
- inode = dentry->d_inode;
- xfs_dentry_to_name(&name, dentry);
-
- error = xfs_remove(XFS_I(dir), &name, XFS_I(inode));
- if (likely(!error)) {
- xfs_validate_fields(dir); /* size needs update */
- xfs_validate_fields(inode);
- }
- return -error;
-}
-
-STATIC int
-xfs_vn_symlink(
- struct inode *dir,
- struct dentry *dentry,
- const char *symname)
-{
- struct inode *inode;
- struct xfs_inode *cip = NULL;
- struct xfs_name name;
- int error;
- mode_t mode;
-
- mode = S_IFLNK |
- (irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO);
- xfs_dentry_to_name(&name, dentry);
-
- error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip, NULL);
- if (unlikely(error))
- goto out;
-
- inode = cip->i_vnode;
-
- error = xfs_init_security(inode, dir);
- if (unlikely(error))
- goto out_cleanup_inode;
-
- d_instantiate(dentry, inode);
- xfs_validate_fields(dir);
- xfs_validate_fields(inode);
- return 0;
-
- out_cleanup_inode:
- xfs_cleanup_inode(dir, inode, dentry, 0);
- out:
- return -error;
-}
-
-STATIC int
-xfs_vn_rmdir(
- struct inode *dir,
- struct dentry *dentry)
-{
- struct inode *inode = dentry->d_inode;
- struct xfs_name name;
- int error;
-
- xfs_dentry_to_name(&name, dentry);
-
- error = xfs_rmdir(XFS_I(dir), &name, XFS_I(inode));
- if (likely(!error)) {
- xfs_validate_fields(inode);
- xfs_validate_fields(dir);
- }
- return -error;
-}
-
-STATIC int
-xfs_vn_rename(
- struct inode *odir,
- struct dentry *odentry,
- struct inode *ndir,
- struct dentry *ndentry)
-{
- struct inode *new_inode = ndentry->d_inode;
- struct xfs_name oname;
- struct xfs_name nname;
- int error;
-
- xfs_dentry_to_name(&oname, odentry);
- xfs_dentry_to_name(&nname, ndentry);
-
- error = xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode),
- XFS_I(ndir), &nname, new_inode ?
- XFS_I(new_inode) : NULL);
- if (likely(!error)) {
- if (new_inode)
- xfs_validate_fields(new_inode);
- xfs_validate_fields(odir);
- if (ndir != odir)
- xfs_validate_fields(ndir);
- }
- return -error;
-}
-
-/*
- * careful here - this function can get called recursively, so
- * we need to be very careful about how much stack we use.
- * uio is kmalloced for this reason...
- */
-STATIC void *
-xfs_vn_follow_link(
- struct dentry *dentry,
- struct nameidata *nd)
-{
- char *link;
- int error = -ENOMEM;
-
- link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);
- if (!link)
- goto out_err;
-
- error = -xfs_readlink(XFS_I(dentry->d_inode), link);
- if (unlikely(error))
- goto out_kfree;
-
- nd_set_link(nd, link);
- return NULL;
-
- out_kfree:
- kfree(link);
- out_err:
- nd_set_link(nd, ERR_PTR(error));
- return NULL;
-}
-
-STATIC void
-xfs_vn_put_link(
- struct dentry *dentry,
- struct nameidata *nd,
- void *p)
-{
- char *s = nd_get_link(nd);
-
- if (!IS_ERR(s))
- kfree(s);
-}
-
-#ifdef CONFIG_XFS_POSIX_ACL
-STATIC int
-xfs_check_acl(
- struct inode *inode,
- int mask)
-{
- struct xfs_inode *ip = XFS_I(inode);
- int error;
-
- xfs_itrace_entry(ip);
-
- if (XFS_IFORK_Q(ip)) {
- error = xfs_acl_iaccess(ip, mask, NULL);
- if (error != -1)
- return -error;
- }
-
- return -EAGAIN;
-}
-
-STATIC int
-xfs_vn_permission(
- struct inode *inode,
- int mask)
-{
- return generic_permission(inode, mask, xfs_check_acl);
-}
-#else
-#define xfs_vn_permission NULL
-#endif
-
-STATIC int
-xfs_vn_getattr(
- struct vfsmount *mnt,
- struct dentry *dentry,
- struct kstat *stat)
-{
- struct inode *inode = dentry->d_inode;
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
-
- xfs_itrace_entry(ip);
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return XFS_ERROR(EIO);
-
- stat->size = XFS_ISIZE(ip);
- stat->dev = inode->i_sb->s_dev;
- stat->mode = ip->i_d.di_mode;
- stat->nlink = ip->i_d.di_nlink;
- stat->uid = ip->i_d.di_uid;
- stat->gid = ip->i_d.di_gid;
- stat->ino = ip->i_ino;
-#if XFS_BIG_INUMS
- stat->ino += mp->m_inoadd;
-#endif
- stat->atime = inode->i_atime;
- stat->mtime.tv_sec = ip->i_d.di_mtime.t_sec;
- stat->mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
- stat->ctime.tv_sec = ip->i_d.di_ctime.t_sec;
- stat->ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
- stat->blocks =
- XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
-
-
- switch (inode->i_mode & S_IFMT) {
- case S_IFBLK:
- case S_IFCHR:
- stat->blksize = BLKDEV_IOSIZE;
- stat->rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
- sysv_minor(ip->i_df.if_u2.if_rdev));
- break;
- default:
- if (XFS_IS_REALTIME_INODE(ip)) {
- /*
- * If the file blocks are being allocated from a
- * realtime volume, then return the inode's realtime
- * extent size or the realtime volume's extent size.
- */
- stat->blksize =
- xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog;
- } else
- stat->blksize = xfs_preferred_iosize(mp);
- stat->rdev = 0;
- break;
- }
-
- return 0;
-}
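The blksize logic above is what user space eventually sees as st_blksize: either the realtime extent size hint or the filesystem's preferred I/O size. A minimal sketch, using only POSIX stat(2), to read those values back:

/* Read back what xfs_vn_getattr() reports: size, allocated blocks and
 * the preferred I/O size (st_blksize).
 */
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <path>\n", argv[0]);
		return 1;
	}
	if (stat(argv[1], &st) < 0) {
		perror("stat");
		return 1;
	}
	printf("size=%lld blocks(512B)=%lld preferred I/O size=%ld\n",
	       (long long)st.st_size, (long long)st.st_blocks,
	       (long)st.st_blksize);
	return 0;
}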
-
-STATIC int
-xfs_vn_setattr(
- struct dentry *dentry,
- struct iattr *attr)
-{
- struct inode *inode = dentry->d_inode;
- unsigned int ia_valid = attr->ia_valid;
- bhv_vattr_t vattr = { 0 };
- int flags = 0;
- int error;
-
- if (ia_valid & ATTR_UID) {
- vattr.va_mask |= XFS_AT_UID;
- vattr.va_uid = attr->ia_uid;
- }
- if (ia_valid & ATTR_GID) {
- vattr.va_mask |= XFS_AT_GID;
- vattr.va_gid = attr->ia_gid;
- }
- if (ia_valid & ATTR_SIZE) {
- vattr.va_mask |= XFS_AT_SIZE;
- vattr.va_size = attr->ia_size;
- }
- if (ia_valid & ATTR_ATIME) {
- vattr.va_mask |= XFS_AT_ATIME;
- vattr.va_atime = attr->ia_atime;
- inode->i_atime = attr->ia_atime;
- }
- if (ia_valid & ATTR_MTIME) {
- vattr.va_mask |= XFS_AT_MTIME;
- vattr.va_mtime = attr->ia_mtime;
- }
- if (ia_valid & ATTR_CTIME) {
- vattr.va_mask |= XFS_AT_CTIME;
- vattr.va_ctime = attr->ia_ctime;
- }
- if (ia_valid & ATTR_MODE) {
- vattr.va_mask |= XFS_AT_MODE;
- vattr.va_mode = attr->ia_mode;
- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
- inode->i_mode &= ~S_ISGID;
- }
-
- if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET))
- flags |= ATTR_UTIME;
-#ifdef ATTR_NO_BLOCK
- if ((ia_valid & ATTR_NO_BLOCK))
- flags |= ATTR_NONBLOCK;
-#endif
-
- error = xfs_setattr(XFS_I(inode), &vattr, flags, NULL);
- if (likely(!error))
- vn_revalidate(vn_from_inode(inode));
- return -error;
-}
-
-/*
- * block_truncate_page can return an error, but we can't propagate it
- * at all here. Leave a complaint + stack trace in the syslog because
- * this could be bad. If it is bad, we need to propagate the error further.
- */
-STATIC void
-xfs_vn_truncate(
- struct inode *inode)
-{
- int error;
- error = block_truncate_page(inode->i_mapping, inode->i_size,
- xfs_get_blocks);
- WARN_ON(error);
-}
-
-STATIC int
-xfs_vn_setxattr(
- struct dentry *dentry,
- const char *name,
- const void *data,
- size_t size,
- int flags)
-{
- bhv_vnode_t *vp = vn_from_inode(dentry->d_inode);
- char *attr = (char *)name;
- attrnames_t *namesp;
- int xflags = 0;
- int error;
-
- namesp = attr_lookup_namespace(attr, attr_namespaces, ATTR_NAMECOUNT);
- if (!namesp)
- return -EOPNOTSUPP;
- attr += namesp->attr_namelen;
- error = namesp->attr_capable(vp, NULL);
- if (error)
- return error;
-
- /* Convert Linux syscall to XFS internal ATTR flags */
- if (flags & XATTR_CREATE)
- xflags |= ATTR_CREATE;
- if (flags & XATTR_REPLACE)
- xflags |= ATTR_REPLACE;
- xflags |= namesp->attr_flag;
- return namesp->attr_set(vp, attr, (void *)data, size, xflags);
-}
-
-STATIC ssize_t
-xfs_vn_getxattr(
- struct dentry *dentry,
- const char *name,
- void *data,
- size_t size)
-{
- bhv_vnode_t *vp = vn_from_inode(dentry->d_inode);
- char *attr = (char *)name;
- attrnames_t *namesp;
- int xflags = 0;
- ssize_t error;
-
- namesp = attr_lookup_namespace(attr, attr_namespaces, ATTR_NAMECOUNT);
- if (!namesp)
- return -EOPNOTSUPP;
- attr += namesp->attr_namelen;
- error = namesp->attr_capable(vp, NULL);
- if (error)
- return error;
-
- /* Convert Linux syscall to XFS internal ATTR flags */
- if (!size) {
- xflags |= ATTR_KERNOVAL;
- data = NULL;
- }
- xflags |= namesp->attr_flag;
- return namesp->attr_get(vp, attr, (void *)data, size, xflags);
-}
-
-STATIC ssize_t
-xfs_vn_listxattr(
- struct dentry *dentry,
- char *data,
- size_t size)
-{
- bhv_vnode_t *vp = vn_from_inode(dentry->d_inode);
- int error, xflags = ATTR_KERNAMELS;
- ssize_t result;
-
- if (!size)
- xflags |= ATTR_KERNOVAL;
- xflags |= capable(CAP_SYS_ADMIN) ? ATTR_KERNFULLS : ATTR_KERNORMALS;
-
- error = attr_generic_list(vp, data, size, xflags, &result);
- if (error < 0)
- return error;
- return result;
-}
-
-STATIC int
-xfs_vn_removexattr(
- struct dentry *dentry,
- const char *name)
-{
- bhv_vnode_t *vp = vn_from_inode(dentry->d_inode);
- char *attr = (char *)name;
- attrnames_t *namesp;
- int xflags = 0;
- int error;
-
- namesp = attr_lookup_namespace(attr, attr_namespaces, ATTR_NAMECOUNT);
- if (!namesp)
- return -EOPNOTSUPP;
- attr += namesp->attr_namelen;
- error = namesp->attr_capable(vp, NULL);
- if (error)
- return error;
- xflags |= namesp->attr_flag;
- return namesp->attr_remove(vp, attr, xflags);
-}
-
-STATIC long
-xfs_vn_fallocate(
- struct inode *inode,
- int mode,
- loff_t offset,
- loff_t len)
-{
- long error;
- loff_t new_size = 0;
- xfs_flock64_t bf;
- xfs_inode_t *ip = XFS_I(inode);
-
- /* preallocation on directories not yet supported */
- error = -ENODEV;
- if (S_ISDIR(inode->i_mode))
- goto out_error;
-
- bf.l_whence = 0;
- bf.l_start = offset;
- bf.l_len = len;
-
- xfs_ilock(ip, XFS_IOLOCK_EXCL);
- error = xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
- 0, NULL, ATTR_NOLOCK);
- if (!error && !(mode & FALLOC_FL_KEEP_SIZE) &&
- offset + len > i_size_read(inode))
- new_size = offset + len;
-
- /* Change file size if needed */
- if (new_size) {
- bhv_vattr_t va;
-
- va.va_mask = XFS_AT_SIZE;
- va.va_size = new_size;
- error = xfs_setattr(ip, &va, ATTR_NOLOCK, NULL);
- }
-
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-out_error:
- return error;
-}
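The fallocate handler above reserves space via XFS_IOC_RESVSP and only pushes the inode size out when FALLOC_FL_KEEP_SIZE is not set. A small user-space sketch of both behaviours, using only the standard Linux fallocate(2) call (file name chosen for the example):

/* Observe the two paths handled above: KEEP_SIZE reserves blocks without
 * changing the visible size; plain fallocate also grows the file.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;
	int fd = open("prealloc.dat", O_CREAT | O_RDWR, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Reserve 1 MiB without changing the visible size. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		perror("fallocate KEEP_SIZE");
	fstat(fd, &st);
	printf("after KEEP_SIZE: size=%lld blocks=%lld\n",
	       (long long)st.st_size, (long long)st.st_blocks);

	/* Reserve and extend the file size, as the new_size path does. */
	if (fallocate(fd, 0, 0, 1 << 20) < 0)
		perror("fallocate");
	fstat(fd, &st);
	printf("after extend:    size=%lld blocks=%lld\n",
	       (long long)st.st_size, (long long)st.st_blocks);

	close(fd);
	return 0;
}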
-
-const struct inode_operations xfs_inode_operations = {
- .permission = xfs_vn_permission,
- .truncate = xfs_vn_truncate,
- .getattr = xfs_vn_getattr,
- .setattr = xfs_vn_setattr,
- .setxattr = xfs_vn_setxattr,
- .getxattr = xfs_vn_getxattr,
- .listxattr = xfs_vn_listxattr,
- .removexattr = xfs_vn_removexattr,
- .fallocate = xfs_vn_fallocate,
-};
-
-const struct inode_operations xfs_dir_inode_operations = {
- .create = xfs_vn_create,
- .lookup = xfs_vn_lookup,
- .link = xfs_vn_link,
- .unlink = xfs_vn_unlink,
- .symlink = xfs_vn_symlink,
- .mkdir = xfs_vn_mkdir,
- .rmdir = xfs_vn_rmdir,
- .mknod = xfs_vn_mknod,
- .rename = xfs_vn_rename,
- .permission = xfs_vn_permission,
- .getattr = xfs_vn_getattr,
- .setattr = xfs_vn_setattr,
- .setxattr = xfs_vn_setxattr,
- .getxattr = xfs_vn_getxattr,
- .listxattr = xfs_vn_listxattr,
- .removexattr = xfs_vn_removexattr,
-};
-
-const struct inode_operations xfs_symlink_inode_operations = {
- .readlink = generic_readlink,
- .follow_link = xfs_vn_follow_link,
- .put_link = xfs_vn_put_link,
- .permission = xfs_vn_permission,
- .getattr = xfs_vn_getattr,
- .setattr = xfs_vn_setattr,
- .setxattr = xfs_vn_setxattr,
- .getxattr = xfs_vn_getxattr,
- .listxattr = xfs_vn_listxattr,
- .removexattr = xfs_vn_removexattr,
-};
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
deleted file mode 100644
index 82333b3e118..00000000000
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ /dev/null
@@ -1,920 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
-#include "xfs_quota.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_bmap.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
-#include "xfs_rtalloc.h"
-#include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_rw.h"
-#include "xfs_acl.h"
-#include "xfs_attr.h"
-#include "xfs_inode_item.h"
-#include "xfs_buf_item.h"
-#include "xfs_utils.h"
-#include "xfs_iomap.h"
-#include "xfs_vnodeops.h"
-
-#include <linux/capability.h>
-#include <linux/mount.h>
-#include <linux/writeback.h>
-
-
-#if defined(XFS_RW_TRACE)
-void
-xfs_rw_enter_trace(
- int tag,
- xfs_inode_t *ip,
- void *data,
- size_t segs,
- loff_t offset,
- int ioflags)
-{
- if (ip->i_rwtrace == NULL)
- return;
- ktrace_enter(ip->i_rwtrace,
- (void *)(unsigned long)tag,
- (void *)ip,
- (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
- (void *)data,
- (void *)((unsigned long)segs),
- (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(offset & 0xffffffff)),
- (void *)((unsigned long)ioflags),
- (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
- (void *)((unsigned long)current_pid()),
- (void *)NULL,
- (void *)NULL,
- (void *)NULL,
- (void *)NULL);
-}
-
-void
-xfs_inval_cached_trace(
- xfs_inode_t *ip,
- xfs_off_t offset,
- xfs_off_t len,
- xfs_off_t first,
- xfs_off_t last)
-{
-
- if (ip->i_rwtrace == NULL)
- return;
- ktrace_enter(ip->i_rwtrace,
- (void *)(__psint_t)XFS_INVAL_CACHED,
- (void *)ip,
- (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(offset & 0xffffffff)),
- (void *)((unsigned long)((len >> 32) & 0xffffffff)),
- (void *)((unsigned long)(len & 0xffffffff)),
- (void *)((unsigned long)((first >> 32) & 0xffffffff)),
- (void *)((unsigned long)(first & 0xffffffff)),
- (void *)((unsigned long)((last >> 32) & 0xffffffff)),
- (void *)((unsigned long)(last & 0xffffffff)),
- (void *)((unsigned long)current_pid()),
- (void *)NULL,
- (void *)NULL,
- (void *)NULL,
- (void *)NULL,
- (void *)NULL);
-}
-#endif
-
-/*
- * xfs_iozero
- *
- * xfs_iozero clears the specified range of the buffer supplied,
- * and marks all the affected blocks as valid and modified. If
- * an affected block is not allocated, it will be allocated. If
- * an affected block is not completely overwritten, and is not
- * valid before the operation, it will be read from disk before
- * being partially zeroed.
- */
-STATIC int
-xfs_iozero(
- struct xfs_inode *ip, /* inode */
- loff_t pos, /* offset in file */
- size_t count) /* size of data to zero */
-{
- struct page *page;
- struct address_space *mapping;
- int status;
-
- mapping = ip->i_vnode->i_mapping;
- do {
- unsigned offset, bytes;
- void *fsdata;
-
- offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
- bytes = PAGE_CACHE_SIZE - offset;
- if (bytes > count)
- bytes = count;
-
- status = pagecache_write_begin(NULL, mapping, pos, bytes,
- AOP_FLAG_UNINTERRUPTIBLE,
- &page, &fsdata);
- if (status)
- break;
-
- zero_user(page, offset, bytes);
-
- status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
- page, fsdata);
- WARN_ON(status <= 0); /* can't return less than zero! */
- pos += bytes;
- count -= bytes;
- status = 0;
- } while (count);
-
- return (-status);
-}
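The loop above walks the byte range one page-cache page at a time: the chunk starts at the in-page offset and never crosses a page boundary. A stand-alone sketch of the same chunking arithmetic (page size hard-coded for the example):

/* Split an arbitrary (pos, count) byte range into per-page pieces, the
 * way the xfs_iozero() loop above does.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

int main(void)
{
	unsigned long pos = 5000;	/* example: starts mid-page */
	unsigned long count = 10000;

	while (count) {
		unsigned long offset = pos & (DEMO_PAGE_SIZE - 1);
		unsigned long bytes = DEMO_PAGE_SIZE - offset;

		if (bytes > count)
			bytes = count;

		printf("zero page %lu, offset %lu, %lu bytes\n",
		       pos / DEMO_PAGE_SIZE, offset, bytes);

		pos += bytes;
		count -= bytes;
	}
	return 0;
}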
-
-ssize_t /* bytes read, or (-) error */
-xfs_read(
- xfs_inode_t *ip,
- struct kiocb *iocb,
- const struct iovec *iovp,
- unsigned int segs,
- loff_t *offset,
- int ioflags)
-{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- xfs_mount_t *mp = ip->i_mount;
- size_t size = 0;
- ssize_t ret = 0;
- xfs_fsize_t n;
- unsigned long seg;
-
-
- XFS_STATS_INC(xs_read_calls);
-
- /* START copy & waste from filemap.c */
- for (seg = 0; seg < segs; seg++) {
- const struct iovec *iv = &iovp[seg];
-
- /*
- * If any segment has a negative length, or the cumulative
- * length ever wraps negative then return -EINVAL.
- */
- size += iv->iov_len;
- if (unlikely((ssize_t)(size|iv->iov_len) < 0))
- return XFS_ERROR(-EINVAL);
- }
- /* END copy & waste from filemap.c */
-
- if (unlikely(ioflags & IO_ISDIRECT)) {
- xfs_buftarg_t *target =
- XFS_IS_REALTIME_INODE(ip) ?
- mp->m_rtdev_targp : mp->m_ddev_targp;
- if ((*offset & target->bt_smask) ||
- (size & target->bt_smask)) {
- if (*offset == ip->i_size) {
- return (0);
- }
- return -XFS_ERROR(EINVAL);
- }
- }
-
- n = XFS_MAXIOFFSET(mp) - *offset;
- if ((n <= 0) || (size == 0))
- return 0;
-
- if (n < size)
- size = n;
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
- if (unlikely(ioflags & IO_ISDIRECT))
- mutex_lock(&inode->i_mutex);
- xfs_ilock(ip, XFS_IOLOCK_SHARED);
-
- if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
- int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
- int iolock = XFS_IOLOCK_SHARED;
-
- ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *offset, size,
- dmflags, &iolock);
- if (ret) {
- xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- if (unlikely(ioflags & IO_ISDIRECT))
- mutex_unlock(&inode->i_mutex);
- return ret;
- }
- }
-
- if (unlikely(ioflags & IO_ISDIRECT)) {
- if (inode->i_mapping->nrpages)
- ret = xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
- -1, FI_REMAPF_LOCKED);
- mutex_unlock(&inode->i_mutex);
- if (ret) {
- xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- return ret;
- }
- }
-
- xfs_rw_enter_trace(XFS_READ_ENTER, ip,
- (void *)iovp, segs, *offset, ioflags);
-
- iocb->ki_pos = *offset;
- ret = generic_file_aio_read(iocb, iovp, segs, *offset);
- if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
- ret = wait_on_sync_kiocb(iocb);
- if (ret > 0)
- XFS_STATS_ADD(xs_read_bytes, ret);
-
- xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- return ret;
-}
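The IO_ISDIRECT branch above rejects reads whose offset or length is not aligned to the target device's sector mask. From user space, the usual discipline for O_DIRECT is to align the buffer, the offset and the length; a minimal sketch using posix_memalign and a 4096-byte alignment (a common, conservative choice; the real requirement is the device sector size):

/* O_DIRECT read with aligned buffer, offset and length, matching the
 * bt_smask checks in xfs_read() above.  EINVAL usually means bad alignment.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

#define ALIGN_SIZE 4096

int main(int argc, char **argv)
{
	void *buf;
	ssize_t n;
	int fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open O_DIRECT");
		return 1;
	}
	if (posix_memalign(&buf, ALIGN_SIZE, ALIGN_SIZE)) {
		close(fd);
		return 1;
	}
	/* offset (0) and length (ALIGN_SIZE) are both sector aligned */
	n = pread(fd, buf, ALIGN_SIZE, 0);
	if (n < 0)
		perror("pread");
	else
		printf("read %zd bytes\n", n);

	free(buf);
	close(fd);
	return 0;
}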
-
-ssize_t
-xfs_splice_read(
- xfs_inode_t *ip,
- struct file *infilp,
- loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t count,
- int flags,
- int ioflags)
-{
- xfs_mount_t *mp = ip->i_mount;
- ssize_t ret;
-
- XFS_STATS_INC(xs_read_calls);
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
- return -EIO;
-
- xfs_ilock(ip, XFS_IOLOCK_SHARED);
-
- if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
- int iolock = XFS_IOLOCK_SHARED;
- int error;
-
- error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
- FILP_DELAY_FLAG(infilp), &iolock);
- if (error) {
- xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- return -error;
- }
- }
- xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip,
- pipe, count, *ppos, ioflags);
- ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
- if (ret > 0)
- XFS_STATS_ADD(xs_read_bytes, ret);
-
- xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- return ret;
-}
-
-ssize_t
-xfs_splice_write(
- xfs_inode_t *ip,
- struct pipe_inode_info *pipe,
- struct file *outfilp,
- loff_t *ppos,
- size_t count,
- int flags,
- int ioflags)
-{
- xfs_mount_t *mp = ip->i_mount;
- ssize_t ret;
- struct inode *inode = outfilp->f_mapping->host;
- xfs_fsize_t isize, new_size;
-
- XFS_STATS_INC(xs_write_calls);
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
- return -EIO;
-
- xfs_ilock(ip, XFS_IOLOCK_EXCL);
-
- if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
- int iolock = XFS_IOLOCK_EXCL;
- int error;
-
- error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
- FILP_DELAY_FLAG(outfilp), &iolock);
- if (error) {
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return -error;
- }
- }
-
- new_size = *ppos + count;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- if (new_size > ip->i_size)
- ip->i_new_size = new_size;
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
- xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip,
- pipe, count, *ppos, ioflags);
- ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
- if (ret > 0)
- XFS_STATS_ADD(xs_write_bytes, ret);
-
- isize = i_size_read(inode);
- if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
- *ppos = isize;
-
- if (*ppos > ip->i_size) {
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- if (*ppos > ip->i_size)
- ip->i_size = *ppos;
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- }
-
- if (ip->i_new_size) {
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- ip->i_new_size = 0;
- if (ip->i_d.di_size > ip->i_size)
- ip->i_d.di_size = ip->i_size;
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- }
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return ret;
-}
-
-/*
- * This routine is called to handle zeroing any space in the last
- * block of the file that is beyond the EOF. We do this since the
- * size is being increased without writing anything to that block
- * and we don't want anyone to read the garbage on the disk.
- */
-STATIC int /* error (positive) */
-xfs_zero_last_block(
- xfs_inode_t *ip,
- xfs_fsize_t offset,
- xfs_fsize_t isize)
-{
- xfs_fileoff_t last_fsb;
- xfs_mount_t *mp = ip->i_mount;
- int nimaps;
- int zero_offset;
- int zero_len;
- int error = 0;
- xfs_bmbt_irec_t imap;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
- zero_offset = XFS_B_FSB_OFFSET(mp, isize);
- if (zero_offset == 0) {
- /*
- * There are no extra bytes in the last block on disk to
- * zero, so return.
- */
- return 0;
- }
-
- last_fsb = XFS_B_TO_FSBT(mp, isize);
- nimaps = 1;
- error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
- &nimaps, NULL, NULL);
- if (error) {
- return error;
- }
- ASSERT(nimaps > 0);
- /*
- * If the block underlying isize is just a hole, then there
- * is nothing to zero.
- */
- if (imap.br_startblock == HOLESTARTBLOCK) {
- return 0;
- }
- /*
- * Zero the part of the last block beyond the EOF, and write it
- * out sync. We need to drop the ilock while we do this so we
- * don't deadlock when the buffer cache calls back to us.
- */
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
- zero_len = mp->m_sb.sb_blocksize - zero_offset;
- if (isize + zero_len > offset)
- zero_len = offset - isize;
- error = xfs_iozero(ip, isize, zero_len);
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- ASSERT(error >= 0);
- return error;
-}
-
-/*
- * Zero any on disk space between the current EOF and the new,
- * larger EOF. This handles the normal case of zeroing the remainder
- * of the last block in the file and the unusual case of zeroing blocks
- * out beyond the size of the file. This second case only happens
- * with fixed size extents and when the system crashes before the inode
- * size was updated but after blocks were allocated. If fill is set,
- * then any holes in the range are filled and zeroed. If not, the holes
- * are left alone as holes.
- */
-
-int /* error (positive) */
-xfs_zero_eof(
- xfs_inode_t *ip,
- xfs_off_t offset, /* starting I/O offset */
- xfs_fsize_t isize) /* current inode size */
-{
- xfs_mount_t *mp = ip->i_mount;
- xfs_fileoff_t start_zero_fsb;
- xfs_fileoff_t end_zero_fsb;
- xfs_fileoff_t zero_count_fsb;
- xfs_fileoff_t last_fsb;
- xfs_fileoff_t zero_off;
- xfs_fsize_t zero_len;
- int nimaps;
- int error = 0;
- xfs_bmbt_irec_t imap;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
- ASSERT(offset > isize);
-
- /*
- * First handle zeroing the block on which isize resides.
- * We only zero a part of that block so it is handled specially.
- */
- error = xfs_zero_last_block(ip, offset, isize);
- if (error) {
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
- return error;
- }
-
- /*
- * Calculate the range between the new size and the old
- * where blocks needing to be zeroed may exist. To get the
- * block where the last byte in the file currently resides,
- * we need to subtract one from the size and truncate back
- * to a block boundary. We subtract 1 in case the size is
- * exactly on a block boundary.
- */
- last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
- start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
- end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
- ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
- if (last_fsb == end_zero_fsb) {
- /*
- * The size was only incremented on its last block.
- * We took care of that above, so just return.
- */
- return 0;
- }
-
- ASSERT(start_zero_fsb <= end_zero_fsb);
- while (start_zero_fsb <= end_zero_fsb) {
- nimaps = 1;
- zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
- error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
- 0, NULL, 0, &imap, &nimaps, NULL, NULL);
- if (error) {
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
- return error;
- }
- ASSERT(nimaps > 0);
-
- if (imap.br_state == XFS_EXT_UNWRITTEN ||
- imap.br_startblock == HOLESTARTBLOCK) {
- /*
- * This loop handles initializing pages that were
- * partially initialized by the code below this
- * loop. It basically zeroes the part of the page
- * that sits on a hole and sets the page as P_HOLE
- * and calls remapf if it is a mapped file.
- */
- start_zero_fsb = imap.br_startoff + imap.br_blockcount;
- ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
- continue;
- }
-
- /*
- * There are blocks we need to zero.
- * Drop the inode lock while we're doing the I/O.
- * We'll still have the iolock to protect us.
- */
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
- zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
- zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
-
- if ((zero_off + zero_len) > offset)
- zero_len = offset - zero_off;
-
- error = xfs_iozero(ip, zero_off, zero_len);
- if (error) {
- goto out_lock;
- }
-
- start_zero_fsb = imap.br_startoff + imap.br_blockcount;
- ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- }
-
- return 0;
-
-out_lock:
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- ASSERT(error >= 0);
- return error;
-}
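The comment block above describes the byte-to-block arithmetic: find the block holding the last existing byte, the first whole block at or beyond the old EOF, and the last block before the new write offset. A stand-alone sketch of that calculation with example values (block size and sizes are illustrative):

/* The block arithmetic used by xfs_zero_eof(): which whole blocks lie
 * between the old EOF and the start of the new write.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long isize = 10000;	/* current EOF (example) */
	unsigned long long offset = 50000;	/* start of the new write */
	unsigned long blocksize = 4096;

	/* block holding the last existing byte (isize - 1) */
	unsigned long long last_fsb = (isize - 1) / blocksize;
	/* first block at or after the old EOF, rounded up */
	unsigned long long start_zero_fsb = (isize + blocksize - 1) / blocksize;
	/* last block before the new write offset */
	unsigned long long end_zero_fsb = (offset - 1) / blocksize;

	printf("last block of old data: %llu\n", last_fsb);
	if (start_zero_fsb > end_zero_fsb)
		printf("nothing beyond the EOF block needs zeroing\n");
	else
		printf("zero blocks %llu..%llu\n",
		       start_zero_fsb, end_zero_fsb);
	return 0;
}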
-
-ssize_t /* bytes written, or (-) error */
-xfs_write(
- struct xfs_inode *xip,
- struct kiocb *iocb,
- const struct iovec *iovp,
- unsigned int nsegs,
- loff_t *offset,
- int ioflags)
-{
- struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- unsigned long segs = nsegs;
- xfs_mount_t *mp;
- ssize_t ret = 0, error = 0;
- xfs_fsize_t isize, new_size;
- int iolock;
- int eventsent = 0;
- size_t ocount = 0, count;
- loff_t pos;
- int need_i_mutex;
-
- XFS_STATS_INC(xs_write_calls);
-
- error = generic_segment_checks(iovp, &segs, &ocount, VERIFY_READ);
- if (error)
- return error;
-
- count = ocount;
- pos = *offset;
-
- if (count == 0)
- return 0;
-
- mp = xip->i_mount;
-
- xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
-relock:
- if (ioflags & IO_ISDIRECT) {
- iolock = XFS_IOLOCK_SHARED;
- need_i_mutex = 0;
- } else {
- iolock = XFS_IOLOCK_EXCL;
- need_i_mutex = 1;
- mutex_lock(&inode->i_mutex);
- }
-
- xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
-
-start:
- error = -generic_write_checks(file, &pos, &count,
- S_ISBLK(inode->i_mode));
- if (error) {
- xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
- goto out_unlock_mutex;
- }
-
- if ((DM_EVENT_ENABLED(xip, DM_EVENT_WRITE) &&
- !(ioflags & IO_INVIS) && !eventsent)) {
- int dmflags = FILP_DELAY_FLAG(file);
-
- if (need_i_mutex)
- dmflags |= DM_FLAGS_IMUX;
-
- xfs_iunlock(xip, XFS_ILOCK_EXCL);
- error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, xip,
- pos, count, dmflags, &iolock);
- if (error) {
- goto out_unlock_internal;
- }
- xfs_ilock(xip, XFS_ILOCK_EXCL);
- eventsent = 1;
-
- /*
- * The iolock was dropped and reacquired in XFS_SEND_DATA
- * so we have to recheck the size when appending.
- * We will only "goto start;" once, since having sent the
- * event prevents another call to XFS_SEND_DATA, which is
- * what allows the size to change in the first place.
- */
- if ((file->f_flags & O_APPEND) && pos != xip->i_size)
- goto start;
- }
-
- if (ioflags & IO_ISDIRECT) {
- xfs_buftarg_t *target =
- XFS_IS_REALTIME_INODE(xip) ?
- mp->m_rtdev_targp : mp->m_ddev_targp;
-
- if ((pos & target->bt_smask) || (count & target->bt_smask)) {
- xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
- return XFS_ERROR(-EINVAL);
- }
-
- if (!need_i_mutex && (mapping->nrpages || pos > xip->i_size)) {
- xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
- iolock = XFS_IOLOCK_EXCL;
- need_i_mutex = 1;
- mutex_lock(&inode->i_mutex);
- xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
- goto start;
- }
- }
-
- new_size = pos + count;
- if (new_size > xip->i_size)
- xip->i_new_size = new_size;
-
- /*
- * We're not supposed to change timestamps in readonly-mounted
- * filesystems. Throw it away if anyone asks us.
- */
- if (likely(!(ioflags & IO_INVIS) &&
- !mnt_want_write(file->f_path.mnt))) {
- file_update_time(file);
- xfs_ichgtime_fast(xip, inode,
- XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- mnt_drop_write(file->f_path.mnt);
- }
-
- /*
- * If the offset is beyond the size of the file, we have a couple
- * of things to do. First, if there is already space allocated
- * we need to either create holes or zero the disk or ...
- *
- * If there is a page where the previous size lands, we need
- * to zero it out up to the new size.
- */
-
- if (pos > xip->i_size) {
- error = xfs_zero_eof(xip, pos, xip->i_size);
- if (error) {
- xfs_iunlock(xip, XFS_ILOCK_EXCL);
- goto out_unlock_internal;
- }
- }
- xfs_iunlock(xip, XFS_ILOCK_EXCL);
-
- /*
- * If we're writing the file then make sure to clear the
- * setuid and setgid bits if the process is not being run
- * by root. This keeps people from modifying setuid and
- * setgid binaries.
- */
-
- if (((xip->i_d.di_mode & S_ISUID) ||
- ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
- (S_ISGID | S_IXGRP))) &&
- !capable(CAP_FSETID)) {
- error = xfs_write_clear_setuid(xip);
- if (likely(!error))
- error = -file_remove_suid(file);
- if (unlikely(error)) {
- goto out_unlock_internal;
- }
- }
-
-retry:
- /* We can write back this queue in page reclaim */
- current->backing_dev_info = mapping->backing_dev_info;
-
- if ((ioflags & IO_ISDIRECT)) {
- if (mapping->nrpages) {
- WARN_ON(need_i_mutex == 0);
- xfs_inval_cached_trace(xip, pos, -1,
- (pos & PAGE_CACHE_MASK), -1);
- error = xfs_flushinval_pages(xip,
- (pos & PAGE_CACHE_MASK),
- -1, FI_REMAPF_LOCKED);
- if (error)
- goto out_unlock_internal;
- }
-
- if (need_i_mutex) {
- /* demote the lock now the cached pages are gone */
- xfs_ilock_demote(xip, XFS_IOLOCK_EXCL);
- mutex_unlock(&inode->i_mutex);
-
- iolock = XFS_IOLOCK_SHARED;
- need_i_mutex = 0;
- }
-
- xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs,
- *offset, ioflags);
- ret = generic_file_direct_write(iocb, iovp,
- &segs, pos, offset, count, ocount);
-
- /*
- * direct-io write to a hole: fall through to buffered I/O
- * for completing the rest of the request.
- */
- if (ret >= 0 && ret != count) {
- XFS_STATS_ADD(xs_write_bytes, ret);
-
- pos += ret;
- count -= ret;
-
- ioflags &= ~IO_ISDIRECT;
- xfs_iunlock(xip, iolock);
- goto relock;
- }
- } else {
- xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs,
- *offset, ioflags);
- ret = generic_file_buffered_write(iocb, iovp, segs,
- pos, offset, count, ret);
- }
-
- current->backing_dev_info = NULL;
-
- if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
- ret = wait_on_sync_kiocb(iocb);
-
- if (ret == -ENOSPC &&
- DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
- xfs_iunlock(xip, iolock);
- if (need_i_mutex)
- mutex_unlock(&inode->i_mutex);
- error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, xip,
- DM_RIGHT_NULL, xip, DM_RIGHT_NULL, NULL, NULL,
- 0, 0, 0); /* Delay flag intentionally unused */
- if (need_i_mutex)
- mutex_lock(&inode->i_mutex);
- xfs_ilock(xip, iolock);
- if (error)
- goto out_unlock_internal;
- pos = xip->i_size;
- ret = 0;
- goto retry;
- }
-
- isize = i_size_read(inode);
- if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
- *offset = isize;
-
- if (*offset > xip->i_size) {
- xfs_ilock(xip, XFS_ILOCK_EXCL);
- if (*offset > xip->i_size)
- xip->i_size = *offset;
- xfs_iunlock(xip, XFS_ILOCK_EXCL);
- }
-
- error = -ret;
- if (ret <= 0)
- goto out_unlock_internal;
-
- XFS_STATS_ADD(xs_write_bytes, ret);
-
- /* Handle various SYNC-type writes */
- if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
- int error2;
-
- xfs_iunlock(xip, iolock);
- if (need_i_mutex)
- mutex_unlock(&inode->i_mutex);
- error2 = sync_page_range(inode, mapping, pos, ret);
- if (!error)
- error = error2;
- if (need_i_mutex)
- mutex_lock(&inode->i_mutex);
- xfs_ilock(xip, iolock);
- error2 = xfs_write_sync_logforce(mp, xip);
- if (!error)
- error = error2;
- }
-
- out_unlock_internal:
- if (xip->i_new_size) {
- xfs_ilock(xip, XFS_ILOCK_EXCL);
- xip->i_new_size = 0;
- /*
- * If this was a direct or synchronous I/O that failed (such
- * as ENOSPC) then part of the I/O may have been written to
- * disk before the error occurred. In this case the on-disk
- * file size may have been adjusted beyond the in-memory file
- * size and now needs to be truncated back.
- */
- if (xip->i_d.di_size > xip->i_size)
- xip->i_d.di_size = xip->i_size;
- xfs_iunlock(xip, XFS_ILOCK_EXCL);
- }
- xfs_iunlock(xip, iolock);
- out_unlock_mutex:
- if (need_i_mutex)
- mutex_unlock(&inode->i_mutex);
- return -error;
-}
-
-/*
- * All xfs metadata buffers except log state machine buffers
- * get this attached as their b_bdstrat callback function.
- * This is so that we can catch a buffer
- * after prematurely unpinning it to forcibly shutdown the filesystem.
- */
-int
-xfs_bdstrat_cb(struct xfs_buf *bp)
-{
- xfs_mount_t *mp;
-
- mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
- if (!XFS_FORCED_SHUTDOWN(mp)) {
- xfs_buf_iorequest(bp);
- return 0;
- } else {
- xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
- /*
- * Metadata write that didn't get logged but
- * written delayed anyway. These aren't associated
- * with a transaction, and can be ignored.
- */
- if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
- (XFS_BUF_ISREAD(bp)) == 0)
- return (xfs_bioerror_relse(bp));
- else
- return (xfs_bioerror(bp));
- }
-}
-
-/*
- * Wrapper around bdstrat so that we can stop data from going to disk in case
- * we are shutting down the filesystem. Typically user data goes thru this
- * path; one of the exceptions is the superblock.
- */
-void
-xfsbdstrat(
- struct xfs_mount *mp,
- struct xfs_buf *bp)
-{
- ASSERT(mp);
- if (!XFS_FORCED_SHUTDOWN(mp)) {
- xfs_buf_iorequest(bp);
- return;
- }
-
- xfs_buftrace("XFSBDSTRAT IOERROR", bp);
- xfs_bioerror_relse(bp);
-}
-
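[Editor's note] The removed xfs_bdstrat_cb()/xfsbdstrat() pair gate buffer I/O on a forced-shutdown check: if the filesystem has been shut down, the buffer is failed immediately instead of being sent to the device. A minimal userspace sketch of that gating idea follows; issue_io(), fail_io() and the global flag are invented stand-ins, not kernel APIs.

#include <stdio.h>

static int forced_shutdown;	/* stands in for XFS_FORCED_SHUTDOWN(mp) */

static void issue_io(const char *buf)
{
	printf("I/O issued: %s\n", buf);
}

static int fail_io(const char *buf)
{
	printf("I/O failed: %s\n", buf);
	return -5;		/* -EIO in the original */
}

/* Gate: only touch the device while the filesystem is healthy. */
static int strat(const char *buf)
{
	if (!forced_shutdown) {
		issue_io(buf);
		return 0;
	}
	return fail_io(buf);
}

int main(void)
{
	strat("superblock");		/* goes to the device */
	forced_shutdown = 1;
	strat("inode cluster");		/* rejected after shutdown */
	return 0;
}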
-/*
- * If the underlying (data/log/rt) device is readonly, there are some
- * operations that cannot proceed.
- */
-int
-xfs_dev_is_read_only(
- xfs_mount_t *mp,
- char *message)
-{
- if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
- xfs_readonly_buftarg(mp->m_logdev_targp) ||
- (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
- cmn_err(CE_NOTE,
- "XFS: %s required on read-only device.", message);
- cmn_err(CE_NOTE,
- "XFS: write access unavailable, cannot proceed.");
- return EROFS;
- }
- return 0;
-}
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h
deleted file mode 100644
index e6be37dbd0e..00000000000
--- a/fs/xfs/linux-2.6/xfs_lrw.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_LRW_H__
-#define __XFS_LRW_H__
-
-struct xfs_mount;
-struct xfs_inode;
-struct xfs_bmbt_irec;
-struct xfs_buf;
-struct xfs_iomap;
-
-#if defined(XFS_RW_TRACE)
-/*
- * Defines for the trace mechanisms in xfs_lrw.c.
- */
-#define XFS_RW_KTRACE_SIZE 128
-
-#define XFS_READ_ENTER 1
-#define XFS_WRITE_ENTER 2
-#define XFS_IOMAP_READ_ENTER 3
-#define XFS_IOMAP_WRITE_ENTER 4
-#define XFS_IOMAP_READ_MAP 5
-#define XFS_IOMAP_WRITE_MAP 6
-#define XFS_IOMAP_WRITE_NOSPACE 7
-#define XFS_ITRUNC_START 8
-#define XFS_ITRUNC_FINISH1 9
-#define XFS_ITRUNC_FINISH2 10
-#define XFS_CTRUNC1 11
-#define XFS_CTRUNC2 12
-#define XFS_CTRUNC3 13
-#define XFS_CTRUNC4 14
-#define XFS_CTRUNC5 15
-#define XFS_CTRUNC6 16
-#define XFS_BUNMAP 17
-#define XFS_INVAL_CACHED 18
-#define XFS_DIORD_ENTER 19
-#define XFS_DIOWR_ENTER 20
-#define XFS_WRITEPAGE_ENTER 22
-#define XFS_RELEASEPAGE_ENTER 23
-#define XFS_INVALIDPAGE_ENTER 24
-#define XFS_IOMAP_ALLOC_ENTER 25
-#define XFS_IOMAP_ALLOC_MAP 26
-#define XFS_IOMAP_UNWRITTEN 27
-#define XFS_SPLICE_READ_ENTER 28
-#define XFS_SPLICE_WRITE_ENTER 29
-extern void xfs_rw_enter_trace(int, struct xfs_inode *,
- void *, size_t, loff_t, int);
-extern void xfs_inval_cached_trace(struct xfs_inode *,
- xfs_off_t, xfs_off_t, xfs_off_t, xfs_off_t);
-#else
-#define xfs_rw_enter_trace(tag, ip, data, size, offset, ioflags)
-#define xfs_inval_cached_trace(ip, offset, len, first, last)
-#endif
-
-/* errors from xfsbdstrat() must be extracted from the buffer */
-extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
-extern int xfs_bdstrat_cb(struct xfs_buf *);
-extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
-
-extern int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
-
-#endif /* __XFS_LRW_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c
deleted file mode 100644
index e480b610205..00000000000
--- a/fs/xfs/linux-2.6/xfs_stats.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include <linux/proc_fs.h>
-
-DEFINE_PER_CPU(struct xfsstats, xfsstats);
-
-STATIC int
-xfs_read_xfsstats(
- char *buffer,
- char **start,
- off_t offset,
- int count,
- int *eof,
- void *data)
-{
- int c, i, j, len, val;
- __uint64_t xs_xstrat_bytes = 0;
- __uint64_t xs_write_bytes = 0;
- __uint64_t xs_read_bytes = 0;
-
- static const struct xstats_entry {
- char *desc;
- int endpoint;
- } xstats[] = {
- { "extent_alloc", XFSSTAT_END_EXTENT_ALLOC },
- { "abt", XFSSTAT_END_ALLOC_BTREE },
- { "blk_map", XFSSTAT_END_BLOCK_MAPPING },
- { "bmbt", XFSSTAT_END_BLOCK_MAP_BTREE },
- { "dir", XFSSTAT_END_DIRECTORY_OPS },
- { "trans", XFSSTAT_END_TRANSACTIONS },
- { "ig", XFSSTAT_END_INODE_OPS },
- { "log", XFSSTAT_END_LOG_OPS },
- { "push_ail", XFSSTAT_END_TAIL_PUSHING },
- { "xstrat", XFSSTAT_END_WRITE_CONVERT },
- { "rw", XFSSTAT_END_READ_WRITE_OPS },
- { "attr", XFSSTAT_END_ATTRIBUTE_OPS },
- { "icluster", XFSSTAT_END_INODE_CLUSTER },
- { "vnodes", XFSSTAT_END_VNODE_OPS },
- { "buf", XFSSTAT_END_BUF },
- };
-
- /* Loop over all stats groups */
- for (i=j=len = 0; i < ARRAY_SIZE(xstats); i++) {
- len += sprintf(buffer + len, xstats[i].desc);
- /* inner loop does each group */
- while (j < xstats[i].endpoint) {
- val = 0;
- /* sum over all cpus */
- for_each_possible_cpu(c)
- val += *(((__u32*)&per_cpu(xfsstats, c) + j));
- len += sprintf(buffer + len, " %u", val);
- j++;
- }
- buffer[len++] = '\n';
- }
- /* extra precision counters */
- for_each_possible_cpu(i) {
- xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
- xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
- xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
- }
-
- len += sprintf(buffer + len, "xpc %Lu %Lu %Lu\n",
- xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
- len += sprintf(buffer + len, "debug %u\n",
-#if defined(DEBUG)
- 1);
-#else
- 0);
-#endif
-
- if (offset >= len) {
- *start = buffer;
- *eof = 1;
- return 0;
- }
- *start = buffer + offset;
- if ((len -= offset) > count)
- return count;
- *eof = 1;
-
- return len;
-}
-
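[Editor's note] The removed xfs_read_xfsstats() walks a table of named groups, where each group's "endpoint" is a cumulative index into a flat array of __u32 counters, and prints the per-counter sum across every per-CPU copy. A small userspace sketch of the same summation, with made-up counter data in place of the kernel's per_cpu() machinery:

#include <stdio.h>

#define NCPUS 4

/* Flat per-"CPU" counter arrays; the real code uses per_cpu(xfsstats, c). */
static unsigned int stats[NCPUS][6] = {
	{ 1, 2, 3, 4, 5, 6 },
	{ 1, 1, 1, 1, 1, 1 },
};

/* Each group names a contiguous range of counters; endpoints are cumulative. */
static const struct { const char *desc; int endpoint; } groups[] = {
	{ "extent_alloc", 4 },		/* counters 0..3 */
	{ "abt",          6 },		/* counters 4..5 */
};

int main(void)
{
	int i, j = 0, c;

	for (i = 0; i < (int)(sizeof(groups) / sizeof(groups[0])); i++) {
		printf("%s", groups[i].desc);
		/* j deliberately carries over from group to group */
		for (; j < groups[i].endpoint; j++) {
			unsigned int val = 0;

			for (c = 0; c < NCPUS; c++)
				val += stats[c][j];
			printf(" %u", val);
		}
		printf("\n");
	}
	return 0;
}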
-void
-xfs_init_procfs(void)
-{
- if (!proc_mkdir("fs/xfs", NULL))
- return;
- create_proc_read_entry("fs/xfs/stat", 0, NULL, xfs_read_xfsstats, NULL);
-}
-
-void
-xfs_cleanup_procfs(void)
-{
- remove_proc_entry("fs/xfs/stat", NULL);
- remove_proc_entry("fs/xfs", NULL);
-}
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
deleted file mode 100644
index 943381284e2..00000000000
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ /dev/null
@@ -1,1488 +0,0 @@
-/*
- * Copyright (c) 2000-2006 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_clnt.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
-#include "xfs_quota.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
-#include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_fsops.h"
-#include "xfs_rw.h"
-#include "xfs_acl.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
-#include "xfs_utils.h"
-#include "xfs_vnodeops.h"
-#include "xfs_vfsops.h"
-#include "xfs_version.h"
-#include "xfs_log_priv.h"
-#include "xfs_trans_priv.h"
-
-#include <linux/namei.h>
-#include <linux/init.h>
-#include <linux/mount.h>
-#include <linux/mempool.h>
-#include <linux/writeback.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
-
-static struct quotactl_ops xfs_quotactl_operations;
-static struct super_operations xfs_super_operations;
-static kmem_zone_t *xfs_vnode_zone;
-static kmem_zone_t *xfs_ioend_zone;
-mempool_t *xfs_ioend_pool;
-
-STATIC struct xfs_mount_args *
-xfs_args_allocate(
- struct super_block *sb,
- int silent)
-{
- struct xfs_mount_args *args;
-
- args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
- args->logbufs = args->logbufsize = -1;
- strncpy(args->fsname, sb->s_id, MAXNAMELEN);
-
- /* Copy the already-parsed mount(2) flags we're interested in */
- if (sb->s_flags & MS_DIRSYNC)
- args->flags |= XFSMNT_DIRSYNC;
- if (sb->s_flags & MS_SYNCHRONOUS)
- args->flags |= XFSMNT_WSYNC;
- if (silent)
- args->flags |= XFSMNT_QUIET;
- args->flags |= XFSMNT_32BITINODES;
-
- return args;
-}
-
-#define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */
-#define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */
-#define MNTOPT_LOGDEV "logdev" /* log device */
-#define MNTOPT_RTDEV "rtdev" /* realtime I/O device */
-#define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */
-#define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */
-#define MNTOPT_INO64 "ino64" /* force inodes into 64-bit range */
-#define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */
-#define MNTOPT_SWALLOC "swalloc" /* turn on stripe width allocation */
-#define MNTOPT_SUNIT "sunit" /* data volume stripe unit */
-#define MNTOPT_SWIDTH "swidth" /* data volume stripe width */
-#define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */
-#define MNTOPT_MTPT "mtpt" /* filesystem mount point */
-#define MNTOPT_GRPID "grpid" /* group-ID from parent directory */
-#define MNTOPT_NOGRPID "nogrpid" /* group-ID from current process */
-#define MNTOPT_BSDGROUPS "bsdgroups" /* group-ID from parent directory */
-#define MNTOPT_SYSVGROUPS "sysvgroups" /* group-ID from current process */
-#define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */
-#define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */
-#define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and
- * unwritten extent conversion */
-#define MNTOPT_NOBARRIER "nobarrier" /* .. disable */
-#define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */
-#define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */
-#define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */
-#define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */
-#define MNTOPT_LARGEIO "largeio" /* report large I/O sizes in stat() */
-#define MNTOPT_NOLARGEIO "nolargeio" /* do not report large I/O sizes
- * in stat(). */
-#define MNTOPT_ATTR2 "attr2" /* do use attr2 attribute format */
-#define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */
-#define MNTOPT_FILESTREAM "filestreams" /* use filestreams allocator */
-#define MNTOPT_QUOTA "quota" /* disk quotas (user) */
-#define MNTOPT_NOQUOTA "noquota" /* no quotas */
-#define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */
-#define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */
-#define MNTOPT_PRJQUOTA "prjquota" /* project quota enabled */
-#define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */
-#define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */
-#define MNTOPT_PQUOTA "pquota" /* project quota (IRIX variant) */
-#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */
-#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
-#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
-#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */
-#define MNTOPT_DMAPI "dmapi" /* DMI enabled (DMAPI / XDSM) */
-#define MNTOPT_XDSM "xdsm" /* DMI enabled (DMAPI / XDSM) */
-#define MNTOPT_DMI "dmi" /* DMI enabled (DMAPI / XDSM) */
-
-STATIC unsigned long
-suffix_strtoul(char *s, char **endp, unsigned int base)
-{
- int last, shift_left_factor = 0;
- char *value = s;
-
- last = strlen(value) - 1;
- if (value[last] == 'K' || value[last] == 'k') {
- shift_left_factor = 10;
- value[last] = '\0';
- }
- if (value[last] == 'M' || value[last] == 'm') {
- shift_left_factor = 20;
- value[last] = '\0';
- }
- if (value[last] == 'G' || value[last] == 'g') {
- shift_left_factor = 30;
- value[last] = '\0';
- }
-
- return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
-}
-
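[Editor's note] suffix_strtoul() above turns a trailing K/M/G into a left-shift of 10, 20 or 30 bits after chopping the suffix off the string. A self-contained userspace sketch of the same parsing, assuming ordinary libc strtoul() rather than the kernel's simple_strtoul():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long parse_size(char *s)
{
	int last = (int)strlen(s) - 1;
	int shift = 0;

	if (last >= 0) {
		switch (s[last]) {
		case 'K': case 'k': shift = 10; s[last] = '\0'; break;
		case 'M': case 'm': shift = 20; s[last] = '\0'; break;
		case 'G': case 'g': shift = 30; s[last] = '\0'; break;
		}
	}
	return strtoul(s, NULL, 10) << shift;
}

int main(void)
{
	char a[] = "32k", b[] = "64";

	printf("%lu %lu\n", parse_size(a), parse_size(b));	/* 32768 64 */
	return 0;
}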
-STATIC int
-xfs_parseargs(
- struct xfs_mount *mp,
- char *options,
- struct xfs_mount_args *args,
- int update)
-{
- char *this_char, *value, *eov;
- int dsunit, dswidth, vol_dsunit, vol_dswidth;
- int iosize;
- int dmapi_implies_ikeep = 1;
-
- args->flags |= XFSMNT_BARRIER;
- args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
-
- if (!options)
- goto done;
-
- iosize = dsunit = dswidth = vol_dsunit = vol_dswidth = 0;
-
- while ((this_char = strsep(&options, ",")) != NULL) {
- if (!*this_char)
- continue;
- if ((value = strchr(this_char, '=')) != NULL)
- *value++ = 0;
-
- if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
- if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
- this_char);
- return EINVAL;
- }
- args->logbufs = simple_strtoul(value, &eov, 10);
- } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
- if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
- this_char);
- return EINVAL;
- }
- args->logbufsize = suffix_strtoul(value, &eov, 10);
- } else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
- if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
- this_char);
- return EINVAL;
- }
- strncpy(args->logname, value, MAXNAMELEN);
- } else if (!strcmp(this_char, MNTOPT_MTPT)) {
- if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
- this_char);
- return EINVAL;
- }
- strncpy(args->mtpt, value, MAXNAMELEN);
- } else if (!strcmp(this_char, MNTOPT_RTDEV)) {
- if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
- this_char);
- return EINVAL;
- }
- strncpy(args->rtname, value, MAXNAMELEN);
- } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
- if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
- this_char);
- return EINVAL;
- }
- iosize = simple_strtoul(value, &eov, 10);
- args->flags |= XFSMNT_IOSIZE;
- args->iosizelog = (uint8_t) iosize;
- } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
- if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
- this_char);
- return EINVAL;
- }
- iosize = suffix_strtoul(value, &eov, 10);
- args->flags |= XFSMNT_IOSIZE;
- args->iosizelog = ffs(iosize) - 1;
- } else if (!strcmp(this_char, MNTOPT_GRPID) ||
- !strcmp(this_char, MNTOPT_BSDGROUPS)) {
- mp->m_flags |= XFS_MOUNT_GRPID;
- } else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
- !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
- mp->m_flags &= ~XFS_MOUNT_GRPID;
- } else if (!strcmp(this_char, MNTOPT_WSYNC)) {
- args->flags |= XFSMNT_WSYNC;
- } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) {
- args->flags |= XFSMNT_OSYNCISOSYNC;
- } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
- args->flags |= XFSMNT_NORECOVERY;
- } else if (!strcmp(this_char, MNTOPT_INO64)) {
- args->flags |= XFSMNT_INO64;
-#if !XFS_BIG_INUMS
- cmn_err(CE_WARN,
- "XFS: %s option not allowed on this system",
- this_char);
- return EINVAL;
-#endif
- } else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
- args->flags |= XFSMNT_NOALIGN;
- } else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
- args->flags |= XFSMNT_SWALLOC;
- } else if (!strcmp(this_char, MNTOPT_SUNIT)) {
- if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
- this_char);
- return EINVAL;
- }
- dsunit = simple_strtoul(value, &eov, 10);
- } else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
- if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
- this_char);
- return EINVAL;
- }
- dswidth = simple_strtoul(value, &eov, 10);
- } else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
- args->flags &= ~XFSMNT_32BITINODES;
-#if !XFS_BIG_INUMS
- cmn_err(CE_WARN,
- "XFS: %s option not allowed on this system",
- this_char);
- return EINVAL;
-#endif
- } else if (!strcmp(this_char, MNTOPT_NOUUID)) {
- args->flags |= XFSMNT_NOUUID;
- } else if (!strcmp(this_char, MNTOPT_BARRIER)) {
- args->flags |= XFSMNT_BARRIER;
- } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
- args->flags &= ~XFSMNT_BARRIER;
- } else if (!strcmp(this_char, MNTOPT_IKEEP)) {
- args->flags |= XFSMNT_IKEEP;
- } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
- dmapi_implies_ikeep = 0;
- args->flags &= ~XFSMNT_IKEEP;
- } else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
- args->flags2 &= ~XFSMNT2_COMPAT_IOSIZE;
- } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
- args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
- } else if (!strcmp(this_char, MNTOPT_ATTR2)) {
- args->flags |= XFSMNT_ATTR2;
- } else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
- args->flags &= ~XFSMNT_ATTR2;
- } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
- args->flags2 |= XFSMNT2_FILESTREAMS;
- } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
- args->flags &= ~(XFSMNT_UQUOTAENF|XFSMNT_UQUOTA);
- args->flags &= ~(XFSMNT_GQUOTAENF|XFSMNT_GQUOTA);
- } else if (!strcmp(this_char, MNTOPT_QUOTA) ||
- !strcmp(this_char, MNTOPT_UQUOTA) ||
- !strcmp(this_char, MNTOPT_USRQUOTA)) {
- args->flags |= XFSMNT_UQUOTA | XFSMNT_UQUOTAENF;
- } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
- !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
- args->flags |= XFSMNT_UQUOTA;
- args->flags &= ~XFSMNT_UQUOTAENF;
- } else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
- !strcmp(this_char, MNTOPT_PRJQUOTA)) {
- args->flags |= XFSMNT_PQUOTA | XFSMNT_PQUOTAENF;
- } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
- args->flags |= XFSMNT_PQUOTA;
- args->flags &= ~XFSMNT_PQUOTAENF;
- } else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
- !strcmp(this_char, MNTOPT_GRPQUOTA)) {
- args->flags |= XFSMNT_GQUOTA | XFSMNT_GQUOTAENF;
- } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
- args->flags |= XFSMNT_GQUOTA;
- args->flags &= ~XFSMNT_GQUOTAENF;
- } else if (!strcmp(this_char, MNTOPT_DMAPI)) {
- args->flags |= XFSMNT_DMAPI;
- } else if (!strcmp(this_char, MNTOPT_XDSM)) {
- args->flags |= XFSMNT_DMAPI;
- } else if (!strcmp(this_char, MNTOPT_DMI)) {
- args->flags |= XFSMNT_DMAPI;
- } else if (!strcmp(this_char, "ihashsize")) {
- cmn_err(CE_WARN,
- "XFS: ihashsize no longer used, option is deprecated.");
- } else if (!strcmp(this_char, "osyncisdsync")) {
- /* no-op, this is now the default */
- cmn_err(CE_WARN,
- "XFS: osyncisdsync is now the default, option is deprecated.");
- } else if (!strcmp(this_char, "irixsgid")) {
- cmn_err(CE_WARN,
- "XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
- } else {
- cmn_err(CE_WARN,
- "XFS: unknown mount option [%s].", this_char);
- return EINVAL;
- }
- }
-
- if (args->flags & XFSMNT_NORECOVERY) {
- if ((mp->m_flags & XFS_MOUNT_RDONLY) == 0) {
- cmn_err(CE_WARN,
- "XFS: no-recovery mounts must be read-only.");
- return EINVAL;
- }
- }
-
- if ((args->flags & XFSMNT_NOALIGN) && (dsunit || dswidth)) {
- cmn_err(CE_WARN,
- "XFS: sunit and swidth options incompatible with the noalign option");
- return EINVAL;
- }
-
- if ((args->flags & XFSMNT_GQUOTA) && (args->flags & XFSMNT_PQUOTA)) {
- cmn_err(CE_WARN,
- "XFS: cannot mount with both project and group quota");
- return EINVAL;
- }
-
- if ((args->flags & XFSMNT_DMAPI) && *args->mtpt == '\0') {
- printk("XFS: %s option needs the mount point option as well\n",
- MNTOPT_DMAPI);
- return EINVAL;
- }
-
- if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
- cmn_err(CE_WARN,
- "XFS: sunit and swidth must be specified together");
- return EINVAL;
- }
-
- if (dsunit && (dswidth % dsunit != 0)) {
- cmn_err(CE_WARN,
- "XFS: stripe width (%d) must be a multiple of the stripe unit (%d)",
- dswidth, dsunit);
- return EINVAL;
- }
-
- /*
- * Applications using DMI filesystems often expect the
- * inode generation number to be monotonically increasing.
- * If we delete inode chunks we break this assumption, so
- * keep unused inode chunks on disk for DMI filesystems
- * until we come up with a better solution.
- * Note that if "ikeep" or "noikeep" mount options are
- * supplied, then they are honored.
- */
- if ((args->flags & XFSMNT_DMAPI) && dmapi_implies_ikeep)
- args->flags |= XFSMNT_IKEEP;
-
- if ((args->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) {
- if (dsunit) {
- args->sunit = dsunit;
- args->flags |= XFSMNT_RETERR;
- } else {
- args->sunit = vol_dsunit;
- }
- dswidth ? (args->swidth = dswidth) :
- (args->swidth = vol_dswidth);
- } else {
- args->sunit = args->swidth = 0;
- }
-
-done:
- if (args->flags & XFSMNT_32BITINODES)
- mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
- if (args->flags2)
- args->flags |= XFSMNT_FLAGS2;
- return 0;
-}
-
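[Editor's note] xfs_parseargs() above cuts the comma-separated option string apart with strsep(), splits each token at '=' into a name and optional value, and rejects value-less options that require an argument. A stripped-down userspace sketch of that walk; the option names handled here are examples only, and glibc's strsep() is assumed to be available.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_options(char *options)
{
	char *this_char, *value;

	while ((this_char = strsep(&options, ",")) != NULL) {
		if (!*this_char)
			continue;
		if ((value = strchr(this_char, '=')) != NULL)
			*value++ = '\0';

		if (!strcmp(this_char, "logbufs")) {
			if (!value || !*value)
				return -1;	/* option requires an argument */
			printf("logbufs = %ld\n", strtol(value, NULL, 10));
		} else if (!strcmp(this_char, "wsync")) {
			printf("wsync enabled\n");
		} else {
			printf("unknown option [%s]\n", this_char);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	char opts[] = "logbufs=8,wsync";

	return parse_options(opts) ? 1 : 0;
}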
-struct proc_xfs_info {
- int flag;
- char *str;
-};
-
-STATIC int
-xfs_showargs(
- struct xfs_mount *mp,
- struct seq_file *m)
-{
- static struct proc_xfs_info xfs_info_set[] = {
- /* the few simple ones we can get from the mount struct */
- { XFS_MOUNT_IKEEP, "," MNTOPT_IKEEP },
- { XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC },
- { XFS_MOUNT_INO64, "," MNTOPT_INO64 },
- { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN },
- { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC },
- { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID },
- { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY },
- { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC },
- { XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 },
- { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM },
- { XFS_MOUNT_DMAPI, "," MNTOPT_DMAPI },
- { XFS_MOUNT_GRPID, "," MNTOPT_GRPID },
- { 0, NULL }
- };
- static struct proc_xfs_info xfs_info_unset[] = {
- /* the few simple ones we can get from the mount struct */
- { XFS_MOUNT_COMPAT_IOSIZE, "," MNTOPT_LARGEIO },
- { XFS_MOUNT_BARRIER, "," MNTOPT_NOBARRIER },
- { XFS_MOUNT_SMALL_INUMS, "," MNTOPT_64BITINODE },
- { 0, NULL }
- };
- struct proc_xfs_info *xfs_infop;
-
- for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
- if (mp->m_flags & xfs_infop->flag)
- seq_puts(m, xfs_infop->str);
- }
- for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
- if (!(mp->m_flags & xfs_infop->flag))
- seq_puts(m, xfs_infop->str);
- }
-
- if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
- seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
- (int)(1 << mp->m_writeio_log) >> 10);
-
- if (mp->m_logbufs > 0)
- seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
- if (mp->m_logbsize > 0)
- seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);
-
- if (mp->m_logname)
- seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
- if (mp->m_rtname)
- seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);
-
- if (mp->m_dalign > 0)
- seq_printf(m, "," MNTOPT_SUNIT "=%d",
- (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
- if (mp->m_swidth > 0)
- seq_printf(m, "," MNTOPT_SWIDTH "=%d",
- (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
-
- if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
- seq_puts(m, "," MNTOPT_USRQUOTA);
- else if (mp->m_qflags & XFS_UQUOTA_ACCT)
- seq_puts(m, "," MNTOPT_UQUOTANOENF);
-
- if (mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
- seq_puts(m, "," MNTOPT_PRJQUOTA);
- else if (mp->m_qflags & XFS_PQUOTA_ACCT)
- seq_puts(m, "," MNTOPT_PQUOTANOENF);
-
- if (mp->m_qflags & (XFS_GQUOTA_ACCT|XFS_OQUOTA_ENFD))
- seq_puts(m, "," MNTOPT_GRPQUOTA);
- else if (mp->m_qflags & XFS_GQUOTA_ACCT)
- seq_puts(m, "," MNTOPT_GQUOTANOENF);
-
- if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
- seq_puts(m, "," MNTOPT_NOQUOTA);
-
- return 0;
-}
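[Editor's note] xfs_showargs() drives /proc/mounts output from two sentinel-terminated tables: one lists options printed when a mount flag is set, the other options printed when a flag is clear. A compact userspace sketch of that table-driven approach, with invented flag bits and option strings:

#include <stdio.h>

#define F_WSYNC   0x1
#define F_NOALIGN 0x2
#define F_BARRIER 0x4

struct flag_info { int flag; const char *str; };

static const struct flag_info set_opts[] = {
	{ F_WSYNC,   ",wsync" },
	{ F_NOALIGN, ",noalign" },
	{ 0, NULL }			/* sentinel */
};
static const struct flag_info unset_opts[] = {
	{ F_BARRIER, ",nobarrier" },
	{ 0, NULL }
};

static void show_options(int flags)
{
	const struct flag_info *p;

	for (p = set_opts; p->flag; p++)
		if (flags & p->flag)
			printf("%s", p->str);
	for (p = unset_opts; p->flag; p++)
		if (!(flags & p->flag))
			printf("%s", p->str);
	printf("\n");
}

int main(void)
{
	show_options(F_WSYNC);		/* prints ",wsync,nobarrier" */
	return 0;
}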
-__uint64_t
-xfs_max_file_offset(
- unsigned int blockshift)
-{
- unsigned int pagefactor = 1;
- unsigned int bitshift = BITS_PER_LONG - 1;
-
- /* Figure out maximum filesize, on Linux this can depend on
- * the filesystem blocksize (on 32 bit platforms).
- * __block_prepare_write does this in an [unsigned] long...
- * page->index << (PAGE_CACHE_SHIFT - bbits)
- * So, for page sized blocks (4K on 32 bit platforms),
- * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
- * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
- * but for smaller blocksizes it is less (bbits = log2 bsize).
- * Note1: get_block_t takes a long (implicit cast from above)
- * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
- * can optionally convert the [unsigned] long from above into
- * an [unsigned] long long.
- */
-
-#if BITS_PER_LONG == 32
-# if defined(CONFIG_LBD)
- ASSERT(sizeof(sector_t) == 8);
- pagefactor = PAGE_CACHE_SIZE;
- bitshift = BITS_PER_LONG;
-# else
- pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
-# endif
-#endif
-
- return (((__uint64_t)pagefactor) << bitshift) - 1;
-}
-
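[Editor's note] The long comment in xfs_max_file_offset() boils down to: on 32-bit kernels (without large block device support) the page-cache indexing limits offsets to pagefactor << (BITS_PER_LONG - 1), with pagefactor shrinking as the block size shrinks; on 64-bit kernels the limit is effectively 2^63 - 1. A userspace sketch of that arithmetic, assuming 4K pages and modelling only the non-LBD 32-bit case:

#include <stdio.h>
#include <stdint.h>

static uint64_t max_file_offset(unsigned int blockshift,
				unsigned int bits_per_long)
{
	unsigned int page_shift = 12;		/* 4K pages assumed */
	unsigned int pagefactor = 1;
	unsigned int bitshift = bits_per_long - 1;

	if (bits_per_long == 32)
		pagefactor = (1u << page_shift) >> (page_shift - blockshift);

	return (((uint64_t)pagefactor) << bitshift) - 1;
}

int main(void)
{
	/* 4K blocks on a 32-bit kernel: just under 8 TiB */
	printf("%llu\n", (unsigned long long)max_file_offset(12, 32));
	/* 64-bit kernel: 2^63 - 1 */
	printf("%llu\n", (unsigned long long)max_file_offset(12, 64));
	return 0;
}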
-STATIC_INLINE void
-xfs_set_inodeops(
- struct inode *inode)
-{
- switch (inode->i_mode & S_IFMT) {
- case S_IFREG:
- inode->i_op = &xfs_inode_operations;
- inode->i_fop = &xfs_file_operations;
- inode->i_mapping->a_ops = &xfs_address_space_operations;
- break;
- case S_IFDIR:
- inode->i_op = &xfs_dir_inode_operations;
- inode->i_fop = &xfs_dir_file_operations;
- break;
- case S_IFLNK:
- inode->i_op = &xfs_symlink_inode_operations;
- if (!(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE))
- inode->i_mapping->a_ops = &xfs_address_space_operations;
- break;
- default:
- inode->i_op = &xfs_inode_operations;
- init_special_inode(inode, inode->i_mode, inode->i_rdev);
- break;
- }
-}
-
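[Editor's note] xfs_set_inodeops() above picks the inode/file/address-space operations purely from the file-type bits of i_mode. The same dispatch shape as a tiny userspace sketch; the ops structs are invented placeholders, only the S_IFMT switch mirrors the original.

#include <stdio.h>
#include <sys/stat.h>

struct ops { const char *name; };

static const struct ops file_ops = { "file ops" };
static const struct ops dir_ops  = { "dir ops" };
static const struct ops link_ops = { "symlink ops" };
static const struct ops spec_ops = { "special ops" };

static const struct ops *ops_for_mode(mode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:	return &file_ops;
	case S_IFDIR:	return &dir_ops;
	case S_IFLNK:	return &link_ops;
	default:	return &spec_ops;	/* devices, fifos, sockets */
	}
}

int main(void)
{
	printf("%s\n", ops_for_mode(S_IFDIR)->name);	/* "dir ops" */
	return 0;
}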
-STATIC_INLINE void
-xfs_revalidate_inode(
- xfs_mount_t *mp,
- bhv_vnode_t *vp,
- xfs_inode_t *ip)
-{
- struct inode *inode = vn_to_inode(vp);
-
- inode->i_mode = ip->i_d.di_mode;
- inode->i_nlink = ip->i_d.di_nlink;
- inode->i_uid = ip->i_d.di_uid;
- inode->i_gid = ip->i_d.di_gid;
-
- switch (inode->i_mode & S_IFMT) {
- case S_IFBLK:
- case S_IFCHR:
- inode->i_rdev =
- MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
- sysv_minor(ip->i_df.if_u2.if_rdev));
- break;
- default:
- inode->i_rdev = 0;
- break;
- }
-
- inode->i_generation = ip->i_d.di_gen;
- i_size_write(inode, ip->i_d.di_size);
- inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec;
- inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec;
- inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
- inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
- inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
- inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
- if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
- inode->i_flags |= S_IMMUTABLE;
- else
- inode->i_flags &= ~S_IMMUTABLE;
- if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
- inode->i_flags |= S_APPEND;
- else
- inode->i_flags &= ~S_APPEND;
- if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
- inode->i_flags |= S_SYNC;
- else
- inode->i_flags &= ~S_SYNC;
- if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
- inode->i_flags |= S_NOATIME;
- else
- inode->i_flags &= ~S_NOATIME;
- xfs_iflags_clear(ip, XFS_IMODIFIED);
-}
-
-void
-xfs_initialize_vnode(
- struct xfs_mount *mp,
- bhv_vnode_t *vp,
- struct xfs_inode *ip)
-{
- struct inode *inode = vn_to_inode(vp);
-
- if (!ip->i_vnode) {
- ip->i_vnode = vp;
- inode->i_private = ip;
- }
-
- /*
- * We need to set the ops vectors, and unlock the inode, but if
- * we have been called during the new inode create process, it is
- * too early to fill in the Linux inode. We will get called a
- * second time once the inode is properly set up, and then we can
- * finish our work.
- */
- if (ip->i_d.di_mode != 0 && (inode->i_state & I_NEW)) {
- xfs_revalidate_inode(mp, vp, ip);
- xfs_set_inodeops(inode);
-
- xfs_iflags_clear(ip, XFS_INEW);
- barrier();
-
- unlock_new_inode(inode);
- }
-}
-
-int
-xfs_blkdev_get(
- xfs_mount_t *mp,
- const char *name,
- struct block_device **bdevp)
-{
- int error = 0;
-
- *bdevp = open_bdev_excl(name, 0, mp);
- if (IS_ERR(*bdevp)) {
- error = PTR_ERR(*bdevp);
- printk("XFS: Invalid device [%s], error=%d\n", name, error);
- }
-
- return -error;
-}
-
-void
-xfs_blkdev_put(
- struct block_device *bdev)
-{
- if (bdev)
- close_bdev_excl(bdev);
-}
-
-/*
- * Try to write out the superblock using barriers.
- */
-STATIC int
-xfs_barrier_test(
- xfs_mount_t *mp)
-{
- xfs_buf_t *sbp = xfs_getsb(mp, 0);
- int error;
-
- XFS_BUF_UNDONE(sbp);
- XFS_BUF_UNREAD(sbp);
- XFS_BUF_UNDELAYWRITE(sbp);
- XFS_BUF_WRITE(sbp);
- XFS_BUF_UNASYNC(sbp);
- XFS_BUF_ORDERED(sbp);
-
- xfsbdstrat(mp, sbp);
- error = xfs_iowait(sbp);
-
- /*
- * Clear all the flags we set and possible error state in the
- * buffer. We only did the write to try out whether barriers
- * worked and shouldn't leave any traces in the superblock
- * buffer.
- */
- XFS_BUF_DONE(sbp);
- XFS_BUF_ERROR(sbp, 0);
- XFS_BUF_UNORDERED(sbp);
-
- xfs_buf_relse(sbp);
- return error;
-}
-
-void
-xfs_mountfs_check_barriers(xfs_mount_t *mp)
-{
- int error;
-
- if (mp->m_logdev_targp != mp->m_ddev_targp) {
- xfs_fs_cmn_err(CE_NOTE, mp,
- "Disabling barriers, not supported with external log device");
- mp->m_flags &= ~XFS_MOUNT_BARRIER;
- return;
- }
-
- if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
- QUEUE_ORDERED_NONE) {
- xfs_fs_cmn_err(CE_NOTE, mp,
- "Disabling barriers, not supported by the underlying device");
- mp->m_flags &= ~XFS_MOUNT_BARRIER;
- return;
- }
-
- if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
- xfs_fs_cmn_err(CE_NOTE, mp,
- "Disabling barriers, underlying device is readonly");
- mp->m_flags &= ~XFS_MOUNT_BARRIER;
- return;
- }
-
- error = xfs_barrier_test(mp);
- if (error) {
- xfs_fs_cmn_err(CE_NOTE, mp,
- "Disabling barriers, trial barrier write failed");
- mp->m_flags &= ~XFS_MOUNT_BARRIER;
- return;
- }
-}
-
-void
-xfs_blkdev_issue_flush(
- xfs_buftarg_t *buftarg)
-{
- blkdev_issue_flush(buftarg->bt_bdev, NULL);
-}
-
-/*
- * XFS AIL push thread support
- */
-void
-xfsaild_wakeup(
- xfs_mount_t *mp,
- xfs_lsn_t threshold_lsn)
-{
- mp->m_ail.xa_target = threshold_lsn;
- wake_up_process(mp->m_ail.xa_task);
-}
-
-int
-xfsaild(
- void *data)
-{
- xfs_mount_t *mp = (xfs_mount_t *)data;
- xfs_lsn_t last_pushed_lsn = 0;
- long tout = 0;
-
- while (!kthread_should_stop()) {
- if (tout)
- schedule_timeout_interruptible(msecs_to_jiffies(tout));
- tout = 1000;
-
- /* swsusp */
- try_to_freeze();
-
- ASSERT(mp->m_log);
- if (XFS_FORCED_SHUTDOWN(mp))
- continue;
-
- tout = xfsaild_push(mp, &last_pushed_lsn);
- }
-
- return 0;
-} /* xfsaild */
-
-int
-xfsaild_start(
- xfs_mount_t *mp)
-{
- mp->m_ail.xa_target = 0;
- mp->m_ail.xa_task = kthread_run(xfsaild, mp, "xfsaild");
- if (IS_ERR(mp->m_ail.xa_task))
- return -PTR_ERR(mp->m_ail.xa_task);
- return 0;
-}
-
-void
-xfsaild_stop(
- xfs_mount_t *mp)
-{
- kthread_stop(mp->m_ail.xa_task);
-}
-
-
-
-STATIC struct inode *
-xfs_fs_alloc_inode(
- struct super_block *sb)
-{
- bhv_vnode_t *vp;
-
- vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
- if (unlikely(!vp))
- return NULL;
- return vn_to_inode(vp);
-}
-
-STATIC void
-xfs_fs_destroy_inode(
- struct inode *inode)
-{
- kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
-}
-
-STATIC void
-xfs_fs_inode_init_once(
- void *vnode)
-{
- inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
-}
-
-STATIC int __init
-xfs_init_zones(void)
-{
- xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
- KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
- KM_ZONE_SPREAD,
- xfs_fs_inode_init_once);
- if (!xfs_vnode_zone)
- goto out;
-
- xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
- if (!xfs_ioend_zone)
- goto out_destroy_vnode_zone;
-
- xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
- xfs_ioend_zone);
- if (!xfs_ioend_pool)
- goto out_free_ioend_zone;
- return 0;
-
- out_free_ioend_zone:
- kmem_zone_destroy(xfs_ioend_zone);
- out_destroy_vnode_zone:
- kmem_zone_destroy(xfs_vnode_zone);
- out:
- return -ENOMEM;
-}
-
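[Editor's note] xfs_init_zones() uses the classic ladder-of-goto unwind: each successful allocation gains a matching label further down that frees everything set up before the failure point. A minimal userspace sketch of that pattern, with malloc() standing in for the zone and mempool constructors:

#include <stdlib.h>

int init_resources(void **a, void **b, void **pool)
{
	*a = malloc(64);		/* stands in for the vnode zone */
	if (!*a)
		goto out;

	*b = malloc(64);		/* stands in for the ioend zone */
	if (!*b)
		goto out_free_a;

	*pool = malloc(256);		/* stands in for the ioend mempool */
	if (!*pool)
		goto out_free_b;
	return 0;

 out_free_b:
	free(*b);
 out_free_a:
	free(*a);
 out:
	return -1;			/* -ENOMEM in the original */
}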
-STATIC void
-xfs_destroy_zones(void)
-{
- mempool_destroy(xfs_ioend_pool);
- kmem_zone_destroy(xfs_vnode_zone);
- kmem_zone_destroy(xfs_ioend_zone);
-}
-
-/*
- * Attempt to flush the inode; this will actually fail
- * if the inode is pinned, but we dirty the inode again
- * at the point when it is unpinned after a log write,
- * since this is when the inode itself becomes flushable.
- */
-STATIC int
-xfs_fs_write_inode(
- struct inode *inode,
- int sync)
-{
- int error = 0;
- int flags = 0;
-
- xfs_itrace_entry(XFS_I(inode));
- if (sync) {
- filemap_fdatawait(inode->i_mapping);
- flags |= FLUSH_SYNC;
- }
- error = xfs_inode_flush(XFS_I(inode), flags);
- /*
- * if we failed to write out the inode then mark
- * it dirty again so we'll try again later.
- */
- if (error)
- mark_inode_dirty_sync(inode);
-
- return -error;
-}
-
-STATIC void
-xfs_fs_clear_inode(
- struct inode *inode)
-{
- xfs_inode_t *ip = XFS_I(inode);
-
- /*
- * ip can be null when xfs_iget_core calls xfs_idestroy if we
- * find an inode with di_mode == 0 but without IGET_CREATE set.
- */
- if (ip) {
- xfs_itrace_entry(ip);
- XFS_STATS_INC(vn_rele);
- XFS_STATS_INC(vn_remove);
- XFS_STATS_INC(vn_reclaim);
- XFS_STATS_DEC(vn_active);
-
- xfs_inactive(ip);
- xfs_iflags_clear(ip, XFS_IMODIFIED);
- if (xfs_reclaim(ip))
- panic("%s: cannot reclaim 0x%p\n", __func__, inode);
- }
-
- ASSERT(XFS_I(inode) == NULL);
-}
-
-/*
- * Enqueue a work item to be picked up by the vfs xfssyncd thread.
- * Doing this has two advantages:
- * - It saves on stack space, which is tight in certain situations
- * - It can be used (with care) as a mechanism to avoid deadlocks.
- * Flushing while allocating in a full filesystem requires both.
- */
-STATIC void
-xfs_syncd_queue_work(
- struct xfs_mount *mp,
- void *data,
- void (*syncer)(struct xfs_mount *, void *))
-{
- struct bhv_vfs_sync_work *work;
-
- work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
- INIT_LIST_HEAD(&work->w_list);
- work->w_syncer = syncer;
- work->w_data = data;
- work->w_mount = mp;
- spin_lock(&mp->m_sync_lock);
- list_add_tail(&work->w_list, &mp->m_sync_list);
- spin_unlock(&mp->m_sync_lock);
- wake_up_process(mp->m_sync_task);
-}
-
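[Editor's note] xfs_syncd_queue_work() packages a callback plus its argument into a work item, appends it to the mount's sync list, and wakes the xfssyncd thread to run it later. A single-threaded userspace sketch of the same deferred-work shape; locking and the worker wakeup are deliberately elided, and all names are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct sync_work {
	struct sync_work *next;
	void (*syncer)(void *);
	void *data;
};

static struct sync_work *head;
static struct sync_work **tailp = &head;

/* Producer side: package the callback and append it to the list. */
static void queue_work(void (*syncer)(void *), void *data)
{
	struct sync_work *w = malloc(sizeof(*w));

	w->next = NULL;
	w->syncer = syncer;
	w->data = data;
	*tailp = w;
	tailp = &w->next;
}

/* Consumer side: what the worker loop would do with the queued items. */
static void drain_work(void)
{
	struct sync_work *w;

	while ((w = head) != NULL) {
		head = w->next;
		w->syncer(w->data);
		free(w);
	}
	tailp = &head;
}

static void say_hello(void *arg)
{
	printf("work: %s\n", (const char *)arg);
}

int main(void)
{
	queue_work(say_hello, "flush inode");
	queue_work(say_hello, "flush device");
	drain_work();
	return 0;
}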
-/*
- * Flush delayed allocate data, attempting to free up reserved space
- * from existing allocations. At this point a new allocation attempt
- * has failed with ENOSPC and we are in the process of scratching our
- * heads, looking about for more room...
- */
-STATIC void
-xfs_flush_inode_work(
- struct xfs_mount *mp,
- void *arg)
-{
- struct inode *inode = arg;
- filemap_flush(inode->i_mapping);
- iput(inode);
-}
-
-void
-xfs_flush_inode(
- xfs_inode_t *ip)
-{
- struct inode *inode = ip->i_vnode;
-
- igrab(inode);
- xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
- delay(msecs_to_jiffies(500));
-}
-
-/*
- * This is the "bigger hammer" version of xfs_flush_inode_work...
- * (IOW, "If at first you don't succeed, use a Bigger Hammer").
- */
-STATIC void
-xfs_flush_device_work(
- struct xfs_mount *mp,
- void *arg)
-{
- struct inode *inode = arg;
- sync_blockdev(mp->m_super->s_bdev);
- iput(inode);
-}
-
-void
-xfs_flush_device(
- xfs_inode_t *ip)
-{
- struct inode *inode = vn_to_inode(XFS_ITOV(ip));
-
- igrab(inode);
- xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
- delay(msecs_to_jiffies(500));
- xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
-}
-
-STATIC void
-xfs_sync_worker(
- struct xfs_mount *mp,
- void *unused)
-{
- int error;
-
- if (!(mp->m_flags & XFS_MOUNT_RDONLY))
- error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR);
- mp->m_sync_seq++;
- wake_up(&mp->m_wait_single_sync_task);
-}
-
-STATIC int
-xfssyncd(
- void *arg)
-{
- struct xfs_mount *mp = arg;
- long timeleft;
- bhv_vfs_sync_work_t *work, *n;
- LIST_HEAD (tmp);
-
- set_freezable();
- timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
- for (;;) {
- timeleft = schedule_timeout_interruptible(timeleft);
- /* swsusp */
- try_to_freeze();
- if (kthread_should_stop() && list_empty(&mp->m_sync_list))
- break;
-
- spin_lock(&mp->m_sync_lock);
- /*
- * We can get woken by laptop mode, to do a sync -
- * that's the (only!) case where the list would be
- * empty with time remaining.
- */
- if (!timeleft || list_empty(&mp->m_sync_list)) {
- if (!timeleft)
- timeleft = xfs_syncd_centisecs *
- msecs_to_jiffies(10);
- INIT_LIST_HEAD(&mp->m_sync_work.w_list);
- list_add_tail(&mp->m_sync_work.w_list,
- &mp->m_sync_list);
- }
- list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
- list_move(&work->w_list, &tmp);
- spin_unlock(&mp->m_sync_lock);
-
- list_for_each_entry_safe(work, n, &tmp, w_list) {
- (*work->w_syncer)(mp, work->w_data);
- list_del(&work->w_list);
- if (work == &mp->m_sync_work)
- continue;
- kmem_free(work, sizeof(struct bhv_vfs_sync_work));
- }
- }
-
- return 0;
-}
-
-STATIC void
-xfs_fs_put_super(
- struct super_block *sb)
-{
- struct xfs_mount *mp = XFS_M(sb);
- int error;
-
- kthread_stop(mp->m_sync_task);
-
- xfs_sync(mp, SYNC_ATTR | SYNC_DELWRI);
- error = xfs_unmount(mp, 0, NULL);
- if (error)
- printk("XFS: unmount got error=%d\n", error);
-}
-
-STATIC void
-xfs_fs_write_super(
- struct super_block *sb)
-{
- if (!(sb->s_flags & MS_RDONLY))
- xfs_sync(XFS_M(sb), SYNC_FSDATA);
- sb->s_dirt = 0;
-}
-
-STATIC int
-xfs_fs_sync_super(
- struct super_block *sb,
- int wait)
-{
- struct xfs_mount *mp = XFS_M(sb);
- int error;
- int flags;
-
- /*
- * Treat a sync operation like a freeze. This is to work
- * around a race in sync_inodes() which works in two phases
- * - an asynchronous flush, which can write out an inode
- * without waiting for file size updates to complete, and a
- * synchronous flush, which won't do anything because the
- * async flush removed the inode's dirty flag. Also
- * sync_inodes() will not see any files that just have
- * outstanding transactions to be flushed because we don't
- * dirty the Linux inode until after the transaction I/O
- * completes.
- */
- if (wait || unlikely(sb->s_frozen == SB_FREEZE_WRITE)) {
- /*
- * First stage of freeze - no more writers will make progress
- * now we are here, so we flush delwri and delalloc buffers
- * here, then wait for all I/O to complete. Data is frozen at
- * that point. Metadata is not frozen, transactions can still
- * occur here so don't bother flushing the buftarg (i.e
- * SYNC_QUIESCE) because it'll just get dirty again.
- */
- flags = SYNC_DATA_QUIESCE;
- } else
- flags = SYNC_FSDATA;
-
- error = xfs_sync(mp, flags);
- sb->s_dirt = 0;
-
- if (unlikely(laptop_mode)) {
- int prev_sync_seq = mp->m_sync_seq;
-
- /*
- * The disk must be active because we're syncing.
- * We schedule xfssyncd now (now that the disk is
- * active) instead of later (when it might not be).
- */
- wake_up_process(mp->m_sync_task);
- /*
- * We have to wait for the sync iteration to complete.
- * If we don't, the disk activity caused by the sync
- * will come after the sync is completed, and that
- * triggers another sync from laptop mode.
- */
- wait_event(mp->m_wait_single_sync_task,
- mp->m_sync_seq != prev_sync_seq);
- }
-
- return -error;
-}
-
-STATIC int
-xfs_fs_statfs(
- struct dentry *dentry,
- struct kstatfs *statp)
-{
- struct xfs_mount *mp = XFS_M(dentry->d_sb);
- xfs_sb_t *sbp = &mp->m_sb;
- __uint64_t fakeinos, id;
- xfs_extlen_t lsize;
-
- statp->f_type = XFS_SB_MAGIC;
- statp->f_namelen = MAXNAMELEN - 1;
-
- id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
- statp->f_fsid.val[0] = (u32)id;
- statp->f_fsid.val[1] = (u32)(id >> 32);
-
- xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
-
- spin_lock(&mp->m_sb_lock);
- statp->f_bsize = sbp->sb_blocksize;
- lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
- statp->f_blocks = sbp->sb_dblocks - lsize;
- statp->f_bfree = statp->f_bavail =
- sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
- fakeinos = statp->f_bfree << sbp->sb_inopblog;
-#if XFS_BIG_INUMS
- fakeinos += mp->m_inoadd;
-#endif
- statp->f_files =
- MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
- if (mp->m_maxicount)
-#if XFS_BIG_INUMS
- if (!mp->m_inoadd)
-#endif
- statp->f_files = min_t(typeof(statp->f_files),
- statp->f_files,
- mp->m_maxicount);
- statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
- spin_unlock(&mp->m_sb_lock);
-
- XFS_QM_DQSTATVFS(XFS_I(dentry->d_inode), statp);
- return 0;
-}
-
-STATIC int
-xfs_fs_remount(
- struct super_block *sb,
- int *flags,
- char *options)
-{
- struct xfs_mount *mp = XFS_M(sb);
- struct xfs_mount_args *args = xfs_args_allocate(sb, 0);
- int error;
-
- error = xfs_parseargs(mp, options, args, 1);
- if (!error)
- error = xfs_mntupdate(mp, flags, args);
- kmem_free(args, sizeof(*args));
- return -error;
-}
-
-/*
- * Second stage of a freeze. The data is already frozen so we only
- * need to take care of the metadata. Once that's done, write a dummy
- * record to dirty the log in case of a crash while frozen.
- */
-STATIC void
-xfs_fs_lockfs(
- struct super_block *sb)
-{
- struct xfs_mount *mp = XFS_M(sb);
-
- xfs_attr_quiesce(mp);
- xfs_fs_log_dummy(mp);
-}
-
-STATIC int
-xfs_fs_show_options(
- struct seq_file *m,
- struct vfsmount *mnt)
-{
- return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
-}
-
-STATIC int
-xfs_fs_quotasync(
- struct super_block *sb,
- int type)
-{
- return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XQUOTASYNC, 0, NULL);
-}
-
-STATIC int
-xfs_fs_getxstate(
- struct super_block *sb,
- struct fs_quota_stat *fqs)
-{
- return -XFS_QM_QUOTACTL(XFS_M(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
-}
-
-STATIC int
-xfs_fs_setxstate(
- struct super_block *sb,
- unsigned int flags,
- int op)
-{
- return -XFS_QM_QUOTACTL(XFS_M(sb), op, 0, (caddr_t)&flags);
-}
-
-STATIC int
-xfs_fs_getxquota(
- struct super_block *sb,
- int type,
- qid_t id,
- struct fs_disk_quota *fdq)
-{
- return -XFS_QM_QUOTACTL(XFS_M(sb),
- (type == USRQUOTA) ? Q_XGETQUOTA :
- ((type == GRPQUOTA) ? Q_XGETGQUOTA :
- Q_XGETPQUOTA), id, (caddr_t)fdq);
-}
-
-STATIC int
-xfs_fs_setxquota(
- struct super_block *sb,
- int type,
- qid_t id,
- struct fs_disk_quota *fdq)
-{
- return -XFS_QM_QUOTACTL(XFS_M(sb),
- (type == USRQUOTA) ? Q_XSETQLIM :
- ((type == GRPQUOTA) ? Q_XSETGQLIM :
- Q_XSETPQLIM), id, (caddr_t)fdq);
-}
-
-STATIC int
-xfs_fs_fill_super(
- struct super_block *sb,
- void *data,
- int silent)
-{
- struct inode *root;
- struct xfs_mount *mp = NULL;
- struct xfs_mount_args *args = xfs_args_allocate(sb, silent);
- int error;
-
- mp = xfs_mount_init();
-
- INIT_LIST_HEAD(&mp->m_sync_list);
- spin_lock_init(&mp->m_sync_lock);
- init_waitqueue_head(&mp->m_wait_single_sync_task);
-
- mp->m_super = sb;
- sb->s_fs_info = mp;
-
- if (sb->s_flags & MS_RDONLY)
- mp->m_flags |= XFS_MOUNT_RDONLY;
-
- error = xfs_parseargs(mp, (char *)data, args, 0);
- if (error)
- goto fail_vfsop;
-
- sb_min_blocksize(sb, BBSIZE);
- sb->s_export_op = &xfs_export_operations;
- sb->s_qcop = &xfs_quotactl_operations;
- sb->s_op = &xfs_super_operations;
-
- error = xfs_mount(mp, args, NULL);
- if (error)
- goto fail_vfsop;
-
- sb->s_dirt = 1;
- sb->s_magic = XFS_SB_MAGIC;
- sb->s_blocksize = mp->m_sb.sb_blocksize;
- sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
- sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
- sb->s_time_gran = 1;
- set_posix_acl_flag(sb);
-
- root = igrab(mp->m_rootip->i_vnode);
- if (!root) {
- error = ENOENT;
- goto fail_unmount;
- }
- if (is_bad_inode(root)) {
- error = EINVAL;
- goto fail_vnrele;
- }
- sb->s_root = d_alloc_root(root);
- if (!sb->s_root) {
- error = ENOMEM;
- goto fail_vnrele;
- }
-
- mp->m_sync_work.w_syncer = xfs_sync_worker;
- mp->m_sync_work.w_mount = mp;
- mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
- if (IS_ERR(mp->m_sync_task)) {
- error = -PTR_ERR(mp->m_sync_task);
- goto fail_vnrele;
- }
-
- xfs_itrace_exit(XFS_I(sb->s_root->d_inode));
-
- kmem_free(args, sizeof(*args));
- return 0;
-
-fail_vnrele:
- if (sb->s_root) {
- dput(sb->s_root);
- sb->s_root = NULL;
- } else {
- iput(root);
- }
-
-fail_unmount:
- xfs_unmount(mp, 0, NULL);
-
-fail_vfsop:
- kmem_free(args, sizeof(*args));
- return -error;
-}
-
-STATIC int
-xfs_fs_get_sb(
- struct file_system_type *fs_type,
- int flags,
- const char *dev_name,
- void *data,
- struct vfsmount *mnt)
-{
- return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
- mnt);
-}
-
-static struct super_operations xfs_super_operations = {
- .alloc_inode = xfs_fs_alloc_inode,
- .destroy_inode = xfs_fs_destroy_inode,
- .write_inode = xfs_fs_write_inode,
- .clear_inode = xfs_fs_clear_inode,
- .put_super = xfs_fs_put_super,
- .write_super = xfs_fs_write_super,
- .sync_fs = xfs_fs_sync_super,
- .write_super_lockfs = xfs_fs_lockfs,
- .statfs = xfs_fs_statfs,
- .remount_fs = xfs_fs_remount,
- .show_options = xfs_fs_show_options,
-};
-
-static struct quotactl_ops xfs_quotactl_operations = {
- .quota_sync = xfs_fs_quotasync,
- .get_xstate = xfs_fs_getxstate,
- .set_xstate = xfs_fs_setxstate,
- .get_xquota = xfs_fs_getxquota,
- .set_xquota = xfs_fs_setxquota,
-};
-
-static struct file_system_type xfs_fs_type = {
- .owner = THIS_MODULE,
- .name = "xfs",
- .get_sb = xfs_fs_get_sb,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
-};
-
-
-STATIC int __init
-init_xfs_fs( void )
-{
- int error;
- static char message[] __initdata = KERN_INFO \
- XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";
-
- printk(message);
-
- ktrace_init(64);
-
- error = xfs_init_zones();
- if (error < 0)
- goto undo_zones;
-
- error = xfs_buf_init();
- if (error < 0)
- goto undo_buffers;
-
- vn_init();
- xfs_init();
- uuid_init();
- vfs_initquota();
-
- error = register_filesystem(&xfs_fs_type);
- if (error)
- goto undo_register;
- return 0;
-
-undo_register:
- xfs_buf_terminate();
-
-undo_buffers:
- xfs_destroy_zones();
-
-undo_zones:
- return error;
-}
-
-STATIC void __exit
-exit_xfs_fs( void )
-{
- vfs_exitquota();
- unregister_filesystem(&xfs_fs_type);
- xfs_cleanup();
- xfs_buf_terminate();
- xfs_destroy_zones();
- ktrace_uninit();
-}
-
-module_init(init_xfs_fs);
-module_exit(exit_xfs_fs);
-
-MODULE_AUTHOR("Silicon Graphics, Inc.");
-MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
-MODULE_LICENSE("GPL");
diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h
deleted file mode 100644
index 7e60c7776b1..00000000000
--- a/fs/xfs/linux-2.6/xfs_vfs.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2000-2006 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_VFS_H__
-#define __XFS_VFS_H__
-
-#include <linux/vfs.h>
-#include "xfs_fs.h"
-
-struct inode;
-
-struct fid;
-struct cred;
-struct seq_file;
-struct super_block;
-struct xfs_inode;
-struct xfs_mount;
-struct xfs_mount_args;
-
-typedef struct kstatfs bhv_statvfs_t;
-
-typedef struct bhv_vfs_sync_work {
- struct list_head w_list;
- struct xfs_mount *w_mount;
- void *w_data; /* syncer routine argument */
- void (*w_syncer)(struct xfs_mount *, void *);
-} bhv_vfs_sync_work_t;
-
-#define SYNC_ATTR 0x0001 /* sync attributes */
-#define SYNC_CLOSE 0x0002 /* close file system down */
-#define SYNC_DELWRI 0x0004 /* look at delayed writes */
-#define SYNC_WAIT 0x0008 /* wait for i/o to complete */
-#define SYNC_BDFLUSH 0x0010 /* BDFLUSH is calling -- don't block */
-#define SYNC_FSDATA 0x0020 /* flush fs data (e.g. superblocks) */
-#define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */
-#define SYNC_REMOUNT 0x0080 /* remount readonly, no dummy LRs */
-#define SYNC_IOWAIT 0x0100 /* wait for all I/O to complete */
-
-/*
- * When remounting a filesystem read-only or freezing the filesystem,
- * we have two phases to execute. This first phase is syncing the data
- * before we quiesce the filesystem, and the second is flushing all the
- * inodes out after we've waited for all the transactions created by
- * the first phase to complete. The second phase uses SYNC_INODE_QUIESCE
- * to ensure that the inodes are written to their location on disk
- * rather than just existing in transactions in the log. This means
- * after a quiesce there is no log replay required to write the inodes
- * to disk (this is the main difference between a sync and a quiesce).
- */
-#define SYNC_DATA_QUIESCE (SYNC_DELWRI|SYNC_FSDATA|SYNC_WAIT|SYNC_IOWAIT)
-#define SYNC_INODE_QUIESCE (SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT)
-
-#define SHUTDOWN_META_IO_ERROR 0x0001 /* write attempt to metadata failed */
-#define SHUTDOWN_LOG_IO_ERROR 0x0002 /* write attempt to the log failed */
-#define SHUTDOWN_FORCE_UMOUNT 0x0004 /* shutdown from a forced unmount */
-#define SHUTDOWN_CORRUPT_INCORE 0x0008 /* corrupt in-memory data structures */
-#define SHUTDOWN_REMOTE_REQ 0x0010 /* shutdown came from remote cell */
-#define SHUTDOWN_DEVICE_REQ 0x0020 /* failed all paths to the device */
-
-#define xfs_test_for_freeze(mp) ((mp)->m_super->s_frozen)
-#define xfs_wait_for_freeze(mp,l) vfs_check_frozen((mp)->m_super, (l))
-
-#endif /* __XFS_VFS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
deleted file mode 100644
index bc7afe00733..00000000000
--- a/fs/xfs/linux-2.6/xfs_vnode.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_vnodeops.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_inode.h"
-
-/*
- * And this gunk is needed for xfs_mount.h
- */
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_dmapi.h"
-#include "xfs_inum.h"
-#include "xfs_ag.h"
-#include "xfs_mount.h"
-
-
-/*
- * Dedicated vnode inactive/reclaim sync semaphores.
- * Prime number of hash buckets since address is used as the key.
- */
-#define NVSYNC 37
-#define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC])
-static wait_queue_head_t vsync[NVSYNC];
-
-void __init
-vn_init(void)
-{
- int i;
-
- for (i = 0; i < NVSYNC; i++)
- init_waitqueue_head(&vsync[i]);
-}
-
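[Editor's note] The vsync table above hashes an inode address into one of a prime number of wait-queue buckets, so inodes that collide simply share a wait queue. A userspace sketch of just the hashing step; the wait queues themselves are elided and uintptr_t is used for the pointer-to-integer cast.

#include <stdio.h>
#include <stdint.h>

#define NVSYNC 37	/* prime bucket count, as in the original */

static unsigned int bucket_for(const void *p)
{
	return (unsigned int)((uintptr_t)p % NVSYNC);
}

int main(void)
{
	int x, y;

	printf("&x -> bucket %u, &y -> bucket %u\n",
	       bucket_for(&x), bucket_for(&y));
	return 0;
}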
-void
-vn_iowait(
- xfs_inode_t *ip)
-{
- wait_queue_head_t *wq = vptosync(ip);
-
- wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
-}
-
-void
-vn_iowake(
- xfs_inode_t *ip)
-{
- if (atomic_dec_and_test(&ip->i_iocount))
- wake_up(vptosync(ip));
-}
-
-/*
- * Volume managers supporting multiple paths can send back ENODEV when the
- * final path disappears. In this case continuing to fill the page cache
- * with dirty data which cannot be written out is evil, so prevent that.
- */
-void
-vn_ioerror(
- xfs_inode_t *ip,
- int error,
- char *f,
- int l)
-{
- if (unlikely(error == -ENODEV))
- xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l);
-}
-
-/*
- * Revalidate the Linux inode from the XFS inode.
- * Note: i_size _not_ updated; we must hold the inode
- * semaphore when doing that - callers responsibility.
- */
-int
-vn_revalidate(
- bhv_vnode_t *vp)
-{
- struct inode *inode = vn_to_inode(vp);
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
- unsigned long xflags;
-
- xfs_itrace_entry(ip);
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
- xfs_ilock(ip, XFS_ILOCK_SHARED);
- inode->i_mode = ip->i_d.di_mode;
- inode->i_uid = ip->i_d.di_uid;
- inode->i_gid = ip->i_d.di_gid;
- inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
- inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
- inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
- inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
-
- xflags = xfs_ip2xflags(ip);
- if (xflags & XFS_XFLAG_IMMUTABLE)
- inode->i_flags |= S_IMMUTABLE;
- else
- inode->i_flags &= ~S_IMMUTABLE;
- if (xflags & XFS_XFLAG_APPEND)
- inode->i_flags |= S_APPEND;
- else
- inode->i_flags &= ~S_APPEND;
- if (xflags & XFS_XFLAG_SYNC)
- inode->i_flags |= S_SYNC;
- else
- inode->i_flags &= ~S_SYNC;
- if (xflags & XFS_XFLAG_NOATIME)
- inode->i_flags |= S_NOATIME;
- else
- inode->i_flags &= ~S_NOATIME;
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
- xfs_iflags_clear(ip, XFS_IMODIFIED);
- return 0;
-}
-
-/*
- * Add a reference to a referenced vnode.
- */
-bhv_vnode_t *
-vn_hold(
- bhv_vnode_t *vp)
-{
- struct inode *inode;
-
- XFS_STATS_INC(vn_hold);
-
- inode = igrab(vn_to_inode(vp));
- ASSERT(inode);
-
- return vp;
-}
-
-#ifdef XFS_INODE_TRACE
-
-/*
- * Reference count of Linux inode if present, -1 if the xfs_inode
- * has no associated Linux inode.
- */
-static inline int xfs_icount(struct xfs_inode *ip)
-{
- bhv_vnode_t *vp = XFS_ITOV_NULL(ip);
-
- if (vp)
- return vn_count(vp);
- return -1;
-}
-
-#define KTRACE_ENTER(ip, vk, s, line, ra) \
- ktrace_enter( (ip)->i_trace, \
-/* 0 */ (void *)(__psint_t)(vk), \
-/* 1 */ (void *)(s), \
-/* 2 */ (void *)(__psint_t) line, \
-/* 3 */ (void *)(__psint_t)xfs_icount(ip), \
-/* 4 */ (void *)(ra), \
-/* 5 */ NULL, \
-/* 6 */ (void *)(__psint_t)current_cpu(), \
-/* 7 */ (void *)(__psint_t)current_pid(), \
-/* 8 */ (void *)__return_address, \
-/* 9 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL)
-
-/*
- * Vnode tracing code.
- */
-void
-_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
-{
- KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
-}
-
-void
-_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
-{
- KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
-}
-
-void
-xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
-{
- KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
-}
-
-void
-_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
-{
- KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
-}
-
-void
-xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
-{
- KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
-}
-#endif /* XFS_INODE_TRACE */
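
The vn_iowait()/vn_iowake() pair deleted above avoids embedding a dedicated wait queue in every inode: the inode address is hashed into a small, prime-sized table of shared wait queues, and waiters simply re-check the I/O count after each wakeup. Below is a minimal userspace sketch of the same pattern, assuming pthreads; the names (sync_init, obj_iowait, obj_iowake, iocount) are illustrative and not part of XFS.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

/* Prime number of buckets so object addresses spread reasonably well. */
#define NSYNC	37

struct syncbucket {
	pthread_mutex_t	lock;
	pthread_cond_t	cond;
};

static struct syncbucket sync_tab[NSYNC];

/* Analogue of vn_init(): set up the shared buckets once at startup. */
static void sync_init(void)
{
	for (int i = 0; i < NSYNC; i++) {
		pthread_mutex_init(&sync_tab[i].lock, NULL);
		pthread_cond_init(&sync_tab[i].cond, NULL);
	}
}

/* Map an object address to its shared bucket, as vptosync() does. */
static struct syncbucket *ptosync(const void *p)
{
	return &sync_tab[(uintptr_t)p % NSYNC];
}

struct obj {
	atomic_int	iocount;	/* outstanding I/Os against this object */
};

/* Wait until all I/O against the object has drained (vn_iowait analogue). */
static void obj_iowait(struct obj *o)
{
	struct syncbucket *b = ptosync(o);

	pthread_mutex_lock(&b->lock);
	while (atomic_load(&o->iocount) != 0)
		pthread_cond_wait(&b->cond, &b->lock);
	pthread_mutex_unlock(&b->lock);
}

/* Drop one I/O reference; wake waiters on the last drop (vn_iowake analogue). */
static void obj_iowake(struct obj *o)
{
	struct syncbucket *b = ptosync(o);

	if (atomic_fetch_sub(&o->iocount, 1) == 1) {
		pthread_mutex_lock(&b->lock);
		pthread_cond_broadcast(&b->cond);
		pthread_mutex_unlock(&b->lock);
	}
}

Hash collisions only cause spurious wakeups across unrelated objects, which the re-check loop tolerates; that is the same trade-off the deleted code accepts by sharing NVSYNC wait queue heads among all inodes.
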
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
deleted file mode 100644
index 25eb2a9e8d9..00000000000
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_VNODE_H__
-#define __XFS_VNODE_H__
-
-struct file;
-struct bhv_vattr;
-struct xfs_iomap;
-struct attrlist_cursor_kern;
-
-typedef struct inode bhv_vnode_t;
-
-/*
- * Vnode to Linux inode mapping.
- */
-static inline bhv_vnode_t *vn_from_inode(struct inode *inode)
-{
- return inode;
-}
-static inline struct inode *vn_to_inode(bhv_vnode_t *vnode)
-{
- return vnode;
-}
-
-/*
- * Return values for xfs_inactive. A return value of
- * VN_INACTIVE_NOCACHE implies that the file system behavior
- * has disassociated its state and bhv_desc_t from the vnode.
- */
-#define VN_INACTIVE_CACHE 0
-#define VN_INACTIVE_NOCACHE 1
-
-/*
- * Flags for read/write calls - same values as IRIX
- */
-#define IO_ISAIO 0x00001 /* don't wait for completion */
-#define IO_ISDIRECT 0x00004 /* bypass page cache */
-#define IO_INVIS 0x00020 /* don't update inode timestamps */
-
-/*
- * Flags for xfs_inode_flush
- */
-#define FLUSH_SYNC 1 /* wait for flush to complete */
-
-/*
- * Flush/Invalidate options for vop_toss/flush/flushinval_pages.
- */
-#define FI_NONE 0 /* none */
-#define FI_REMAPF 1 /* Do a remapf prior to the operation */
-#define FI_REMAPF_LOCKED 2 /* Do a remapf prior to the operation.
- Prevent VM access to the pages until
- the operation completes. */
-
-/*
- * Vnode attributes. va_mask indicates those attributes the caller
- * wants to set or extract.
- */
-typedef struct bhv_vattr {
- int va_mask; /* bit-mask of attributes present */
- mode_t va_mode; /* file access mode and type */
- xfs_nlink_t va_nlink; /* number of references to file */
- uid_t va_uid; /* owner user id */
- gid_t va_gid; /* owner group id */
- xfs_ino_t va_nodeid; /* file id */
- xfs_off_t va_size; /* file size in bytes */
- u_long va_blocksize; /* blocksize preferred for i/o */
- struct timespec va_atime; /* time of last access */
- struct timespec va_mtime; /* time of last modification */
- struct timespec va_ctime; /* time file changed */
- u_int va_gen; /* generation number of file */
- xfs_dev_t va_rdev; /* device the special file represents */
- __int64_t va_nblocks; /* number of blocks allocated */
- u_long va_xflags; /* random extended file flags */
- u_long va_extsize; /* file extent size */
- u_long va_nextents; /* number of extents in file */
- u_long va_anextents; /* number of attr extents in file */
- prid_t va_projid; /* project id */
-} bhv_vattr_t;
-
-/*
- * setattr or getattr attributes
- */
-#define XFS_AT_TYPE 0x00000001
-#define XFS_AT_MODE 0x00000002
-#define XFS_AT_UID 0x00000004
-#define XFS_AT_GID 0x00000008
-#define XFS_AT_FSID 0x00000010
-#define XFS_AT_NODEID 0x00000020
-#define XFS_AT_NLINK 0x00000040
-#define XFS_AT_SIZE 0x00000080
-#define XFS_AT_ATIME 0x00000100
-#define XFS_AT_MTIME 0x00000200
-#define XFS_AT_CTIME 0x00000400
-#define XFS_AT_RDEV 0x00000800
-#define XFS_AT_BLKSIZE 0x00001000
-#define XFS_AT_NBLOCKS 0x00002000
-#define XFS_AT_VCODE 0x00004000
-#define XFS_AT_MAC 0x00008000
-#define XFS_AT_UPDATIME 0x00010000
-#define XFS_AT_UPDMTIME 0x00020000
-#define XFS_AT_UPDCTIME 0x00040000
-#define XFS_AT_ACL 0x00080000
-#define XFS_AT_CAP 0x00100000
-#define XFS_AT_INF 0x00200000
-#define XFS_AT_XFLAGS 0x00400000
-#define XFS_AT_EXTSIZE 0x00800000
-#define XFS_AT_NEXTENTS 0x01000000
-#define XFS_AT_ANEXTENTS 0x02000000
-#define XFS_AT_PROJID 0x04000000
-#define XFS_AT_SIZE_NOPERM 0x08000000
-#define XFS_AT_GENCOUNT 0x10000000
-
-#define XFS_AT_ALL (XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\
- XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\
- XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME|XFS_AT_RDEV|\
- XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|XFS_AT_MAC|\
- XFS_AT_ACL|XFS_AT_CAP|XFS_AT_INF|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|\
- XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_PROJID|XFS_AT_GENCOUNT)
-
-#define XFS_AT_STAT (XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\
- XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\
- XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME|XFS_AT_RDEV|\
- XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_PROJID)
-
-#define XFS_AT_TIMES (XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME)
-
-#define XFS_AT_UPDTIMES (XFS_AT_UPDATIME|XFS_AT_UPDMTIME|XFS_AT_UPDCTIME)
-
-#define XFS_AT_NOSET (XFS_AT_NLINK|XFS_AT_RDEV|XFS_AT_FSID|XFS_AT_NODEID|\
- XFS_AT_TYPE|XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|\
- XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_GENCOUNT)
-
-extern void vn_init(void);
-extern int vn_revalidate(bhv_vnode_t *);
-
-/*
- * These don't take a vnode anymore at all; all this should be
- * cleaned up at some point.
- */
-extern void vn_iowait(struct xfs_inode *ip);
-extern void vn_iowake(struct xfs_inode *ip);
-extern void vn_ioerror(struct xfs_inode *ip, int error, char *f, int l);
-
-static inline int vn_count(bhv_vnode_t *vp)
-{
- return atomic_read(&vn_to_inode(vp)->i_count);
-}
-
-/*
- * Vnode reference counting functions (and macros for compatibility).
- */
-extern bhv_vnode_t *vn_hold(bhv_vnode_t *);
-
-#if defined(XFS_INODE_TRACE)
-#define VN_HOLD(vp) \
- ((void)vn_hold(vp), \
- xfs_itrace_hold(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address))
-#define VN_RELE(vp) \
- (xfs_itrace_rele(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address), \
- iput(vn_to_inode(vp)))
-#else
-#define VN_HOLD(vp) ((void)vn_hold(vp))
-#define VN_RELE(vp) (iput(vn_to_inode(vp)))
-#endif
-
-static inline bhv_vnode_t *vn_grab(bhv_vnode_t *vp)
-{
- struct inode *inode = igrab(vn_to_inode(vp));
- return inode ? vn_from_inode(inode) : NULL;
-}
-
-/*
- * Dealing with bad inodes
- */
-static inline int VN_BAD(bhv_vnode_t *vp)
-{
- return is_bad_inode(vn_to_inode(vp));
-}
-
-/*
- * Extracting atime values in various formats
- */
-static inline void vn_atime_to_bstime(bhv_vnode_t *vp, xfs_bstime_t *bs_atime)
-{
- bs_atime->tv_sec = vp->i_atime.tv_sec;
- bs_atime->tv_nsec = vp->i_atime.tv_nsec;
-}
-
-static inline void vn_atime_to_timespec(bhv_vnode_t *vp, struct timespec *ts)
-{
- *ts = vp->i_atime;
-}
-
-static inline void vn_atime_to_time_t(bhv_vnode_t *vp, time_t *tt)
-{
- *tt = vp->i_atime.tv_sec;
-}
-
-/*
- * Some useful predicates.
- */
-#define VN_MAPPED(vp) mapping_mapped(vn_to_inode(vp)->i_mapping)
-#define VN_CACHED(vp) (vn_to_inode(vp)->i_mapping->nrpages)
-#define VN_DIRTY(vp) mapping_tagged(vn_to_inode(vp)->i_mapping, \
- PAGECACHE_TAG_DIRTY)
-
-/*
- * Flags to vop_setattr/getattr.
- */
-#define ATTR_UTIME 0x01 /* non-default utime(2) request */
-#define ATTR_DMI 0x08 /* invocation from a DMI function */
-#define ATTR_LAZY 0x80 /* set/get attributes lazily */
-#define ATTR_NONBLOCK 0x100 /* return EAGAIN if operation would block */
-#define ATTR_NOLOCK 0x200 /* Don't grab any conflicting locks */
-#define ATTR_NOSIZETOK 0x400 /* Don't get the SIZE token */
-
-/*
- * Tracking vnode activity.
- */
-#if defined(XFS_INODE_TRACE)
-
-#define INODE_TRACE_SIZE 16 /* number of trace entries */
-#define INODE_KTRACE_ENTRY 1
-#define INODE_KTRACE_EXIT 2
-#define INODE_KTRACE_HOLD 3
-#define INODE_KTRACE_REF 4
-#define INODE_KTRACE_RELE 5
-
-extern void _xfs_itrace_entry(struct xfs_inode *, const char *, inst_t *);
-extern void _xfs_itrace_exit(struct xfs_inode *, const char *, inst_t *);
-extern void xfs_itrace_hold(struct xfs_inode *, char *, int, inst_t *);
-extern void _xfs_itrace_ref(struct xfs_inode *, char *, int, inst_t *);
-extern void xfs_itrace_rele(struct xfs_inode *, char *, int, inst_t *);
-#define xfs_itrace_entry(ip) \
- _xfs_itrace_entry(ip, __func__, (inst_t *)__return_address)
-#define xfs_itrace_exit(ip) \
- _xfs_itrace_exit(ip, __func__, (inst_t *)__return_address)
-#define xfs_itrace_exit_tag(ip, tag) \
- _xfs_itrace_exit(ip, tag, (inst_t *)__return_address)
-#define xfs_itrace_ref(ip) \
- _xfs_itrace_ref(ip, __FILE__, __LINE__, (inst_t *)__return_address)
-
-#else
-#define xfs_itrace_entry(a)
-#define xfs_itrace_exit(a)
-#define xfs_itrace_exit_tag(a, b)
-#define xfs_itrace_hold(a, b, c, d)
-#define xfs_itrace_ref(a)
-#define xfs_itrace_rele(a, b, c, d)
-#endif
-
-#endif /* __XFS_VNODE_H__ */
diff --git a/fs/xfs/linux-2.6/mrlock.h b/fs/xfs/mrlock.h
index ff6a19873e5..e3c92d19e54 100644
--- a/fs/xfs/linux-2.6/mrlock.h
+++ b/fs/xfs/mrlock.h
@@ -22,12 +22,12 @@
typedef struct {
struct rw_semaphore mr_lock;
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
int mr_writer;
#endif
} mrlock_t;
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
#define mrinit(mrp, name) \
do { (mrp)->mr_writer = 0; init_rwsem(&(mrp)->mr_lock); } while (0)
#else
@@ -46,7 +46,7 @@ static inline void mraccess_nested(mrlock_t *mrp, int subclass)
static inline void mrupdate_nested(mrlock_t *mrp, int subclass)
{
down_write_nested(&mrp->mr_lock, subclass);
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
mrp->mr_writer = 1;
#endif
}
@@ -60,7 +60,7 @@ static inline int mrtryupdate(mrlock_t *mrp)
{
if (!down_write_trylock(&mrp->mr_lock))
return 0;
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
mrp->mr_writer = 1;
#endif
return 1;
@@ -68,7 +68,7 @@ static inline int mrtryupdate(mrlock_t *mrp)
static inline void mrunlock_excl(mrlock_t *mrp)
{
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
mrp->mr_writer = 0;
#endif
up_write(&mrp->mr_lock);
@@ -81,7 +81,7 @@ static inline void mrunlock_shared(mrlock_t *mrp)
static inline void mrdemote(mrlock_t *mrp)
{
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
mrp->mr_writer = 0;
#endif
downgrade_write(&mrp->mr_lock);
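
The hunks above only widen the #ifdef DEBUG guards to #if defined(DEBUG) || defined(XFS_WARN); the mr_writer field itself exists purely so that debug and warn builds can assert write ownership, which a bare rw_semaphore does not track. A hypothetical userspace sketch of the same wrapper idea, using a pthread rwlock; the mrw_* names are illustrative, not part of XFS.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

/* Debug-only writer tracking around a reader/writer lock, as mrlock_t does. */
struct mrwlock {
	pthread_rwlock_t	lock;
#ifndef NDEBUG
	bool			writer;		/* set only while write-locked */
#endif
};

static void mrw_init(struct mrwlock *m)
{
	pthread_rwlock_init(&m->lock, NULL);
#ifndef NDEBUG
	m->writer = false;
#endif
}

static void mrw_write_lock(struct mrwlock *m)
{
	pthread_rwlock_wrlock(&m->lock);
#ifndef NDEBUG
	m->writer = true;	/* safe to set: we hold the lock exclusively */
#endif
}

static void mrw_write_unlock(struct mrwlock *m)
{
#ifndef NDEBUG
	m->writer = false;
#endif
	pthread_rwlock_unlock(&m->lock);
}

/* What the extra field buys: an assertable "is write-locked" predicate. */
static void mrw_assert_write_locked(struct mrwlock *m)
{
#ifndef NDEBUG
	assert(m->writer);
#endif
}
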
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
deleted file mode 100644
index 85df3288efd..00000000000
--- a/fs/xfs/quota/xfs_dquot.c
+++ /dev/null
@@ -1,1602 +0,0 @@
-/*
- * Copyright (c) 2000-2003 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
-#include "xfs_quota.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
-#include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_rw.h"
-#include "xfs_acl.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
-#include "xfs_trans_space.h"
-#include "xfs_trans_priv.h"
-#include "xfs_qm.h"
-
-
-/*
- LOCK ORDER
-
- inode lock (ilock)
- dquot hash-chain lock (hashlock)
- xqm dquot freelist lock (freelistlock)
- mount's dquot list lock (mplistlock)
- user dquot lock - lock ordering among dquots is based on the uid or gid
- group dquot lock - similar to udquots. Between the two dquots, the udquot
- has to be locked first.
- pin lock - the dquot lock must be held to take this lock.
- flush lock - ditto.
-*/
-
-STATIC void xfs_qm_dqflush_done(xfs_buf_t *, xfs_dq_logitem_t *);
-
-#ifdef DEBUG
-xfs_buftarg_t *xfs_dqerror_target;
-int xfs_do_dqerror;
-int xfs_dqreq_num;
-int xfs_dqerror_mod = 33;
-#endif
-
-/*
- * Allocate and initialize a dquot. We don't always allocate fresh memory;
- * we try to reclaim a free dquot if the number of incore dquots are above
- * a threshold.
- * The only field inside the core that gets initialized at this point
- * is the d_id field. The idea is to fill in the entire q_core
- * when we read in the on disk dquot.
- */
-STATIC xfs_dquot_t *
-xfs_qm_dqinit(
- xfs_mount_t *mp,
- xfs_dqid_t id,
- uint type)
-{
- xfs_dquot_t *dqp;
- boolean_t brandnewdquot;
-
- brandnewdquot = xfs_qm_dqalloc_incore(&dqp);
- dqp->dq_flags = type;
- dqp->q_core.d_id = cpu_to_be32(id);
- dqp->q_mount = mp;
-
- /*
- * No need to re-initialize these if this is a reclaimed dquot.
- */
- if (brandnewdquot) {
- dqp->dq_flnext = dqp->dq_flprev = dqp;
- mutex_init(&dqp->q_qlock);
- initnsema(&dqp->q_flock, 1, "fdq");
- sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq");
-
-#ifdef XFS_DQUOT_TRACE
- dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_SLEEP);
- xfs_dqtrace_entry(dqp, "DQINIT");
-#endif
- } else {
- /*
- * Only the q_core portion was zeroed in dqreclaim_one().
- * So, we need to reset others.
- */
- dqp->q_nrefs = 0;
- dqp->q_blkno = 0;
- dqp->MPL_NEXT = dqp->HL_NEXT = NULL;
- dqp->HL_PREVP = dqp->MPL_PREVP = NULL;
- dqp->q_bufoffset = 0;
- dqp->q_fileoffset = 0;
- dqp->q_transp = NULL;
- dqp->q_gdquot = NULL;
- dqp->q_res_bcount = 0;
- dqp->q_res_icount = 0;
- dqp->q_res_rtbcount = 0;
- dqp->q_pincount = 0;
- dqp->q_hash = NULL;
- ASSERT(dqp->dq_flnext == dqp->dq_flprev);
-
-#ifdef XFS_DQUOT_TRACE
- ASSERT(dqp->q_trace);
- xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT");
-#endif
- }
-
- /*
- * log item gets initialized later
- */
- return (dqp);
-}
-
-/*
- * This is called to free all the memory associated with a dquot
- */
-void
-xfs_qm_dqdestroy(
- xfs_dquot_t *dqp)
-{
- ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp));
-
- mutex_destroy(&dqp->q_qlock);
- freesema(&dqp->q_flock);
- sv_destroy(&dqp->q_pinwait);
-
-#ifdef XFS_DQUOT_TRACE
- if (dqp->q_trace)
- ktrace_free(dqp->q_trace);
- dqp->q_trace = NULL;
-#endif
- kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
- atomic_dec(&xfs_Gqm->qm_totaldquots);
-}
-
-/*
- * This is what a 'fresh' dquot inside a dquot chunk looks like on disk.
- */
-STATIC void
-xfs_qm_dqinit_core(
- xfs_dqid_t id,
- uint type,
- xfs_dqblk_t *d)
-{
- /*
- * Caller has zero'd the entire dquot 'chunk' already.
- */
- d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
- d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
- d->dd_diskdq.d_id = cpu_to_be32(id);
- d->dd_diskdq.d_flags = type;
-}
-
-
-#ifdef XFS_DQUOT_TRACE
-/*
- * Dquot tracing for debugging.
- */
-/* ARGSUSED */
-void
-__xfs_dqtrace_entry(
- xfs_dquot_t *dqp,
- char *func,
- void *retaddr,
- xfs_inode_t *ip)
-{
- xfs_dquot_t *udqp = NULL;
- xfs_ino_t ino = 0;
-
- ASSERT(dqp->q_trace);
- if (ip) {
- ino = ip->i_ino;
- udqp = ip->i_udquot;
- }
- ktrace_enter(dqp->q_trace,
- (void *)(__psint_t)DQUOT_KTRACE_ENTRY,
- (void *)func,
- (void *)(__psint_t)dqp->q_nrefs,
- (void *)(__psint_t)dqp->dq_flags,
- (void *)(__psint_t)dqp->q_res_bcount,
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_bcount),
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_icount),
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_blk_hardlimit),
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_blk_softlimit),
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_ino_hardlimit),
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_ino_softlimit),
- (void *)(__psint_t)be32_to_cpu(dqp->q_core.d_id),
- (void *)(__psint_t)current_pid(),
- (void *)(__psint_t)ino,
- (void *)(__psint_t)retaddr,
- (void *)(__psint_t)udqp);
- return;
-}
-#endif
-
-
-/*
- * If default limits are in force, push them into the dquot now.
- * We overwrite the dquot limits only if they are zero and this
- * is not the root dquot.
- */
-void
-xfs_qm_adjust_dqlimits(
- xfs_mount_t *mp,
- xfs_disk_dquot_t *d)
-{
- xfs_quotainfo_t *q = mp->m_quotainfo;
-
- ASSERT(d->d_id);
-
- if (q->qi_bsoftlimit && !d->d_blk_softlimit)
- d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
- if (q->qi_bhardlimit && !d->d_blk_hardlimit)
- d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
- if (q->qi_isoftlimit && !d->d_ino_softlimit)
- d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
- if (q->qi_ihardlimit && !d->d_ino_hardlimit)
- d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
- if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
- d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
- if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
- d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
-}
-
-/*
- * Check the limits and timers of a dquot and start or reset timers
- * if necessary.
- * This gets called even when quota enforcement is OFF, which makes our
- * life a little less complicated. (We just don't reject any quota
- * reservations in that case, when enforcement is off).
- * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
- * enforcement's off.
- * In contrast, warnings are a little different in that they don't
- * 'automatically' get started when limits get exceeded. They do
- * get reset to zero, however, when we find the count to be under
- * the soft limit (they are only ever set non-zero via userspace).
- */
-void
-xfs_qm_adjust_dqtimers(
- xfs_mount_t *mp,
- xfs_disk_dquot_t *d)
-{
- ASSERT(d->d_id);
-
-#ifdef QUOTADEBUG
- if (d->d_blk_hardlimit)
- ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
- be64_to_cpu(d->d_blk_hardlimit));
- if (d->d_ino_hardlimit)
- ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
- be64_to_cpu(d->d_ino_hardlimit));
- if (d->d_rtb_hardlimit)
- ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
- be64_to_cpu(d->d_rtb_hardlimit));
-#endif
- if (!d->d_btimer) {
- if ((d->d_blk_softlimit &&
- (be64_to_cpu(d->d_bcount) >=
- be64_to_cpu(d->d_blk_softlimit))) ||
- (d->d_blk_hardlimit &&
- (be64_to_cpu(d->d_bcount) >=
- be64_to_cpu(d->d_blk_hardlimit)))) {
- d->d_btimer = cpu_to_be32(get_seconds() +
- XFS_QI_BTIMELIMIT(mp));
- } else {
- d->d_bwarns = 0;
- }
- } else {
- if ((!d->d_blk_softlimit ||
- (be64_to_cpu(d->d_bcount) <
- be64_to_cpu(d->d_blk_softlimit))) &&
- (!d->d_blk_hardlimit ||
- (be64_to_cpu(d->d_bcount) <
- be64_to_cpu(d->d_blk_hardlimit)))) {
- d->d_btimer = 0;
- }
- }
-
- if (!d->d_itimer) {
- if ((d->d_ino_softlimit &&
- (be64_to_cpu(d->d_icount) >=
- be64_to_cpu(d->d_ino_softlimit))) ||
- (d->d_ino_hardlimit &&
- (be64_to_cpu(d->d_icount) >=
- be64_to_cpu(d->d_ino_hardlimit)))) {
- d->d_itimer = cpu_to_be32(get_seconds() +
- XFS_QI_ITIMELIMIT(mp));
- } else {
- d->d_iwarns = 0;
- }
- } else {
- if ((!d->d_ino_softlimit ||
- (be64_to_cpu(d->d_icount) <
- be64_to_cpu(d->d_ino_softlimit))) &&
- (!d->d_ino_hardlimit ||
- (be64_to_cpu(d->d_icount) <
- be64_to_cpu(d->d_ino_hardlimit)))) {
- d->d_itimer = 0;
- }
- }
-
- if (!d->d_rtbtimer) {
- if ((d->d_rtb_softlimit &&
- (be64_to_cpu(d->d_rtbcount) >=
- be64_to_cpu(d->d_rtb_softlimit))) ||
- (d->d_rtb_hardlimit &&
- (be64_to_cpu(d->d_rtbcount) >=
- be64_to_cpu(d->d_rtb_hardlimit)))) {
- d->d_rtbtimer = cpu_to_be32(get_seconds() +
- XFS_QI_RTBTIMELIMIT(mp));
- } else {
- d->d_rtbwarns = 0;
- }
- } else {
- if ((!d->d_rtb_softlimit ||
- (be64_to_cpu(d->d_rtbcount) <
- be64_to_cpu(d->d_rtb_softlimit))) &&
- (!d->d_rtb_hardlimit ||
- (be64_to_cpu(d->d_rtbcount) <
- be64_to_cpu(d->d_rtb_hardlimit)))) {
- d->d_rtbtimer = 0;
- }
- }
-}
-
-/*
- * initialize a buffer full of dquots and log the whole thing
- */
-STATIC void
-xfs_qm_init_dquot_blk(
- xfs_trans_t *tp,
- xfs_mount_t *mp,
- xfs_dqid_t id,
- uint type,
- xfs_buf_t *bp)
-{
- xfs_dqblk_t *d;
- int curid, i;
-
- ASSERT(tp);
- ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
-
- d = (xfs_dqblk_t *)XFS_BUF_PTR(bp);
-
- /*
- * ID of the first dquot in the block - ids are zero based.
- */
- curid = id - (id % XFS_QM_DQPERBLK(mp));
- ASSERT(curid >= 0);
- memset(d, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp)));
- for (i = 0; i < XFS_QM_DQPERBLK(mp); i++, d++, curid++)
- xfs_qm_dqinit_core(curid, type, d);
- xfs_trans_dquot_buf(tp, bp,
- (type & XFS_DQ_USER ? XFS_BLI_UDQUOT_BUF :
- ((type & XFS_DQ_PROJ) ? XFS_BLI_PDQUOT_BUF :
- XFS_BLI_GDQUOT_BUF)));
- xfs_trans_log_buf(tp, bp, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1);
-}
-
-
-
-/*
- * Allocate a block and fill it with dquots.
- * This is called when the bmapi finds a hole.
- */
-STATIC int
-xfs_qm_dqalloc(
- xfs_trans_t **tpp,
- xfs_mount_t *mp,
- xfs_dquot_t *dqp,
- xfs_inode_t *quotip,
- xfs_fileoff_t offset_fsb,
- xfs_buf_t **O_bpp)
-{
- xfs_fsblock_t firstblock;
- xfs_bmap_free_t flist;
- xfs_bmbt_irec_t map;
- int nmaps, error, committed;
- xfs_buf_t *bp;
- xfs_trans_t *tp = *tpp;
-
- ASSERT(tp != NULL);
- xfs_dqtrace_entry(dqp, "DQALLOC");
-
- /*
- * Initialize the bmap freelist prior to calling bmapi code.
- */
- XFS_BMAP_INIT(&flist, &firstblock);
- xfs_ilock(quotip, XFS_ILOCK_EXCL);
- /*
- * Return if this type of quota was turned off while we didn't
- * hold the inode lock.
- */
- if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
- xfs_iunlock(quotip, XFS_ILOCK_EXCL);
- return (ESRCH);
- }
-
- /*
- * xfs_trans_commit normally decrements the vnode ref count
- * when it unlocks the inode. Since we want to keep the quota
- * inode around, we bump the vnode ref count now.
- */
- VN_HOLD(XFS_ITOV(quotip));
-
- xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
- nmaps = 1;
- if ((error = xfs_bmapi(tp, quotip,
- offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB,
- XFS_BMAPI_METADATA | XFS_BMAPI_WRITE,
- &firstblock,
- XFS_QM_DQALLOC_SPACE_RES(mp),
- &map, &nmaps, &flist, NULL))) {
- goto error0;
- }
- ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
- ASSERT(nmaps == 1);
- ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
- (map.br_startblock != HOLESTARTBLOCK));
-
- /*
- * Keep track of the blkno to save a lookup later
- */
- dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
-
- /* now we can just get the buffer (there's nothing to read yet) */
- bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
- dqp->q_blkno,
- XFS_QI_DQCHUNKLEN(mp),
- 0);
- if (!bp || (error = XFS_BUF_GETERROR(bp)))
- goto error1;
- /*
- * Make a chunk of dquots out of this buffer and log
- * the entire thing.
- */
- xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
- dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
-
- /*
- * xfs_bmap_finish() may commit the current transaction and
- * start a second transaction if the freelist is not empty.
- *
- * Since we still want to modify this buffer, we need to
- * ensure that the buffer is not released on commit of
- * the first transaction and ensure the buffer is added to the
- * second transaction.
- *
- * If there is only one transaction then don't stop the buffer
- * from being released when it commits later on.
- */
-
- xfs_trans_bhold(tp, bp);
-
- if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
- goto error1;
- }
-
- if (committed) {
- tp = *tpp;
- xfs_trans_bjoin(tp, bp);
- } else {
- xfs_trans_bhold_release(tp, bp);
- }
-
- *O_bpp = bp;
- return 0;
-
- error1:
- xfs_bmap_cancel(&flist);
- error0:
- xfs_iunlock(quotip, XFS_ILOCK_EXCL);
-
- return (error);
-}
-
-/*
- * Maps a dquot to the buffer containing its on-disk version.
- * This returns a ptr to the buffer containing the on-disk dquot
- * in the bpp param, and a ptr to the on-disk dquot within that buffer
- */
-STATIC int
-xfs_qm_dqtobp(
- xfs_trans_t **tpp,
- xfs_dquot_t *dqp,
- xfs_disk_dquot_t **O_ddpp,
- xfs_buf_t **O_bpp,
- uint flags)
-{
- xfs_bmbt_irec_t map;
- int nmaps, error;
- xfs_buf_t *bp;
- xfs_inode_t *quotip;
- xfs_mount_t *mp;
- xfs_disk_dquot_t *ddq;
- xfs_dqid_t id;
- boolean_t newdquot;
- xfs_trans_t *tp = (tpp ? *tpp : NULL);
-
- mp = dqp->q_mount;
- id = be32_to_cpu(dqp->q_core.d_id);
- nmaps = 1;
- newdquot = B_FALSE;
-
- /*
- * If we don't know where the dquot lives, find out.
- */
- if (dqp->q_blkno == (xfs_daddr_t) 0) {
- /* We use the id as an index */
- dqp->q_fileoffset = (xfs_fileoff_t)id / XFS_QM_DQPERBLK(mp);
- nmaps = 1;
- quotip = XFS_DQ_TO_QIP(dqp);
- xfs_ilock(quotip, XFS_ILOCK_SHARED);
- /*
- * Return if this type of quota was turned off while we didn't
- * hold the inode lock.
- */
- if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
- xfs_iunlock(quotip, XFS_ILOCK_SHARED);
- return (ESRCH);
- }
- /*
- * Find the block map; no allocations yet
- */
- error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
- XFS_DQUOT_CLUSTER_SIZE_FSB,
- XFS_BMAPI_METADATA,
- NULL, 0, &map, &nmaps, NULL, NULL);
-
- xfs_iunlock(quotip, XFS_ILOCK_SHARED);
- if (error)
- return (error);
- ASSERT(nmaps == 1);
- ASSERT(map.br_blockcount == 1);
-
- /*
- * offset of dquot in the (fixed sized) dquot chunk.
- */
- dqp->q_bufoffset = (id % XFS_QM_DQPERBLK(mp)) *
- sizeof(xfs_dqblk_t);
- if (map.br_startblock == HOLESTARTBLOCK) {
- /*
- * We don't allocate unless we're asked to
- */
- if (!(flags & XFS_QMOPT_DQALLOC))
- return (ENOENT);
-
- ASSERT(tp);
- if ((error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
- dqp->q_fileoffset, &bp)))
- return (error);
- tp = *tpp;
- newdquot = B_TRUE;
- } else {
- /*
- * store the blkno etc so that we don't have to do the
- * mapping all the time
- */
- dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
- }
- }
- ASSERT(dqp->q_blkno != DELAYSTARTBLOCK);
- ASSERT(dqp->q_blkno != HOLESTARTBLOCK);
-
- /*
- * Read in the buffer, unless we've just done the allocation
- * (in which case we already have the buf).
- */
- if (! newdquot) {
- xfs_dqtrace_entry(dqp, "DQTOBP READBUF");
- if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
- dqp->q_blkno,
- XFS_QI_DQCHUNKLEN(mp),
- 0, &bp))) {
- return (error);
- }
- if (error || !bp)
- return XFS_ERROR(error);
- }
- ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
-
- /*
- * calculate the location of the dquot inside the buffer.
- */
- ddq = (xfs_disk_dquot_t *)((char *)XFS_BUF_PTR(bp) + dqp->q_bufoffset);
-
- /*
- * A simple sanity check in case we got a corrupted dquot...
- */
- if (xfs_qm_dqcheck(ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES,
- flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN),
- "dqtobp")) {
- if (!(flags & XFS_QMOPT_DQREPAIR)) {
- xfs_trans_brelse(tp, bp);
- return XFS_ERROR(EIO);
- }
- XFS_BUF_BUSY(bp); /* We dirtied this */
- }
-
- *O_bpp = bp;
- *O_ddpp = ddq;
-
- return (0);
-}
-
-
-/*
- * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
- * and release the buffer immediately.
- *
- */
-/* ARGSUSED */
-STATIC int
-xfs_qm_dqread(
- xfs_trans_t **tpp,
- xfs_dqid_t id,
- xfs_dquot_t *dqp, /* dquot to get filled in */
- uint flags)
-{
- xfs_disk_dquot_t *ddqp;
- xfs_buf_t *bp;
- int error;
- xfs_trans_t *tp;
-
- ASSERT(tpp);
-
- /*
- * get a pointer to the on-disk dquot and the buffer containing it
- * dqp already knows its own type (GROUP/USER).
- */
- xfs_dqtrace_entry(dqp, "DQREAD");
- if ((error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, flags))) {
- return (error);
- }
- tp = *tpp;
-
- /* copy everything from disk dquot to the incore dquot */
- memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
- ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);
- xfs_qm_dquot_logitem_init(dqp);
-
- /*
- * Reservation counters are defined as reservation plus current usage
- * to avoid having to add them together every time.
- */
- dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
- dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
- dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
-
- /* Mark the buf so that this will stay incore a little longer */
- XFS_BUF_SET_VTYPE_REF(bp, B_FS_DQUOT, XFS_DQUOT_REF);
-
- /*
- * We got the buffer with a xfs_trans_read_buf() (in dqtobp())
- * So we need to release with xfs_trans_brelse().
- * The strategy here is identical to that of inodes; we lock
- * the dquot in xfs_qm_dqget() before making it accessible to
- * others. This is because dquots, like inodes, need a good level of
- * concurrency, and we don't want to take locks on the entire buffers
- * for dquot accesses.
- * Note also that the dquot buffer may even be dirty at this point, if
- * this particular dquot was repaired. We still aren't afraid to
- * brelse it because we have the changes incore.
- */
- ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
- xfs_trans_brelse(tp, bp);
-
- return (error);
-}
-
-
-/*
- * allocate an incore dquot from the kernel heap,
- * and fill its core with quota information kept on disk.
- * If XFS_QMOPT_DQALLOC is set, it'll allocate a dquot on disk
- * if it wasn't already allocated.
- */
-STATIC int
-xfs_qm_idtodq(
- xfs_mount_t *mp,
- xfs_dqid_t id, /* gid or uid, depending on type */
- uint type, /* UDQUOT or GDQUOT */
- uint flags, /* DQALLOC, DQREPAIR */
- xfs_dquot_t **O_dqpp)/* OUT : incore dquot, not locked */
-{
- xfs_dquot_t *dqp;
- int error;
- xfs_trans_t *tp;
- int cancelflags=0;
-
- dqp = xfs_qm_dqinit(mp, id, type);
- tp = NULL;
- if (flags & XFS_QMOPT_DQALLOC) {
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
- if ((error = xfs_trans_reserve(tp,
- XFS_QM_DQALLOC_SPACE_RES(mp),
- XFS_WRITE_LOG_RES(mp) +
- BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1 +
- 128,
- 0,
- XFS_TRANS_PERM_LOG_RES,
- XFS_WRITE_LOG_COUNT))) {
- cancelflags = 0;
- goto error0;
- }
- cancelflags = XFS_TRANS_RELEASE_LOG_RES;
- }
-
- /*
- * Read it from disk; xfs_dqread() takes care of
- * all the necessary initialization of dquot's fields (locks, etc)
- */
- if ((error = xfs_qm_dqread(&tp, id, dqp, flags))) {
- /*
- * This can happen if quotas got turned off (ESRCH),
- * or if the dquot didn't exist on disk and we ask to
- * allocate (ENOENT).
- */
- xfs_dqtrace_entry(dqp, "DQREAD FAIL");
- cancelflags |= XFS_TRANS_ABORT;
- goto error0;
- }
- if (tp) {
- if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES)))
- goto error1;
- }
-
- *O_dqpp = dqp;
- return (0);
-
- error0:
- ASSERT(error);
- if (tp)
- xfs_trans_cancel(tp, cancelflags);
- error1:
- xfs_qm_dqdestroy(dqp);
- *O_dqpp = NULL;
- return (error);
-}
-
-/*
- * Lookup a dquot in the incore dquot hashtable. We keep two separate
- * hashtables for user and group dquots; these are global tables
- * inside the XQM, not per-filesystem tables.
- * The hash chain must be locked by caller, and it is left locked
- * on return. Returning dquot is locked.
- */
-STATIC int
-xfs_qm_dqlookup(
- xfs_mount_t *mp,
- xfs_dqid_t id,
- xfs_dqhash_t *qh,
- xfs_dquot_t **O_dqpp)
-{
- xfs_dquot_t *dqp;
- uint flist_locked;
- xfs_dquot_t *d;
-
- ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
-
- flist_locked = B_FALSE;
-
- /*
- * Traverse the hashchain looking for a match
- */
- for (dqp = qh->qh_next; dqp != NULL; dqp = dqp->HL_NEXT) {
- /*
- * We already have the hashlock. We don't need the
- * dqlock to look at the id field of the dquot, since the
- * id can't be modified without the hashlock anyway.
- */
- if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) {
- xfs_dqtrace_entry(dqp, "DQFOUND BY LOOKUP");
- /*
- * All in core dquots must be on the dqlist of mp
- */
- ASSERT(dqp->MPL_PREVP != NULL);
-
- xfs_dqlock(dqp);
- if (dqp->q_nrefs == 0) {
- ASSERT (XFS_DQ_IS_ON_FREELIST(dqp));
- if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
- xfs_dqtrace_entry(dqp, "DQLOOKUP: WANT");
-
- /*
- * We may have raced with dqreclaim_one()
- * (and lost). So, flag that we don't
- * want the dquot to be reclaimed.
- */
- dqp->dq_flags |= XFS_DQ_WANT;
- xfs_dqunlock(dqp);
- xfs_qm_freelist_lock(xfs_Gqm);
- xfs_dqlock(dqp);
- dqp->dq_flags &= ~(XFS_DQ_WANT);
- }
- flist_locked = B_TRUE;
- }
-
- /*
- * id couldn't have changed; we had the hashlock all
- * along
- */
- ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);
-
- if (flist_locked) {
- if (dqp->q_nrefs != 0) {
- xfs_qm_freelist_unlock(xfs_Gqm);
- flist_locked = B_FALSE;
- } else {
- /*
- * take it off the freelist
- */
- xfs_dqtrace_entry(dqp,
- "DQLOOKUP: TAKEOFF FL");
- XQM_FREELIST_REMOVE(dqp);
- /* xfs_qm_freelist_print(&(xfs_Gqm->
- qm_dqfreelist),
- "after removal"); */
- }
- }
-
- /*
- * grab a reference
- */
- XFS_DQHOLD(dqp);
-
- if (flist_locked)
- xfs_qm_freelist_unlock(xfs_Gqm);
- /*
- * move the dquot to the front of the hashchain
- */
- ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
- if (dqp->HL_PREVP != &qh->qh_next) {
- xfs_dqtrace_entry(dqp,
- "DQLOOKUP: HASH MOVETOFRONT");
- if ((d = dqp->HL_NEXT))
- d->HL_PREVP = dqp->HL_PREVP;
- *(dqp->HL_PREVP) = d;
- d = qh->qh_next;
- d->HL_PREVP = &dqp->HL_NEXT;
- dqp->HL_NEXT = d;
- dqp->HL_PREVP = &qh->qh_next;
- qh->qh_next = dqp;
- }
- xfs_dqtrace_entry(dqp, "LOOKUP END");
- *O_dqpp = dqp;
- ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
- return (0);
- }
- }
-
- *O_dqpp = NULL;
- ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
- return (1);
-}
-
-/*
- * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
- * locked dquot, doing an allocation (if requested) as needed.
- * When both an inode and an id are given, the inode's id takes precedence.
- * That is, if the id changes while we don't hold the ilock inside this
- * function, the new dquot is returned, not necessarily the one requested
- * in the id argument.
- */
-int
-xfs_qm_dqget(
- xfs_mount_t *mp,
- xfs_inode_t *ip, /* locked inode (optional) */
- xfs_dqid_t id, /* uid/projid/gid depending on type */
- uint type, /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
- uint flags, /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
- xfs_dquot_t **O_dqpp) /* OUT : locked incore dquot */
-{
- xfs_dquot_t *dqp;
- xfs_dqhash_t *h;
- uint version;
- int error;
-
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
- if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
- (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
- (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
- return (ESRCH);
- }
- h = XFS_DQ_HASH(mp, id, type);
-
-#ifdef DEBUG
- if (xfs_do_dqerror) {
- if ((xfs_dqerror_target == mp->m_ddev_targp) &&
- (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
- cmn_err(CE_DEBUG, "Returning error in dqget");
- return (EIO);
- }
- }
-#endif
-
- again:
-
-#ifdef DEBUG
- ASSERT(type == XFS_DQ_USER ||
- type == XFS_DQ_PROJ ||
- type == XFS_DQ_GROUP);
- if (ip) {
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- if (type == XFS_DQ_USER)
- ASSERT(ip->i_udquot == NULL);
- else
- ASSERT(ip->i_gdquot == NULL);
- }
-#endif
- XFS_DQ_HASH_LOCK(h);
-
- /*
- * Look in the cache (hashtable).
- * The chain is kept locked during lookup.
- */
- if (xfs_qm_dqlookup(mp, id, h, O_dqpp) == 0) {
- XQM_STATS_INC(xqmstats.xs_qm_dqcachehits);
- /*
- * The dquot was found, moved to the front of the chain,
- * taken off the freelist if it was on it, and locked
- * at this point. Just unlock the hashchain and return.
- */
- ASSERT(*O_dqpp);
- ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
- XFS_DQ_HASH_UNLOCK(h);
- xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)");
- return (0); /* success */
- }
- XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
-
- /*
- * Dquot cache miss. We don't want to keep the inode lock across
- * a (potential) disk read. Also we don't want to deal with the lock
- * ordering between quotainode and this inode. OTOH, dropping the inode
- * lock here means dealing with a chown that can happen before
- * we re-acquire the lock.
- */
- if (ip)
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- /*
- * Save the hashchain version stamp, and unlock the chain, so that
- * we don't keep the lock across a disk read
- */
- version = h->qh_version;
- XFS_DQ_HASH_UNLOCK(h);
-
- /*
- * Allocate the dquot on the kernel heap, and read the ondisk
- * portion off the disk. Also, do all the necessary initialization
- * This can return ENOENT if dquot didn't exist on disk and we didn't
- * ask it to allocate; ESRCH if quotas got turned off suddenly.
- */
- if ((error = xfs_qm_idtodq(mp, id, type,
- flags & (XFS_QMOPT_DQALLOC|XFS_QMOPT_DQREPAIR|
- XFS_QMOPT_DOWARN),
- &dqp))) {
- if (ip)
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- return (error);
- }
-
- /*
- * See if this is mount code calling to look at the overall quota limits
- * which are stored in the id == 0 user or group's dquot.
- * Since we may not have done a quotacheck by this point, just return
- * the dquot without attaching it to any hashtables, lists, etc, or even
- * taking a reference.
- * The caller must dqdestroy this once done.
- */
- if (flags & XFS_QMOPT_DQSUSER) {
- ASSERT(id == 0);
- ASSERT(! ip);
- goto dqret;
- }
-
- /*
- * Dquot lock comes after hashlock in the lock ordering
- */
- if (ip) {
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- if (! XFS_IS_DQTYPE_ON(mp, type)) {
- /* inode stays locked on return */
- xfs_qm_dqdestroy(dqp);
- return XFS_ERROR(ESRCH);
- }
- /*
- * A dquot could be attached to this inode by now, since
- * we had dropped the ilock.
- */
- if (type == XFS_DQ_USER) {
- if (ip->i_udquot) {
- xfs_qm_dqdestroy(dqp);
- dqp = ip->i_udquot;
- xfs_dqlock(dqp);
- goto dqret;
- }
- } else {
- if (ip->i_gdquot) {
- xfs_qm_dqdestroy(dqp);
- dqp = ip->i_gdquot;
- xfs_dqlock(dqp);
- goto dqret;
- }
- }
- }
-
- /*
- * Hashlock comes after ilock in lock order
- */
- XFS_DQ_HASH_LOCK(h);
- if (version != h->qh_version) {
- xfs_dquot_t *tmpdqp;
- /*
- * Now, see if somebody else put the dquot in the
- * hashtable before us. This can happen because we didn't
- * keep the hashchain lock. We don't have to worry about
- * lock order between the two dquots here since dqp isn't
- * on any findable lists yet.
- */
- if (xfs_qm_dqlookup(mp, id, h, &tmpdqp) == 0) {
- /*
- * Duplicate found. Just throw away the new dquot
- * and start over.
- */
- xfs_qm_dqput(tmpdqp);
- XFS_DQ_HASH_UNLOCK(h);
- xfs_qm_dqdestroy(dqp);
- XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
- goto again;
- }
- }
-
- /*
- * Put the dquot at the beginning of the hash-chain and mp's list
- * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock ..
- */
- ASSERT(XFS_DQ_IS_HASH_LOCKED(h));
- dqp->q_hash = h;
- XQM_HASHLIST_INSERT(h, dqp);
-
- /*
- * Attach this dquot to this filesystem's list of all dquots,
- * kept inside the mount structure in m_quotainfo field
- */
- xfs_qm_mplist_lock(mp);
-
- /*
- * We return a locked dquot to the caller, with a reference taken
- */
- xfs_dqlock(dqp);
- dqp->q_nrefs = 1;
-
- XQM_MPLIST_INSERT(&(XFS_QI_MPL_LIST(mp)), dqp);
-
- xfs_qm_mplist_unlock(mp);
- XFS_DQ_HASH_UNLOCK(h);
- dqret:
- ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
- xfs_dqtrace_entry(dqp, "DQGET DONE");
- *O_dqpp = dqp;
- return (0);
-}
-
-
-/*
- * Release a reference to the dquot (decrement ref-count)
- * and unlock it. If there is a group quota attached to this
- * dquot, carefully release that too without tripping over
- * deadlocks'n'stuff.
- */
-void
-xfs_qm_dqput(
- xfs_dquot_t *dqp)
-{
- xfs_dquot_t *gdqp;
-
- ASSERT(dqp->q_nrefs > 0);
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
- xfs_dqtrace_entry(dqp, "DQPUT");
-
- if (dqp->q_nrefs != 1) {
- dqp->q_nrefs--;
- xfs_dqunlock(dqp);
- return;
- }
-
- /*
- * drop the dqlock and acquire the freelist and dqlock
- * in the right order; but try to get it out-of-order first
- */
- if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
- xfs_dqtrace_entry(dqp, "DQPUT: FLLOCK-WAIT");
- xfs_dqunlock(dqp);
- xfs_qm_freelist_lock(xfs_Gqm);
- xfs_dqlock(dqp);
- }
-
- while (1) {
- gdqp = NULL;
-
- /* We can't depend on nrefs being == 1 here */
- if (--dqp->q_nrefs == 0) {
- xfs_dqtrace_entry(dqp, "DQPUT: ON FREELIST");
- /*
- * insert at end of the freelist.
- */
- XQM_FREELIST_INSERT(&(xfs_Gqm->qm_dqfreelist), dqp);
-
- /*
- * If we just added a udquot to the freelist, then
- * we want to release the gdquot reference that
- * it (probably) has. Otherwise it'll keep the
- * gdquot from getting reclaimed.
- */
- if ((gdqp = dqp->q_gdquot)) {
- /*
- * Avoid a recursive dqput call
- */
- xfs_dqlock(gdqp);
- dqp->q_gdquot = NULL;
- }
-
- /* xfs_qm_freelist_print(&(xfs_Gqm->qm_dqfreelist),
- "@@@@@++ Free list (after append) @@@@@+");
- */
- }
- xfs_dqunlock(dqp);
-
- /*
- * If we had a group quota inside the user quota as a hint,
- * release it now.
- */
- if (! gdqp)
- break;
- dqp = gdqp;
- }
- xfs_qm_freelist_unlock(xfs_Gqm);
-}
-
-/*
- * Release a dquot. Flush it if dirty, then dqput() it.
- * dquot must not be locked.
- */
-void
-xfs_qm_dqrele(
- xfs_dquot_t *dqp)
-{
- ASSERT(dqp);
- xfs_dqtrace_entry(dqp, "DQRELE");
-
- xfs_dqlock(dqp);
- /*
- * We don't care to flush it if the dquot is dirty here.
- * That will create stutters that we want to avoid.
- * Instead we do a delayed write when we try to reclaim
- * a dirty dquot. Also xfs_sync will take part of the burden...
- */
- xfs_qm_dqput(dqp);
-}
-
-
-/*
- * Write a modified dquot to disk.
- * The dquot must be locked and the flush lock too taken by caller.
- * The flush lock will not be unlocked until the dquot reaches the disk,
- * but the dquot is free to be unlocked and modified by the caller
- * in the interim. Dquot is still locked on return. This behavior is
- * identical to that of inodes.
- */
-int
-xfs_qm_dqflush(
- xfs_dquot_t *dqp,
- uint flags)
-{
- xfs_mount_t *mp;
- xfs_buf_t *bp;
- xfs_disk_dquot_t *ddqp;
- int error;
-
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
- ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));
- xfs_dqtrace_entry(dqp, "DQFLUSH");
-
- /*
- * If not dirty, nada.
- */
- if (!XFS_DQ_IS_DIRTY(dqp)) {
- xfs_dqfunlock(dqp);
- return (0);
- }
-
- /*
- * Can't flush a pinned dquot. Wait for it.
- */
- xfs_qm_dqunpin_wait(dqp);
-
- /*
- * This may have been unpinned because the filesystem is shutting
- * down forcibly. If that's the case we must not write this dquot
- * to disk, because the log record didn't make it to disk!
- */
- if (XFS_FORCED_SHUTDOWN(dqp->q_mount)) {
- dqp->dq_flags &= ~(XFS_DQ_DIRTY);
- xfs_dqfunlock(dqp);
- return XFS_ERROR(EIO);
- }
-
- /*
- * Get the buffer containing the on-disk dquot
- * We don't need a transaction envelope because we know that the
- * on-disk dquot has already been allocated.
- */
- if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) {
- xfs_dqtrace_entry(dqp, "DQTOBP FAIL");
- ASSERT(error != ENOENT);
- /*
- * Quotas could have gotten turned off (ESRCH)
- */
- xfs_dqfunlock(dqp);
- return (error);
- }
-
- if (xfs_qm_dqcheck(&dqp->q_core, be32_to_cpu(ddqp->d_id),
- 0, XFS_QMOPT_DOWARN, "dqflush (incore copy)")) {
- xfs_force_shutdown(dqp->q_mount, SHUTDOWN_CORRUPT_INCORE);
- return XFS_ERROR(EIO);
- }
-
- /* This is the only portion of data that needs to persist */
- memcpy(ddqp, &(dqp->q_core), sizeof(xfs_disk_dquot_t));
-
- /*
- * Clear the dirty field and remember the flush lsn for later use.
- */
- dqp->dq_flags &= ~(XFS_DQ_DIRTY);
- mp = dqp->q_mount;
-
- /* lsn is 64 bits */
- spin_lock(&mp->m_ail_lock);
- dqp->q_logitem.qli_flush_lsn = dqp->q_logitem.qli_item.li_lsn;
- spin_unlock(&mp->m_ail_lock);
-
- /*
- * Attach an iodone routine so that we can remove this dquot from the
- * AIL and release the flush lock once the dquot is synced to disk.
- */
- xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t *, xfs_log_item_t *))
- xfs_qm_dqflush_done, &(dqp->q_logitem.qli_item));
- /*
- * If the buffer is pinned then push on the log so we won't
- * get stuck waiting in the write for too long.
- */
- if (XFS_BUF_ISPINNED(bp)) {
- xfs_dqtrace_entry(dqp, "DQFLUSH LOG FORCE");
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
- }
-
- if (flags & XFS_QMOPT_DELWRI) {
- xfs_bdwrite(mp, bp);
- } else if (flags & XFS_QMOPT_ASYNC) {
- error = xfs_bawrite(mp, bp);
- } else {
- error = xfs_bwrite(mp, bp);
- }
- xfs_dqtrace_entry(dqp, "DQFLUSH END");
- /*
- * dqp is still locked, but caller is free to unlock it now.
- */
- return (error);
-
-}
-
-/*
- * This is the dquot flushing I/O completion routine. It is called
- * from interrupt level when the buffer containing the dquot is
- * flushed to disk. It is responsible for removing the dquot logitem
- * from the AIL if it has not been re-logged, and unlocking the dquot's
- * flush lock. This behavior is very similar to that of inodes.
- */
-/*ARGSUSED*/
-STATIC void
-xfs_qm_dqflush_done(
- xfs_buf_t *bp,
- xfs_dq_logitem_t *qip)
-{
- xfs_dquot_t *dqp;
-
- dqp = qip->qli_dquot;
-
- /*
- * We only want to pull the item from the AIL if its
- * location in the log has not changed since we started the flush.
- * Thus, we only bother if the dquot's lsn has
- * not changed. First we check the lsn outside the lock
- * since it's cheaper, and then we recheck while
- * holding the lock before removing the dquot from the AIL.
- */
- if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
- qip->qli_item.li_lsn == qip->qli_flush_lsn) {
-
- spin_lock(&dqp->q_mount->m_ail_lock);
- /*
- * xfs_trans_delete_ail() drops the AIL lock.
- */
- if (qip->qli_item.li_lsn == qip->qli_flush_lsn)
- xfs_trans_delete_ail(dqp->q_mount,
- (xfs_log_item_t*)qip);
- else
- spin_unlock(&dqp->q_mount->m_ail_lock);
- }
-
- /*
- * Release the dq's flush lock since we're done with it.
- */
- xfs_dqfunlock(dqp);
-}
-
-
-int
-xfs_qm_dqflock_nowait(
- xfs_dquot_t *dqp)
-{
- int locked;
-
- locked = cpsema(&((dqp)->q_flock));
-
- /* XXX ifdef these out */
- if (locked)
- (dqp)->dq_flags |= XFS_DQ_FLOCKED;
- return (locked);
-}
-
-
-int
-xfs_qm_dqlock_nowait(
- xfs_dquot_t *dqp)
-{
- return (mutex_trylock(&((dqp)->q_qlock)));
-}
-
-void
-xfs_dqlock(
- xfs_dquot_t *dqp)
-{
- mutex_lock(&(dqp->q_qlock));
-}
-
-void
-xfs_dqunlock(
- xfs_dquot_t *dqp)
-{
- mutex_unlock(&(dqp->q_qlock));
- if (dqp->q_logitem.qli_dquot == dqp) {
- /* Once was dqp->q_mount, but might just have been cleared */
- xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_mountp,
- (xfs_log_item_t*)&(dqp->q_logitem));
- }
-}
-
-
-void
-xfs_dqunlock_nonotify(
- xfs_dquot_t *dqp)
-{
- mutex_unlock(&(dqp->q_qlock));
-}
-
-void
-xfs_dqlock2(
- xfs_dquot_t *d1,
- xfs_dquot_t *d2)
-{
- if (d1 && d2) {
- ASSERT(d1 != d2);
- if (be32_to_cpu(d1->q_core.d_id) >
- be32_to_cpu(d2->q_core.d_id)) {
- xfs_dqlock(d2);
- xfs_dqlock(d1);
- } else {
- xfs_dqlock(d1);
- xfs_dqlock(d2);
- }
- } else {
- if (d1) {
- xfs_dqlock(d1);
- } else if (d2) {
- xfs_dqlock(d2);
- }
- }
-}
-
-
-/*
- * Take a dquot out of the mount's dqlist as well as the hashlist.
- * This is called via unmount as well as quotaoff, and the purge
- * will always succeed unless there are soft (temp) references
- * outstanding.
- *
- * This returns 0 if it was purged, 1 if it wasn't. It's not an error code
- * that we're returning! XXXsup - not cool.
- */
-/* ARGSUSED */
-int
-xfs_qm_dqpurge(
- xfs_dquot_t *dqp,
- uint flags)
-{
- xfs_dqhash_t *thishash;
- xfs_mount_t *mp = dqp->q_mount;
-
- ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
- ASSERT(XFS_DQ_IS_HASH_LOCKED(dqp->q_hash));
-
- xfs_dqlock(dqp);
- /*
- * We really can't afford to purge a dquot that is
- * referenced, because these are hard refs.
- * It shouldn't happen in general because we went through _all_ inodes in
- * dqrele_all_inodes before calling this and didn't let the mountlock go.
- * However it is possible that we have dquots with temporary
- * references that are not attached to an inode. e.g. see xfs_setattr().
- */
- if (dqp->q_nrefs != 0) {
- xfs_dqunlock(dqp);
- XFS_DQ_HASH_UNLOCK(dqp->q_hash);
- return (1);
- }
-
- ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));
-
- /*
- * If we're turning off quotas, we have to make sure that, for
- * example, we don't delete quota disk blocks while dquots are
- * in the process of getting written to those disk blocks.
- * This dquot might well be on AIL, and we can't leave it there
- * if we're turning off quotas. Basically, we need this flush
- * lock, and are willing to block on it.
- */
- if (! xfs_qm_dqflock_nowait(dqp)) {
- /*
- * Block on the flush lock after nudging dquot buffer,
- * if it is incore.
- */
- xfs_qm_dqflock_pushbuf_wait(dqp);
- }
-
- /*
- * XXXIf we're turning this type of quotas off, we don't care
- * about the dirty metadata sitting in this dquot. OTOH, if
- * we're unmounting, we do care, so we flush it and wait.
- */
- if (XFS_DQ_IS_DIRTY(dqp)) {
- int error;
- xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY");
- /* dqflush unlocks dqflock */
- /*
- * Given that dqpurge is a very rare occurrence, it is OK
- * that we're holding the hashlist and mplist locks
- * across the disk write. But, ... XXXsup
- *
- * We don't care about getting disk errors here. We need
- * to purge this dquot anyway, so we go ahead regardless.
- */
- error = xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC);
- if (error)
- xfs_fs_cmn_err(CE_WARN, mp,
- "xfs_qm_dqpurge: dquot %p flush failed", dqp);
- xfs_dqflock(dqp);
- }
- ASSERT(dqp->q_pincount == 0);
- ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
- !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
-
- thishash = dqp->q_hash;
- XQM_HASHLIST_REMOVE(thishash, dqp);
- XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(mp)), dqp);
- /*
- * XXX Move this to the front of the freelist, if we can get the
- * freelist lock.
- */
- ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));
-
- dqp->q_mount = NULL;
- dqp->q_hash = NULL;
- dqp->dq_flags = XFS_DQ_INACTIVE;
- memset(&dqp->q_core, 0, sizeof(dqp->q_core));
- xfs_dqfunlock(dqp);
- xfs_dqunlock(dqp);
- XFS_DQ_HASH_UNLOCK(thishash);
- return (0);
-}
-
-
-#ifdef QUOTADEBUG
-void
-xfs_qm_dqprint(xfs_dquot_t *dqp)
-{
- cmn_err(CE_DEBUG, "-----------KERNEL DQUOT----------------");
- cmn_err(CE_DEBUG, "---- dquotID = %d",
- (int)be32_to_cpu(dqp->q_core.d_id));
- cmn_err(CE_DEBUG, "---- type = %s", DQFLAGTO_TYPESTR(dqp));
- cmn_err(CE_DEBUG, "---- fs = 0x%p", dqp->q_mount);
- cmn_err(CE_DEBUG, "---- blkno = 0x%x", (int) dqp->q_blkno);
- cmn_err(CE_DEBUG, "---- boffset = 0x%x", (int) dqp->q_bufoffset);
- cmn_err(CE_DEBUG, "---- blkhlimit = %Lu (0x%x)",
- be64_to_cpu(dqp->q_core.d_blk_hardlimit),
- (int)be64_to_cpu(dqp->q_core.d_blk_hardlimit));
- cmn_err(CE_DEBUG, "---- blkslimit = %Lu (0x%x)",
- be64_to_cpu(dqp->q_core.d_blk_softlimit),
- (int)be64_to_cpu(dqp->q_core.d_blk_softlimit));
- cmn_err(CE_DEBUG, "---- inohlimit = %Lu (0x%x)",
- be64_to_cpu(dqp->q_core.d_ino_hardlimit),
- (int)be64_to_cpu(dqp->q_core.d_ino_hardlimit));
- cmn_err(CE_DEBUG, "---- inoslimit = %Lu (0x%x)",
- be64_to_cpu(dqp->q_core.d_ino_softlimit),
- (int)be64_to_cpu(dqp->q_core.d_ino_softlimit));
- cmn_err(CE_DEBUG, "---- bcount = %Lu (0x%x)",
- be64_to_cpu(dqp->q_core.d_bcount),
- (int)be64_to_cpu(dqp->q_core.d_bcount));
- cmn_err(CE_DEBUG, "---- icount = %Lu (0x%x)",
- be64_to_cpu(dqp->q_core.d_icount),
- (int)be64_to_cpu(dqp->q_core.d_icount));
- cmn_err(CE_DEBUG, "---- btimer = %d",
- (int)be32_to_cpu(dqp->q_core.d_btimer));
- cmn_err(CE_DEBUG, "---- itimer = %d",
- (int)be32_to_cpu(dqp->q_core.d_itimer));
- cmn_err(CE_DEBUG, "---------------------------");
-}
-#endif
-
-/*
- * Give the buffer a little push if it is incore and
- * wait on the flush lock.
- */
-void
-xfs_qm_dqflock_pushbuf_wait(
- xfs_dquot_t *dqp)
-{
- xfs_buf_t *bp;
-
- /*
- * Check to see if the dquot has been flushed as a delayed
- * write. If so, grab its buffer and send it
- * out immediately. We'll be able to acquire
- * the flush lock when the I/O completes.
- */
- bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno,
- XFS_QI_DQCHUNKLEN(dqp->q_mount),
- XFS_INCORE_TRYLOCK);
- if (bp != NULL) {
- if (XFS_BUF_ISDELAYWRITE(bp)) {
- int error;
- if (XFS_BUF_ISPINNED(bp)) {
- xfs_log_force(dqp->q_mount,
- (xfs_lsn_t)0,
- XFS_LOG_FORCE);
- }
- error = xfs_bawrite(dqp->q_mount, bp);
- if (error)
- xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
- "xfs_qm_dqflock_pushbuf_wait: "
- "pushbuf error %d on dqp %p, bp %p",
- error, dqp, bp);
- } else {
- xfs_buf_relse(bp);
- }
- }
- xfs_dqflock(dqp);
-}
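
Among the locking helpers removed above, xfs_dqlock2() is the classic deadlock-avoidance idiom: when two dquots must be held at once they are always taken in ascending d_id order, so two threads locking the same pair can never end up in an ABBA deadlock. A small userspace sketch of that canonical-ordering pattern, assuming pthread mutexes; res_lock2() and struct res are illustrative names.

#include <pthread.h>
#include <stdint.h>

struct res {
	uint32_t	id;		/* stand-in for the dquot id */
	pthread_mutex_t	lock;
};

/*
 * Lock two resources in ascending-id order so that any two threads
 * locking the same pair acquire the locks in the same sequence.
 * Either pointer may be NULL, mirroring xfs_dqlock2(), in which case
 * only the non-NULL one is taken.
 */
static void res_lock2(struct res *a, struct res *b)
{
	if (a && b) {
		if (a->id > b->id) {
			struct res *t = a;
			a = b;
			b = t;
		}
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else if (a) {
		pthread_mutex_lock(&a->lock);
	} else if (b) {
		pthread_mutex_lock(&b->lock);
	}
}
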
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
deleted file mode 100644
index 5c371a92e3e..00000000000
--- a/fs/xfs/quota/xfs_dquot.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_DQUOT_H__
-#define __XFS_DQUOT_H__
-
-/*
- * Dquots are structures that hold quota information about a user or a group,
- * much like inodes are for files. In fact, dquots share many characteristics
- * with inodes. However, dquots can also be a centralized resource, relative
- * to a collection of inodes. In this respect, dquots share some characteristics
- * of the superblock.
- * XFS dquots exploit both those in its algorithms. They make every attempt
- * to not be a bottleneck when quotas are on and have minimal impact, if any,
- * when quotas are off.
- */
-
-/*
- * The hash chain headers (hash buckets)
- */
-typedef struct xfs_dqhash {
- struct xfs_dquot *qh_next;
- mutex_t qh_lock;
- uint qh_version; /* ever increasing version */
- uint qh_nelems; /* number of dquots on the list */
-} xfs_dqhash_t;
-
-typedef struct xfs_dqlink {
- struct xfs_dquot *ql_next; /* forward link */
- struct xfs_dquot **ql_prevp; /* pointer to prev ql_next */
-} xfs_dqlink_t;
-
-struct xfs_mount;
-struct xfs_trans;
-
-/*
- * This is the marker which is designed to occupy the first few
- * bytes of the xfs_dquot_t structure. Even inside this, the freelist pointers
- * must come first.
- * This serves as the marker ("sentinel") when we have to restart list
- * iterations because of locking considerations.
- */
-typedef struct xfs_dqmarker {
- struct xfs_dquot*dqm_flnext; /* link to freelist: must be first */
- struct xfs_dquot*dqm_flprev;
- xfs_dqlink_t dqm_mplist; /* link to mount's list of dquots */
- xfs_dqlink_t dqm_hashlist; /* link to the hash chain */
- uint dqm_flags; /* various flags (XFS_DQ_*) */
-} xfs_dqmarker_t;
-
-/*
- * The incore dquot structure
- */
-typedef struct xfs_dquot {
- xfs_dqmarker_t q_lists; /* list ptrs, q_flags (marker) */
- xfs_dqhash_t *q_hash; /* the hashchain header */
- struct xfs_mount*q_mount; /* filesystem this relates to */
- struct xfs_trans*q_transp; /* trans this belongs to currently */
- uint q_nrefs; /* # active refs from inodes */
- xfs_daddr_t q_blkno; /* blkno of dquot buffer */
- int q_bufoffset; /* off of dq in buffer (# dquots) */
- xfs_fileoff_t q_fileoffset; /* offset in quotas file */
-
- struct xfs_dquot*q_gdquot; /* group dquot, hint only */
- xfs_disk_dquot_t q_core; /* actual usage & quotas */
- xfs_dq_logitem_t q_logitem; /* dquot log item */
- xfs_qcnt_t q_res_bcount; /* total regular nblks used+reserved */
- xfs_qcnt_t q_res_icount; /* total inos allocd+reserved */
- xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */
- mutex_t q_qlock; /* quota lock */
- sema_t q_flock; /* flush lock */
- uint q_pincount; /* pin count for this dquot */
- sv_t q_pinwait; /* sync var for pinning */
-#ifdef XFS_DQUOT_TRACE
- struct ktrace *q_trace; /* trace header structure */
-#endif
-} xfs_dquot_t;
-
-
-#define dq_flnext q_lists.dqm_flnext
-#define dq_flprev q_lists.dqm_flprev
-#define dq_mplist q_lists.dqm_mplist
-#define dq_hashlist q_lists.dqm_hashlist
-#define dq_flags q_lists.dqm_flags
-
-#define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++)
-
-#ifdef DEBUG
-static inline int
-XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
-{
- if (mutex_trylock(&dqp->q_qlock)) {
- mutex_unlock(&dqp->q_qlock);
- return 0;
- }
- return 1;
-}
-#endif
-
-
-/*
- * The following three routines simply manage the q_flock
- * semaphore embedded in the dquot. This semaphore synchronizes
- * processes attempting to flush the in-core dquot back to disk.
- */
-#define xfs_dqflock(dqp) { psema(&((dqp)->q_flock), PINOD | PRECALC);\
- (dqp)->dq_flags |= XFS_DQ_FLOCKED; }
-#define xfs_dqfunlock(dqp) { ASSERT(issemalocked(&((dqp)->q_flock))); \
- vsema(&((dqp)->q_flock)); \
- (dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); }
-
-#define XFS_DQ_IS_FLUSH_LOCKED(dqp) (issemalocked(&((dqp)->q_flock)))
-#define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp))
-#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY)
-#define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER)
-#define XFS_QM_ISPDQ(dqp) ((dqp)->dq_flags & XFS_DQ_PROJ)
-#define XFS_QM_ISGDQ(dqp) ((dqp)->dq_flags & XFS_DQ_GROUP)
-#define XFS_DQ_TO_QINF(dqp) ((dqp)->q_mount->m_quotainfo)
-#define XFS_DQ_TO_QIP(dqp) (XFS_QM_ISUDQ(dqp) ? \
- XFS_DQ_TO_QINF(dqp)->qi_uquotaip : \
- XFS_DQ_TO_QINF(dqp)->qi_gquotaip)
-
-#define XFS_IS_THIS_QUOTA_OFF(d) (! (XFS_QM_ISUDQ(d) ? \
- (XFS_IS_UQUOTA_ON((d)->q_mount)) : \
- (XFS_IS_OQUOTA_ON((d)->q_mount))))
-
-#ifdef XFS_DQUOT_TRACE
-/*
- * Dquot Tracing stuff.
- */
-#define DQUOT_TRACE_SIZE 64
-#define DQUOT_KTRACE_ENTRY 1
-
-extern void __xfs_dqtrace_entry(xfs_dquot_t *dqp, char *func,
- void *, xfs_inode_t *);
-#define xfs_dqtrace_entry_ino(a,b,ip) \
- __xfs_dqtrace_entry((a), (b), (void*)__return_address, (ip))
-#define xfs_dqtrace_entry(a,b) \
- __xfs_dqtrace_entry((a), (b), (void*)__return_address, NULL)
-#else
-#define xfs_dqtrace_entry(a,b)
-#define xfs_dqtrace_entry_ino(a,b,ip)
-#endif
-
-#ifdef QUOTADEBUG
-extern void xfs_qm_dqprint(xfs_dquot_t *);
-#else
-#define xfs_qm_dqprint(a)
-#endif
-
-extern void xfs_qm_dqdestroy(xfs_dquot_t *);
-extern int xfs_qm_dqflush(xfs_dquot_t *, uint);
-extern int xfs_qm_dqpurge(xfs_dquot_t *, uint);
-extern void xfs_qm_dqunpin_wait(xfs_dquot_t *);
-extern int xfs_qm_dqlock_nowait(xfs_dquot_t *);
-extern int xfs_qm_dqflock_nowait(xfs_dquot_t *);
-extern void xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp);
-extern void xfs_qm_adjust_dqtimers(xfs_mount_t *,
- xfs_disk_dquot_t *);
-extern void xfs_qm_adjust_dqlimits(xfs_mount_t *,
- xfs_disk_dquot_t *);
-extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *,
- xfs_dqid_t, uint, uint, xfs_dquot_t **);
-extern void xfs_qm_dqput(xfs_dquot_t *);
-extern void xfs_qm_dqrele(xfs_dquot_t *);
-extern void xfs_dqlock(xfs_dquot_t *);
-extern void xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *);
-extern void xfs_dqunlock(xfs_dquot_t *);
-extern void xfs_dqunlock_nonotify(xfs_dquot_t *);
-
-#endif /* __XFS_DQUOT_H__ */
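
The flush-lock macros above encode a small protocol: callers typically take the dquot lock (q_qlock) before the flush lock (q_flock), and the XFS_DQ_FLOCKED flag mirrors the semaphore state. A minimal sketch of the pairing, assuming the in-tree headers; the helper name is hypothetical, and in the real code xfs_dqfunlock() is normally issued from the flush-completion path rather than inline:

	static void example_dquot_flush_sequence(xfs_dquot_t *dqp)
	{
		xfs_dqlock(dqp);	/* dquot lock comes first */
		xfs_dqflock(dqp);	/* psema() on q_flock, sets XFS_DQ_FLOCKED */

		/* ... copy q_core into the dquot buffer and start the write ... */

		xfs_dqfunlock(dqp);	/* vsema() on q_flock, clears XFS_DQ_FLOCKED */
		xfs_dqunlock(dqp);
	}
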
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
deleted file mode 100644
index 36e05ca7841..00000000000
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ /dev/null
@@ -1,676 +0,0 @@
-/*
- * Copyright (c) 2000-2003 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
-#include "xfs_quota.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_bmap.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
-#include "xfs_rtalloc.h"
-#include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_rw.h"
-#include "xfs_acl.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
-#include "xfs_trans_priv.h"
-#include "xfs_qm.h"
-
-/*
- * returns the number of iovecs needed to log the given dquot item.
- */
-/* ARGSUSED */
-STATIC uint
-xfs_qm_dquot_logitem_size(
- xfs_dq_logitem_t *logitem)
-{
- /*
- * we need only two iovecs, one for the format, one for the real thing
- */
- return (2);
-}
-
-/*
- * fills in the vector of log iovecs for the given dquot log item.
- */
-STATIC void
-xfs_qm_dquot_logitem_format(
- xfs_dq_logitem_t *logitem,
- xfs_log_iovec_t *logvec)
-{
- ASSERT(logitem);
- ASSERT(logitem->qli_dquot);
-
- logvec->i_addr = (xfs_caddr_t)&logitem->qli_format;
- logvec->i_len = sizeof(xfs_dq_logformat_t);
- XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_QFORMAT);
- logvec++;
- logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core;
- logvec->i_len = sizeof(xfs_disk_dquot_t);
- XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_DQUOT);
-
- ASSERT(2 == logitem->qli_item.li_desc->lid_size);
- logitem->qli_format.qlf_size = 2;
-
-}
-
-/*
- * Increment the pin count of the given dquot.
- * This value is protected by pinlock spinlock in the xQM structure.
- */
-STATIC void
-xfs_qm_dquot_logitem_pin(
- xfs_dq_logitem_t *logitem)
-{
- xfs_dquot_t *dqp;
-
- dqp = logitem->qli_dquot;
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
- spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
- dqp->q_pincount++;
- spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
-}
-
-/*
- * Decrement the pin count of the given dquot, and wake up
- * anyone in xfs_qm_dqunpin_wait() if the count goes to 0. The
- * dquot must have been previously pinned via xfs_qm_dquot_logitem_pin().
- */
-/* ARGSUSED */
-STATIC void
-xfs_qm_dquot_logitem_unpin(
- xfs_dq_logitem_t *logitem,
- int stale)
-{
- xfs_dquot_t *dqp;
-
- dqp = logitem->qli_dquot;
- ASSERT(dqp->q_pincount > 0);
- spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
- dqp->q_pincount--;
- if (dqp->q_pincount == 0) {
- sv_broadcast(&dqp->q_pinwait);
- }
- spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
-}
-
-/* ARGSUSED */
-STATIC void
-xfs_qm_dquot_logitem_unpin_remove(
- xfs_dq_logitem_t *logitem,
- xfs_trans_t *tp)
-{
- xfs_qm_dquot_logitem_unpin(logitem, 0);
-}
-
-/*
- * Given the logitem, this writes the corresponding dquot entry to disk
- * asynchronously. This is called with the dquot entry securely locked;
- * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
- * at the end.
- */
-STATIC void
-xfs_qm_dquot_logitem_push(
- xfs_dq_logitem_t *logitem)
-{
- xfs_dquot_t *dqp;
- int error;
-
- dqp = logitem->qli_dquot;
-
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
- ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));
-
- /*
- * Since we were able to lock the dquot's flush lock and
- * we found it on the AIL, the dquot must be dirty. This
- * is because the dquot is removed from the AIL while still
- * holding the flush lock in xfs_dqflush_done(). Thus, if
- * we found it in the AIL and were able to obtain the flush
- * lock without sleeping, then there must not have been
- * anyone in the process of flushing the dquot.
- */
- error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
- if (error)
- xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
- "xfs_qm_dquot_logitem_push: push error %d on dqp %p",
- error, dqp);
- xfs_dqunlock(dqp);
-}
-
-/*ARGSUSED*/
-STATIC xfs_lsn_t
-xfs_qm_dquot_logitem_committed(
- xfs_dq_logitem_t *l,
- xfs_lsn_t lsn)
-{
- /*
- * We always re-log the entire dquot when it becomes dirty,
- * so, the latest copy _is_ the only one that matters.
- */
- return (lsn);
-}
-
-
-/*
- * This is called to wait for the given dquot to be unpinned.
- * Most of these pin/unpin routines are plagiarized from inode code.
- */
-void
-xfs_qm_dqunpin_wait(
- xfs_dquot_t *dqp)
-{
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
- if (dqp->q_pincount == 0) {
- return;
- }
-
- /*
- * Give the log a push so we don't wait here too long.
- */
- xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE);
- spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
- if (dqp->q_pincount == 0) {
- spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
- return;
- }
- sv_wait(&(dqp->q_pinwait), PINOD,
- &(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s);
-}
-
-/*
- * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
- * the dquot is locked by us, but the flush lock isn't. So, here we are
- * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
- * If so, we want to push it out to help us take this item off the AIL as soon
- * as possible.
- *
- * We must not be holding the AIL lock at this point. Calling incore() to
- * search the buffer cache can be a time-consuming thing, and the AIL lock is a
- * spinlock.
- */
-STATIC void
-xfs_qm_dquot_logitem_pushbuf(
- xfs_dq_logitem_t *qip)
-{
- xfs_dquot_t *dqp;
- xfs_mount_t *mp;
- xfs_buf_t *bp;
- uint dopush;
-
- dqp = qip->qli_dquot;
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
-
- /*
- * The qli_pushbuf_flag keeps others from
- * trying to duplicate our effort.
- */
- ASSERT(qip->qli_pushbuf_flag != 0);
- ASSERT(qip->qli_push_owner == current_pid());
-
- /*
- * If the flush lock isn't held anymore, chances are that the
- * dquot flush completed and the dquot was taken off the AIL.
- * So, just get out.
- */
- if (!issemalocked(&(dqp->q_flock)) ||
- ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
- qip->qli_pushbuf_flag = 0;
- xfs_dqunlock(dqp);
- return;
- }
- mp = dqp->q_mount;
- bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
- XFS_QI_DQCHUNKLEN(mp),
- XFS_INCORE_TRYLOCK);
- if (bp != NULL) {
- if (XFS_BUF_ISDELAYWRITE(bp)) {
- dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
- issemalocked(&(dqp->q_flock)));
- qip->qli_pushbuf_flag = 0;
- xfs_dqunlock(dqp);
-
- if (XFS_BUF_ISPINNED(bp)) {
- xfs_log_force(mp, (xfs_lsn_t)0,
- XFS_LOG_FORCE);
- }
- if (dopush) {
- int error;
-#ifdef XFSRACEDEBUG
- delay_for_intr();
- delay(300);
-#endif
- error = xfs_bawrite(mp, bp);
- if (error)
- xfs_fs_cmn_err(CE_WARN, mp,
- "xfs_qm_dquot_logitem_pushbuf: pushbuf error %d on qip %p, bp %p",
- error, qip, bp);
- } else {
- xfs_buf_relse(bp);
- }
- } else {
- qip->qli_pushbuf_flag = 0;
- xfs_dqunlock(dqp);
- xfs_buf_relse(bp);
- }
- return;
- }
-
- qip->qli_pushbuf_flag = 0;
- xfs_dqunlock(dqp);
-}
-
-/*
- * This is called to attempt to lock the dquot associated with this
- * dquot log item. Don't sleep on the dquot lock or the flush lock.
- * If the flush lock is already held, indicating that the dquot has
- * been or is in the process of being flushed, then see if we can
- * find the dquot's buffer in the buffer cache without sleeping. If
- * we can and it is marked delayed write, then we want to send it out.
- * We delay doing so until the push routine, though, to avoid sleeping
- * in any device strategy routines.
- */
-STATIC uint
-xfs_qm_dquot_logitem_trylock(
- xfs_dq_logitem_t *qip)
-{
- xfs_dquot_t *dqp;
- uint retval;
-
- dqp = qip->qli_dquot;
- if (dqp->q_pincount > 0)
- return (XFS_ITEM_PINNED);
-
- if (! xfs_qm_dqlock_nowait(dqp))
- return (XFS_ITEM_LOCKED);
-
- retval = XFS_ITEM_SUCCESS;
- if (! xfs_qm_dqflock_nowait(dqp)) {
- /*
- * The dquot is already being flushed. It may have been
- * flushed delayed write, however, and we don't want to
- * get stuck waiting for that to complete. So, we want to check
- * to see if we can lock the dquot's buffer without sleeping.
- * If we can and it is marked for delayed write, then we
- * hold it and send it out from the push routine. We don't
- * want to do that now since we might sleep in the device
- * strategy routine. We also don't want to grab the buffer lock
- * here because we'd like not to call into the buffer cache
- * while holding the AIL lock.
- * Make sure to only return PUSHBUF if we set pushbuf_flag
- * ourselves. If someone else is doing it then we don't
- * want to go to the push routine and duplicate their efforts.
- */
- if (qip->qli_pushbuf_flag == 0) {
- qip->qli_pushbuf_flag = 1;
- ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno);
-#ifdef DEBUG
- qip->qli_push_owner = current_pid();
-#endif
- /*
- * The dquot is left locked.
- */
- retval = XFS_ITEM_PUSHBUF;
- } else {
- retval = XFS_ITEM_FLUSHING;
- xfs_dqunlock_nonotify(dqp);
- }
- }
-
- ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL);
- return (retval);
-}
-
-
-/*
- * Unlock the dquot associated with the log item.
- * Clear the fields of the dquot and dquot log item that
- * are specific to the current transaction. If the
- * hold flag is set, do not unlock the dquot.
- */
-STATIC void
-xfs_qm_dquot_logitem_unlock(
- xfs_dq_logitem_t *ql)
-{
- xfs_dquot_t *dqp;
-
- ASSERT(ql != NULL);
- dqp = ql->qli_dquot;
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
-
- /*
- * Clear the transaction pointer in the dquot
- */
- dqp->q_transp = NULL;
-
- /*
- * dquots are never 'held' from getting unlocked at the end of
- * a transaction. Their locking and unlocking is hidden inside the
- * transaction layer, within trans_commit. Hence, no LI_HOLD flag
- * for the logitem.
- */
- xfs_dqunlock(dqp);
-}
-
-
-/*
- * This needs to stamp an LSN into the dquot, I think.
- * RPCs that look at user dquots would then have to
- * push on the dependency recorded in the dquot.
- */
-/* ARGSUSED */
-STATIC void
-xfs_qm_dquot_logitem_committing(
- xfs_dq_logitem_t *l,
- xfs_lsn_t lsn)
-{
- return;
-}
-
-
-/*
- * This is the ops vector for dquots
- */
-static struct xfs_item_ops xfs_dquot_item_ops = {
- .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_size,
- .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
- xfs_qm_dquot_logitem_format,
- .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*, int))
- xfs_qm_dquot_logitem_unpin,
- .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
- xfs_qm_dquot_logitem_unpin_remove,
- .iop_trylock = (uint(*)(xfs_log_item_t*))
- xfs_qm_dquot_logitem_trylock,
- .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unlock,
- .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_qm_dquot_logitem_committed,
- .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_push,
- .iop_pushbuf = (void(*)(xfs_log_item_t*))
- xfs_qm_dquot_logitem_pushbuf,
- .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_qm_dquot_logitem_committing
-};
-
-/*
- * Initialize the dquot log item for a newly allocated dquot.
- * The dquot isn't locked at this point, but it isn't on any of the lists
- * either, so we don't care.
- */
-void
-xfs_qm_dquot_logitem_init(
- struct xfs_dquot *dqp)
-{
- xfs_dq_logitem_t *lp;
- lp = &dqp->q_logitem;
-
- lp->qli_item.li_type = XFS_LI_DQUOT;
- lp->qli_item.li_ops = &xfs_dquot_item_ops;
- lp->qli_item.li_mountp = dqp->q_mount;
- lp->qli_dquot = dqp;
- lp->qli_format.qlf_type = XFS_LI_DQUOT;
- lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id);
- lp->qli_format.qlf_blkno = dqp->q_blkno;
- lp->qli_format.qlf_len = 1;
- /*
- * This is just the offset of this dquot within its buffer
- * (which is currently 1 FSB and probably won't change).
- * Hence 32 bits for this offset should be just fine.
- * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t))
- * here, and recompute it at recovery time.
- */
- lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset;
-}
-
-/*------------------ QUOTAOFF LOG ITEMS -------------------*/
-
-/*
- * This returns the number of iovecs needed to log the given quotaoff item.
- * We only need 1 iovec for a quotaoff item. It just logs the
- * quotaoff_log_format structure.
- */
-/*ARGSUSED*/
-STATIC uint
-xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf)
-{
- return (1);
-}
-
-/*
- * This is called to fill in the vector of log iovecs for the
- * given quotaoff log item. We use only 1 iovec, and we point that
- * at the quotaoff_log_format structure embedded in the quotaoff item.
- * It is at this point that we assert that all of the extent
- * slots in the quotaoff item have been filled.
- */
-STATIC void
-xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t *qf,
- xfs_log_iovec_t *log_vector)
-{
- ASSERT(qf->qql_format.qf_type == XFS_LI_QUOTAOFF);
-
- log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format);
- log_vector->i_len = sizeof(xfs_qoff_logitem_t);
- XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_QUOTAOFF);
- qf->qql_format.qf_size = 1;
-}
-
-
-/*
- * Pinning has no meaning for a quotaoff item, so just return.
- */
-/*ARGSUSED*/
-STATIC void
-xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf)
-{
- return;
-}
-
-
-/*
- * Since pinning has no meaning for a quotaoff item, unpinning does
- * not either.
- */
-/*ARGSUSED*/
-STATIC void
-xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf, int stale)
-{
- return;
-}
-
-/*ARGSUSED*/
-STATIC void
-xfs_qm_qoff_logitem_unpin_remove(xfs_qoff_logitem_t *qf, xfs_trans_t *tp)
-{
- return;
-}
-
-/*
- * Quotaoff items have no locking; just return XFS_ITEM_LOCKED so the
- * caller leaves them alone.
- */
-/*ARGSUSED*/
-STATIC uint
-xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf)
-{
- return XFS_ITEM_LOCKED;
-}
-
-/*
- * Quotaoff items have no locking or pushing, so there is nothing
- * to do when they are unlocked.
- */
-/*ARGSUSED*/
-STATIC void
-xfs_qm_qoff_logitem_unlock(xfs_qoff_logitem_t *qf)
-{
- return;
-}
-
-/*
- * The quotaoff-start-item is logged only once and cannot be moved in the log,
- * so simply return the lsn at which it's been logged.
- */
-/*ARGSUSED*/
-STATIC xfs_lsn_t
-xfs_qm_qoff_logitem_committed(xfs_qoff_logitem_t *qf, xfs_lsn_t lsn)
-{
- return (lsn);
-}
-
-/*
- * There isn't much you can do to push on a quotaoff item. It is simply
- * stuck waiting for the log to be flushed to disk.
- */
-/*ARGSUSED*/
-STATIC void
-xfs_qm_qoff_logitem_push(xfs_qoff_logitem_t *qf)
-{
- return;
-}
-
-
-/*ARGSUSED*/
-STATIC xfs_lsn_t
-xfs_qm_qoffend_logitem_committed(
- xfs_qoff_logitem_t *qfe,
- xfs_lsn_t lsn)
-{
- xfs_qoff_logitem_t *qfs;
-
- qfs = qfe->qql_start_lip;
- spin_lock(&qfs->qql_item.li_mountp->m_ail_lock);
- /*
- * Delete the qoff-start logitem from the AIL.
- * xfs_trans_delete_ail() drops the AIL lock.
- */
- xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs);
- kmem_free(qfs, sizeof(xfs_qoff_logitem_t));
- kmem_free(qfe, sizeof(xfs_qoff_logitem_t));
- return (xfs_lsn_t)-1;
-}
-
-/*
- * XXX rcc - don't know quite what to do with this. I think we can
- * just ignore it. The only time that isn't the case is if we allow
- * the client to somehow see that quotas have been turned off, in which
- * case we can't allow that to get back until the quotaoff hits the disk.
- * So how would that happen? Also, do we need different routines for
- * quotaoff start and quotaoff end? I suspect the answer is yes but
- * to be sure, I need to look at the recovery code and see how quota off
- * recovery is handled (do we roll forward or back or do something else).
- * If we roll forwards or backwards, then we need two separate routines,
- * one that does nothing and one that stamps in the lsn that matters
- * (truly makes the quotaoff irrevocable). If we do something else,
- * then maybe we don't need two.
- */
-/* ARGSUSED */
-STATIC void
-xfs_qm_qoff_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
-{
- return;
-}
-
-/* ARGSUSED */
-STATIC void
-xfs_qm_qoffend_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
-{
- return;
-}
-
-static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
- .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
- .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
- xfs_qm_qoff_logitem_format,
- .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
- .iop_unpin = (void(*)(xfs_log_item_t* ,int))
- xfs_qm_qoff_logitem_unpin,
- .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*))
- xfs_qm_qoff_logitem_unpin_remove,
- .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
- .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
- .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_qm_qoffend_logitem_committed,
- .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
- .iop_pushbuf = NULL,
- .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_qm_qoffend_logitem_committing
-};
-
-/*
- * This is the ops vector shared by all quotaoff-start log items.
- */
-static struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
- .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
- .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
- xfs_qm_qoff_logitem_format,
- .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*, int))
- xfs_qm_qoff_logitem_unpin,
- .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*))
- xfs_qm_qoff_logitem_unpin_remove,
- .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
- .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
- .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_qm_qoff_logitem_committed,
- .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
- .iop_pushbuf = NULL,
- .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_qm_qoff_logitem_committing
-};
-
-/*
- * Allocate and initialize a quotaoff item of the correct quota type(s).
- */
-xfs_qoff_logitem_t *
-xfs_qm_qoff_logitem_init(
- struct xfs_mount *mp,
- xfs_qoff_logitem_t *start,
- uint flags)
-{
- xfs_qoff_logitem_t *qf;
-
- qf = (xfs_qoff_logitem_t*) kmem_zalloc(sizeof(xfs_qoff_logitem_t), KM_SLEEP);
-
- qf->qql_item.li_type = XFS_LI_QUOTAOFF;
- if (start)
- qf->qql_item.li_ops = &xfs_qm_qoffend_logitem_ops;
- else
- qf->qql_item.li_ops = &xfs_qm_qoff_logitem_ops;
- qf->qql_item.li_mountp = mp;
- qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
- qf->qql_format.qf_flags = flags;
- qf->qql_start_lip = start;
- return (qf);
-}
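
To illustrate the role of the start argument above: a quotaoff operation creates two of these items, and only the end item gets the qoffend ops vector, whose committed handler later removes the start item from the AIL and frees both. A minimal sketch, with the transaction plumbing omitted and variable names purely illustrative:

	xfs_qoff_logitem_t *qoff_start, *qoff_end;

	/* Start item: no predecessor, gets xfs_qm_qoff_logitem_ops. */
	qoff_start = xfs_qm_qoff_logitem_init(mp, NULL, flags);

	/* End item: records the start item in qql_start_lip and gets
	 * xfs_qm_qoffend_logitem_ops. */
	qoff_end = xfs_qm_qoff_logitem_init(mp, qoff_start, flags);
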
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
deleted file mode 100644
index d31cce1165c..00000000000
--- a/fs/xfs/quota/xfs_qm.c
+++ /dev/null
@@ -1,2835 +0,0 @@
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_clnt.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
-#include "xfs_quota.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
-#include "xfs_itable.h"
-#include "xfs_rtalloc.h"
-#include "xfs_error.h"
-#include "xfs_bmap.h"
-#include "xfs_rw.h"
-#include "xfs_acl.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
-#include "xfs_trans_space.h"
-#include "xfs_utils.h"
-#include "xfs_qm.h"
-
-/*
- * The global quota manager. There is only one of these for the entire
- * system, _not_ one per file system. XQM keeps track of the overall
- * quota functionality, including maintaining the freelist and hash
- * tables of dquots.
- */
-mutex_t xfs_Gqm_lock;
-struct xfs_qm *xfs_Gqm;
-uint ndquot;
-
-kmem_zone_t *qm_dqzone;
-kmem_zone_t *qm_dqtrxzone;
-
-static cred_t xfs_zerocr;
-
-STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int);
-STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
-
-STATIC void xfs_qm_freelist_init(xfs_frlist_t *);
-STATIC void xfs_qm_freelist_destroy(xfs_frlist_t *);
-STATIC int xfs_qm_mplist_nowait(xfs_mount_t *);
-STATIC int xfs_qm_dqhashlock_nowait(xfs_dquot_t *);
-
-STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
-STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
-STATIC int xfs_qm_shake(int, gfp_t);
-
-static struct shrinker xfs_qm_shaker = {
- .shrink = xfs_qm_shake,
- .seeks = DEFAULT_SEEKS,
-};
-
-#ifdef DEBUG
-extern mutex_t qcheck_lock;
-#endif
-
-#ifdef QUOTADEBUG
-#define XQM_LIST_PRINT(l, NXT, title) \
-{ \
- xfs_dquot_t *dqp; int i = 0; \
- cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
- for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \
- cmn_err(CE_DEBUG, " %d. \"%d (%s)\" " \
- "bcnt = %d, icnt = %d, refs = %d", \
- ++i, (int) be32_to_cpu(dqp->q_core.d_id), \
- DQFLAGTO_TYPESTR(dqp), \
- (int) be64_to_cpu(dqp->q_core.d_bcount), \
- (int) be64_to_cpu(dqp->q_core.d_icount), \
- (int) dqp->q_nrefs); } \
-}
-#else
-#define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
-#endif
-
-/*
- * Initialize the XQM structure.
- * Note that there is not one quota manager per file system.
- */
-STATIC struct xfs_qm *
-xfs_Gqm_init(void)
-{
- xfs_dqhash_t *udqhash, *gdqhash;
- xfs_qm_t *xqm;
- size_t hsize;
- uint i;
-
- /*
- * Initialize the dquot hash tables.
- */
- udqhash = kmem_zalloc_greedy(&hsize,
- XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
- XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t),
- KM_SLEEP | KM_MAYFAIL | KM_LARGE);
- gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
- hsize /= sizeof(xfs_dqhash_t);
- ndquot = hsize << 8;
-
- xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
- xqm->qm_dqhashmask = hsize - 1;
- xqm->qm_usr_dqhtable = udqhash;
- xqm->qm_grp_dqhtable = gdqhash;
- ASSERT(xqm->qm_usr_dqhtable != NULL);
- ASSERT(xqm->qm_grp_dqhtable != NULL);
-
- for (i = 0; i < hsize; i++) {
- xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i);
- xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i);
- }
-
- /*
- * Freelist of all dquots of all file systems
- */
- xfs_qm_freelist_init(&(xqm->qm_dqfreelist));
-
- /*
- * dquot zone. we register our own low-memory callback.
- */
- if (!qm_dqzone) {
- xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t),
- "xfs_dquots");
- qm_dqzone = xqm->qm_dqzone;
- } else
- xqm->qm_dqzone = qm_dqzone;
-
- register_shrinker(&xfs_qm_shaker);
-
- /*
- * The t_dqinfo portion of transactions.
- */
- if (!qm_dqtrxzone) {
- xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t),
- "xfs_dqtrx");
- qm_dqtrxzone = xqm->qm_dqtrxzone;
- } else
- xqm->qm_dqtrxzone = qm_dqtrxzone;
-
- atomic_set(&xqm->qm_totaldquots, 0);
- xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
- xqm->qm_nrefs = 0;
-#ifdef DEBUG
- mutex_init(&qcheck_lock);
-#endif
- return xqm;
-}
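
To make the sizing above concrete: kmem_zalloc_greedy() returns hsize in bytes, which is then divided down to a bucket count, so if the allocator hands back room for, say, 512 xfs_dqhash_t buckets, qm_dqhashmask becomes 511 and ndquot is 512 << 8 = 131072. The actual figures depend on how much memory the greedy allocation obtains between the LOW and HIGH bounds.
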
-
-/*
- * Destroy the global quota manager when its reference count goes to zero.
- */
-STATIC void
-xfs_qm_destroy(
- struct xfs_qm *xqm)
-{
- int hsize, i;
-
- ASSERT(xqm != NULL);
- ASSERT(xqm->qm_nrefs == 0);
- unregister_shrinker(&xfs_qm_shaker);
- hsize = xqm->qm_dqhashmask + 1;
- for (i = 0; i < hsize; i++) {
- xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
- xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
- }
- kmem_free(xqm->qm_usr_dqhtable, hsize * sizeof(xfs_dqhash_t));
- kmem_free(xqm->qm_grp_dqhtable, hsize * sizeof(xfs_dqhash_t));
- xqm->qm_usr_dqhtable = NULL;
- xqm->qm_grp_dqhtable = NULL;
- xqm->qm_dqhashmask = 0;
- xfs_qm_freelist_destroy(&(xqm->qm_dqfreelist));
-#ifdef DEBUG
- mutex_destroy(&qcheck_lock);
-#endif
- kmem_free(xqm, sizeof(xfs_qm_t));
-}
-
-/*
- * Called at mount time to let XQM know that another file system is
- * starting quotas. This isn't crucial information as the individual mount
- * structures are pretty independent, but it helps the XQM keep a
- * global view of what's going on.
- */
-/* ARGSUSED */
-STATIC int
-xfs_qm_hold_quotafs_ref(
- struct xfs_mount *mp)
-{
- /*
- * Need to lock the xfs_Gqm structure for things like this. For example,
- * the structure could disappear between the entry to this routine and
- * a HOLD operation if not locked.
- */
- XFS_QM_LOCK(xfs_Gqm);
-
- if (xfs_Gqm == NULL)
- xfs_Gqm = xfs_Gqm_init();
- /*
- * We can keep a list of all filesystems with quotas mounted for
- * debugging and statistical purposes, but ...
- * Just take a reference and get out.
- */
- XFS_QM_HOLD(xfs_Gqm);
- XFS_QM_UNLOCK(xfs_Gqm);
-
- return 0;
-}
-
-
-/*
- * Release the reference that a filesystem took at mount time,
- * so that we know when we need to destroy the entire quota manager.
- */
-/* ARGSUSED */
-STATIC void
-xfs_qm_rele_quotafs_ref(
- struct xfs_mount *mp)
-{
- xfs_dquot_t *dqp, *nextdqp;
-
- ASSERT(xfs_Gqm);
- ASSERT(xfs_Gqm->qm_nrefs > 0);
-
- /*
- * Go thru the freelist and destroy all inactive dquots.
- */
- xfs_qm_freelist_lock(xfs_Gqm);
-
- for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
- dqp != (xfs_dquot_t *)&(xfs_Gqm->qm_dqfreelist); ) {
- xfs_dqlock(dqp);
- nextdqp = dqp->dq_flnext;
- if (dqp->dq_flags & XFS_DQ_INACTIVE) {
- ASSERT(dqp->q_mount == NULL);
- ASSERT(! XFS_DQ_IS_DIRTY(dqp));
- ASSERT(dqp->HL_PREVP == NULL);
- ASSERT(dqp->MPL_PREVP == NULL);
- XQM_FREELIST_REMOVE(dqp);
- xfs_dqunlock(dqp);
- xfs_qm_dqdestroy(dqp);
- } else {
- xfs_dqunlock(dqp);
- }
- dqp = nextdqp;
- }
- xfs_qm_freelist_unlock(xfs_Gqm);
-
- /*
- * Destroy the entire XQM. If somebody mounts with quotaon, this'll
- * be restarted.
- */
- XFS_QM_LOCK(xfs_Gqm);
- XFS_QM_RELE(xfs_Gqm);
- if (xfs_Gqm->qm_nrefs == 0) {
- xfs_qm_destroy(xfs_Gqm);
- xfs_Gqm = NULL;
- }
- XFS_QM_UNLOCK(xfs_Gqm);
-}
-
-/*
- * Just destroy the quotainfo structure.
- */
-void
-xfs_qm_unmount_quotadestroy(
- xfs_mount_t *mp)
-{
- if (mp->m_quotainfo)
- xfs_qm_destroy_quotainfo(mp);
-}
-
-
-/*
- * This is called from xfs_mountfs to start quotas and initialize all
- * necessary data structures like quotainfo. This is also responsible for
- * running a quotacheck as necessary. We are guaranteed that the superblock
- * is consistently read in at this point.
- *
- * If we fail here, the mount will continue with quota turned off. We don't
- * need to indicate success or failure at all.
- */
-void
-xfs_qm_mount_quotas(
- xfs_mount_t *mp,
- int mfsi_flags)
-{
- int error = 0;
- uint sbf;
-
- /*
- * If quotas on realtime volumes is not supported, we disable
- * quotas immediately.
- */
- if (mp->m_sb.sb_rextents) {
- cmn_err(CE_NOTE,
- "Cannot turn on quotas for realtime filesystem %s",
- mp->m_fsname);
- mp->m_qflags = 0;
- goto write_changes;
- }
-
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
-
- /*
- * Allocate the quotainfo structure inside the mount struct, and
- * create quotainode(s), and change/rev superblock if necessary.
- */
- error = xfs_qm_init_quotainfo(mp);
- if (error) {
- /*
- * We must turn off quotas.
- */
- ASSERT(mp->m_quotainfo == NULL);
- mp->m_qflags = 0;
- goto write_changes;
- }
- /*
- * If any of the quotas are not consistent, do a quotacheck.
- */
- if (XFS_QM_NEED_QUOTACHECK(mp) &&
- !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) {
- error = xfs_qm_quotacheck(mp);
- if (error) {
- /* Quotacheck failed and disabled quotas. */
- return;
- }
- }
- /*
- * If one type of quotas is off, then it will lose its
- * quotachecked status, since we won't be doing accounting for
- * that type anymore.
- */
- if (!XFS_IS_UQUOTA_ON(mp))
- mp->m_qflags &= ~XFS_UQUOTA_CHKD;
- if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
- mp->m_qflags &= ~XFS_OQUOTA_CHKD;
-
- write_changes:
- /*
- * We actually don't have to acquire the m_sb_lock at all.
- * This can only be called from mount, and that's single threaded. XXX
- */
- spin_lock(&mp->m_sb_lock);
- sbf = mp->m_sb.sb_qflags;
- mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
- spin_unlock(&mp->m_sb_lock);
-
- if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
- if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
- /*
- * We could only have been turning quotas off.
- * We aren't in very good shape actually because
- * the incore structures are convinced that quotas are
- * off, but the on-disk superblock doesn't know that!
- */
- ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
- xfs_fs_cmn_err(CE_ALERT, mp,
- "XFS mount_quotas: Superblock update failed!");
- }
- }
-
- if (error) {
- xfs_fs_cmn_err(CE_WARN, mp,
- "Failed to initialize disk quotas.");
- }
- return;
-}
-
-/*
- * Called from the vfsops layer.
- */
-int
-xfs_qm_unmount_quotas(
- xfs_mount_t *mp)
-{
- xfs_inode_t *uqp, *gqp;
- int error = 0;
-
- /*
- * Release the dquots that root inode, et al might be holding,
- * before we flush quotas and blow away the quotainfo structure.
- */
- ASSERT(mp->m_rootip);
- xfs_qm_dqdetach(mp->m_rootip);
- if (mp->m_rbmip)
- xfs_qm_dqdetach(mp->m_rbmip);
- if (mp->m_rsumip)
- xfs_qm_dqdetach(mp->m_rsumip);
-
- /*
- * Flush out the quota inodes.
- */
- uqp = gqp = NULL;
- if (mp->m_quotainfo) {
- if ((uqp = mp->m_quotainfo->qi_uquotaip) != NULL) {
- xfs_ilock(uqp, XFS_ILOCK_EXCL);
- xfs_iflock(uqp);
- error = xfs_iflush(uqp, XFS_IFLUSH_SYNC);
- xfs_iunlock(uqp, XFS_ILOCK_EXCL);
- if (unlikely(error == EFSCORRUPTED)) {
- XFS_ERROR_REPORT("xfs_qm_unmount_quotas(1)",
- XFS_ERRLEVEL_LOW, mp);
- goto out;
- }
- }
- if ((gqp = mp->m_quotainfo->qi_gquotaip) != NULL) {
- xfs_ilock(gqp, XFS_ILOCK_EXCL);
- xfs_iflock(gqp);
- error = xfs_iflush(gqp, XFS_IFLUSH_SYNC);
- xfs_iunlock(gqp, XFS_ILOCK_EXCL);
- if (unlikely(error == EFSCORRUPTED)) {
- XFS_ERROR_REPORT("xfs_qm_unmount_quotas(2)",
- XFS_ERRLEVEL_LOW, mp);
- goto out;
- }
- }
- }
- if (uqp) {
- XFS_PURGE_INODE(uqp);
- mp->m_quotainfo->qi_uquotaip = NULL;
- }
- if (gqp) {
- XFS_PURGE_INODE(gqp);
- mp->m_quotainfo->qi_gquotaip = NULL;
- }
-out:
- return XFS_ERROR(error);
-}
-
-/*
- * Flush all dquots of the given file system to disk. The dquots are
- * _not_ purged from memory here, just their data written to disk.
- */
-STATIC int
-xfs_qm_dqflush_all(
- xfs_mount_t *mp,
- int flags)
-{
- int recl;
- xfs_dquot_t *dqp;
- int niters;
- int error;
-
- if (mp->m_quotainfo == NULL)
- return 0;
- niters = 0;
-again:
- xfs_qm_mplist_lock(mp);
- FOREACH_DQUOT_IN_MP(dqp, mp) {
- xfs_dqlock(dqp);
- if (! XFS_DQ_IS_DIRTY(dqp)) {
- xfs_dqunlock(dqp);
- continue;
- }
- xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY");
- /* XXX a sentinel would be better */
- recl = XFS_QI_MPLRECLAIMS(mp);
- if (! xfs_qm_dqflock_nowait(dqp)) {
- /*
- * If we can't grab the flush lock then check
- * to see if the dquot has been flushed delayed
- * write. If so, grab its buffer and send it
- * out immediately. We'll be able to acquire
- * the flush lock when the I/O completes.
- */
- xfs_qm_dqflock_pushbuf_wait(dqp);
- }
- /*
- * Let go of the mplist lock. We don't want to hold it
- * across a disk write.
- */
- xfs_qm_mplist_unlock(mp);
- error = xfs_qm_dqflush(dqp, flags);
- xfs_dqunlock(dqp);
- if (error)
- return error;
-
- xfs_qm_mplist_lock(mp);
- if (recl != XFS_QI_MPLRECLAIMS(mp)) {
- xfs_qm_mplist_unlock(mp);
- /* XXX restart limit */
- goto again;
- }
- }
-
- xfs_qm_mplist_unlock(mp);
- /* return ! busy */
- return 0;
-}
-/*
- * Release the group dquot pointers the user dquots may be
- * carrying around as a hint. mplist is locked on entry and exit.
- */
-STATIC void
-xfs_qm_detach_gdquots(
- xfs_mount_t *mp)
-{
- xfs_dquot_t *dqp, *gdqp;
- int nrecl;
-
- again:
- ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
- dqp = XFS_QI_MPLNEXT(mp);
- while (dqp) {
- xfs_dqlock(dqp);
- if ((gdqp = dqp->q_gdquot)) {
- xfs_dqlock(gdqp);
- dqp->q_gdquot = NULL;
- }
- xfs_dqunlock(dqp);
-
- if (gdqp) {
- /*
- * Can't hold the mplist lock across a dqput.
- * XXX must convert to marker-based iteration here.
- */
- nrecl = XFS_QI_MPLRECLAIMS(mp);
- xfs_qm_mplist_unlock(mp);
- xfs_qm_dqput(gdqp);
-
- xfs_qm_mplist_lock(mp);
- if (nrecl != XFS_QI_MPLRECLAIMS(mp))
- goto again;
- }
- dqp = dqp->MPL_NEXT;
- }
-}
-
-/*
- * Go through all the incore dquots of this file system and take them
- * off the mplist and hashlist, if the dquot type matches the dqtype
- * parameter. This is used when turning off quota accounting for
- * users and/or groups, as well as when the filesystem is unmounting.
- */
-STATIC int
-xfs_qm_dqpurge_int(
- xfs_mount_t *mp,
- uint flags) /* QUOTAOFF/UMOUNTING/UQUOTA/PQUOTA/GQUOTA */
-{
- xfs_dquot_t *dqp;
- uint dqtype;
- int nrecl;
- xfs_dquot_t *nextdqp;
- int nmisses;
-
- if (mp->m_quotainfo == NULL)
- return 0;
-
- dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
- dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0;
- dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;
-
- xfs_qm_mplist_lock(mp);
-
- /*
- * In the first pass through all incore dquots of this filesystem,
- * we release the group dquot pointers the user dquots may be
- * carrying around as a hint. We need to do this irrespective of
- * what's being turned off.
- */
- xfs_qm_detach_gdquots(mp);
-
- again:
- nmisses = 0;
- ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
- /*
- * Try to get rid of all of the unwanted dquots. The idea is to
- * get them off mplist and hashlist, but leave them on freelist.
- */
- dqp = XFS_QI_MPLNEXT(mp);
- while (dqp) {
- /*
- * It's OK to look at the type without taking dqlock here.
- * We're holding the mplist lock here, and that's needed for
- * a dqreclaim.
- */
- if ((dqp->dq_flags & dqtype) == 0) {
- dqp = dqp->MPL_NEXT;
- continue;
- }
-
- if (! xfs_qm_dqhashlock_nowait(dqp)) {
- nrecl = XFS_QI_MPLRECLAIMS(mp);
- xfs_qm_mplist_unlock(mp);
- XFS_DQ_HASH_LOCK(dqp->q_hash);
- xfs_qm_mplist_lock(mp);
-
- /*
- * XXXTheoretically, we can get into a very long
- * ping pong game here.
- * No one can be adding dquots to the mplist at
- * this point, but somebody might be taking things off.
- */
- if (nrecl != XFS_QI_MPLRECLAIMS(mp)) {
- XFS_DQ_HASH_UNLOCK(dqp->q_hash);
- goto again;
- }
- }
-
- /*
- * Take the dquot off the mplist and hashlist. It may remain on
- * freelist in INACTIVE state.
- */
- nextdqp = dqp->MPL_NEXT;
- nmisses += xfs_qm_dqpurge(dqp, flags);
- dqp = nextdqp;
- }
- xfs_qm_mplist_unlock(mp);
- return nmisses;
-}
-
-int
-xfs_qm_dqpurge_all(
- xfs_mount_t *mp,
- uint flags)
-{
- int ndquots;
-
- /*
- * Purge the dquot cache.
- * None of the dquots should really be busy at this point.
- */
- if (mp->m_quotainfo) {
- while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) {
- delay(ndquots * 10);
- }
- }
- return 0;
-}
-
-STATIC int
-xfs_qm_dqattach_one(
- xfs_inode_t *ip,
- xfs_dqid_t id,
- uint type,
- uint doalloc,
- uint dolock,
- xfs_dquot_t *udqhint, /* hint */
- xfs_dquot_t **IO_idqpp)
-{
- xfs_dquot_t *dqp;
- int error;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- error = 0;
- /*
- * See if we already have it in the inode itself. IO_idqpp is
- * &i_udquot or &i_gdquot. This made the code look weird, but
- * made the logic a lot simpler.
- */
- if ((dqp = *IO_idqpp)) {
- if (dolock)
- xfs_dqlock(dqp);
- xfs_dqtrace_entry(dqp, "DQATTACH: found in ip");
- goto done;
- }
-
- /*
- * udqhint is the i_udquot field in the inode, and is non-NULL only
- * when the type arg is group/project. Its purpose is to save a
- * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
- * the user dquot.
- */
- ASSERT(!udqhint || type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
- if (udqhint && !dolock)
- xfs_dqlock(udqhint);
-
- /*
- * No need to take dqlock to look at the id.
- * The ID can't change until it gets reclaimed, and it won't
- * be reclaimed as long as we have a ref from inode and we hold
- * the ilock.
- */
- if (udqhint &&
- (dqp = udqhint->q_gdquot) &&
- (be32_to_cpu(dqp->q_core.d_id) == id)) {
- ASSERT(XFS_DQ_IS_LOCKED(udqhint));
- xfs_dqlock(dqp);
- XFS_DQHOLD(dqp);
- ASSERT(*IO_idqpp == NULL);
- *IO_idqpp = dqp;
- if (!dolock) {
- xfs_dqunlock(dqp);
- xfs_dqunlock(udqhint);
- }
- goto done;
- }
- /*
- * We can't hold a dquot lock when we call the dqget code.
- * We'll deadlock in no time, because of (not conforming to)
- * lock ordering - the inode lock comes before any dquot lock,
- * and we may drop and reacquire the ilock in xfs_qm_dqget().
- */
- if (udqhint)
- xfs_dqunlock(udqhint);
- /*
- * Find the dquot from somewhere. This bumps the
- * reference count of dquot and returns it locked.
- * This can return ENOENT if dquot didn't exist on
- * disk and we didn't ask it to allocate;
- * ESRCH if quotas got turned off suddenly.
- */
- if ((error = xfs_qm_dqget(ip->i_mount, ip, id, type,
- doalloc|XFS_QMOPT_DOWARN, &dqp))) {
- if (udqhint && dolock)
- xfs_dqlock(udqhint);
- goto done;
- }
-
- xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget");
- /*
- * dqget may have dropped and re-acquired the ilock, but it guarantees
- * that the dquot returned is the one that should go in the inode.
- */
- *IO_idqpp = dqp;
- ASSERT(dqp);
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
- if (! dolock) {
- xfs_dqunlock(dqp);
- goto done;
- }
- if (! udqhint)
- goto done;
-
- ASSERT(udqhint);
- ASSERT(dolock);
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
- if (! xfs_qm_dqlock_nowait(udqhint)) {
- xfs_dqunlock(dqp);
- xfs_dqlock(udqhint);
- xfs_dqlock(dqp);
- }
- done:
-#ifdef QUOTADEBUG
- if (udqhint) {
- if (dolock)
- ASSERT(XFS_DQ_IS_LOCKED(udqhint));
- }
- if (! error) {
- if (dolock)
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
- }
-#endif
- return error;
-}
-
-
-/*
- * Given a udquot and gdquot, attach a ptr to the group dquot in the
- * udquot as a hint for future lookups. The idea sounds simple, but the
- * execution isn't, because the udquot might have a group dquot attached
- * already and getting rid of that gets us into lock ordering constraints.
- * The process is complicated more by the fact that the dquots may or may not
- * be locked on entry.
- */
-STATIC void
-xfs_qm_dqattach_grouphint(
- xfs_dquot_t *udq,
- xfs_dquot_t *gdq,
- uint locked)
-{
- xfs_dquot_t *tmp;
-
-#ifdef QUOTADEBUG
- if (locked) {
- ASSERT(XFS_DQ_IS_LOCKED(udq));
- ASSERT(XFS_DQ_IS_LOCKED(gdq));
- }
-#endif
- if (! locked)
- xfs_dqlock(udq);
-
- if ((tmp = udq->q_gdquot)) {
- if (tmp == gdq) {
- if (! locked)
- xfs_dqunlock(udq);
- return;
- }
-
- udq->q_gdquot = NULL;
- /*
- * We can't keep any dqlocks when calling dqrele,
- * because the freelist lock comes before dqlocks.
- */
- xfs_dqunlock(udq);
- if (locked)
- xfs_dqunlock(gdq);
- /*
- * we took a hard reference once upon a time in dqget,
- * so give it back when the udquot no longer points at it
- * dqput() does the unlocking of the dquot.
- */
- xfs_qm_dqrele(tmp);
-
- xfs_dqlock(udq);
- xfs_dqlock(gdq);
-
- } else {
- ASSERT(XFS_DQ_IS_LOCKED(udq));
- if (! locked) {
- xfs_dqlock(gdq);
- }
- }
-
- ASSERT(XFS_DQ_IS_LOCKED(udq));
- ASSERT(XFS_DQ_IS_LOCKED(gdq));
- /*
- * Somebody could have attached a gdquot here,
- * when we dropped the uqlock. If so, just do nothing.
- */
- if (udq->q_gdquot == NULL) {
- XFS_DQHOLD(gdq);
- udq->q_gdquot = gdq;
- }
- if (! locked) {
- xfs_dqunlock(gdq);
- xfs_dqunlock(udq);
- }
-}
-
-
-/*
- * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
- * into account.
- * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
- * If XFS_QMOPT_DQLOCK, the dquot(s) will be returned locked. This option pretty
- * much made this code a complete mess, but it has been pretty useful.
- * If XFS_QMOPT_ILOCKED, then the inode passed in is already locked EXCL.
- * Inode may get unlocked and relocked in here, and the caller must deal with
- * the consequences.
- */
-int
-xfs_qm_dqattach(
- xfs_inode_t *ip,
- uint flags)
-{
- xfs_mount_t *mp = ip->i_mount;
- uint nquotas = 0;
- int error = 0;
-
- if ((! XFS_IS_QUOTA_ON(mp)) ||
- (! XFS_NOT_DQATTACHED(mp, ip)) ||
- (ip->i_ino == mp->m_sb.sb_uquotino) ||
- (ip->i_ino == mp->m_sb.sb_gquotino))
- return 0;
-
- ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 ||
- xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
- if (! (flags & XFS_QMOPT_ILOCKED))
- xfs_ilock(ip, XFS_ILOCK_EXCL);
-
- if (XFS_IS_UQUOTA_ON(mp)) {
- error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
- flags & XFS_QMOPT_DQALLOC,
- flags & XFS_QMOPT_DQLOCK,
- NULL, &ip->i_udquot);
- if (error)
- goto done;
- nquotas++;
- }
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- if (XFS_IS_OQUOTA_ON(mp)) {
- error = XFS_IS_GQUOTA_ON(mp) ?
- xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
- flags & XFS_QMOPT_DQALLOC,
- flags & XFS_QMOPT_DQLOCK,
- ip->i_udquot, &ip->i_gdquot) :
- xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
- flags & XFS_QMOPT_DQALLOC,
- flags & XFS_QMOPT_DQLOCK,
- ip->i_udquot, &ip->i_gdquot);
- /*
- * Don't worry about the udquot that we may have
- * attached above. It'll get detached, if not already.
- */
- if (error)
- goto done;
- nquotas++;
- }
-
- /*
- * Attach this group quota to the user quota as a hint.
- * This WON'T, in general, result in a thrash.
- */
- if (nquotas == 2) {
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT(ip->i_udquot);
- ASSERT(ip->i_gdquot);
-
- /*
- * We may or may not have the i_udquot locked at this point,
- * but this check is OK since we don't depend on the i_gdquot to
- * be accurate 100% all the time. It is just a hint, and this
- * will succeed in general.
- */
- if (ip->i_udquot->q_gdquot == ip->i_gdquot)
- goto done;
- /*
- * Attach i_gdquot to the gdquot hint inside the i_udquot.
- */
- xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot,
- flags & XFS_QMOPT_DQLOCK);
- }
-
- done:
-
-#ifdef QUOTADEBUG
- if (! error) {
- if (ip->i_udquot) {
- if (flags & XFS_QMOPT_DQLOCK)
- ASSERT(XFS_DQ_IS_LOCKED(ip->i_udquot));
- }
- if (ip->i_gdquot) {
- if (flags & XFS_QMOPT_DQLOCK)
- ASSERT(XFS_DQ_IS_LOCKED(ip->i_gdquot));
- }
- if (XFS_IS_UQUOTA_ON(mp))
- ASSERT(ip->i_udquot);
- if (XFS_IS_OQUOTA_ON(mp))
- ASSERT(ip->i_gdquot);
- }
-#endif
-
- if (! (flags & XFS_QMOPT_ILOCKED))
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
-#ifdef QUOTADEBUG
- else
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-#endif
- return error;
-}
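
A hedged sketch of the two usual ways xfs_qm_dqattach() is called, following the flag comments above; the surrounding error handling is illustrative only:

	int error;

	/* Inode not locked yet: let dqattach take and drop the ilock itself,
	 * allocating on-disk dquots if they do not exist. */
	error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC);
	if (error)
		return error;

	/* Inode already held exclusively by the caller: pass ILOCKED so the
	 * routine does not try to take the ilock again. */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED);
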
-
-/*
- * Release dquots (and their references) if any.
- * The inode should be locked EXCL except when this is called by
- * xfs_ireclaim.
- */
-void
-xfs_qm_dqdetach(
- xfs_inode_t *ip)
-{
- if (!(ip->i_udquot || ip->i_gdquot))
- return;
-
- ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
- ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
- if (ip->i_udquot) {
- xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip);
- xfs_qm_dqrele(ip->i_udquot);
- ip->i_udquot = NULL;
- }
- if (ip->i_gdquot) {
- xfs_dqtrace_entry_ino(ip->i_gdquot, "DQDETTACH", ip);
- xfs_qm_dqrele(ip->i_gdquot);
- ip->i_gdquot = NULL;
- }
-}
-
-/*
- * This is called by VFS_SYNC and the flags arg determines the caller,
- * and its motives, as done in xfs_sync.
- *
- * vfs_sync: SYNC_FSDATA|SYNC_ATTR|SYNC_BDFLUSH 0x31
- * syscall sync: SYNC_FSDATA|SYNC_ATTR|SYNC_DELWRI 0x25
- * umountroot : SYNC_WAIT | SYNC_CLOSE | SYNC_ATTR | SYNC_FSDATA
- */
-
-int
-xfs_qm_sync(
- xfs_mount_t *mp,
- int flags)
-{
- int recl, restarts;
- xfs_dquot_t *dqp;
- uint flush_flags;
- boolean_t nowait;
- int error;
-
- if (! XFS_IS_QUOTA_ON(mp))
- return 0;
-
- restarts = 0;
- /*
- * We won't block unless we are asked to.
- */
- nowait = (boolean_t)(flags & SYNC_BDFLUSH || (flags & SYNC_WAIT) == 0);
-
- again:
- xfs_qm_mplist_lock(mp);
- /*
- * dqpurge_all() also takes the mplist lock and iterates through all dquots
- * during quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
- * when we have the mplist lock, we know that dquots will be consistent
- * as long as we have it locked.
- */
- if (! XFS_IS_QUOTA_ON(mp)) {
- xfs_qm_mplist_unlock(mp);
- return 0;
- }
- FOREACH_DQUOT_IN_MP(dqp, mp) {
- /*
- * If this is vfs_sync calling, then skip the dquots that
- * don't 'seem' to be dirty. ie. don't acquire dqlock.
- * This is very similar to what xfs_sync does with inodes.
- */
- if (flags & SYNC_BDFLUSH) {
- if (! XFS_DQ_IS_DIRTY(dqp))
- continue;
- }
-
- if (nowait) {
- /*
- * Try to acquire the dquot lock. We are NOT out of
- * lock order, but we just don't want to wait for this
- * lock, unless somebody wanted us to.
- */
- if (! xfs_qm_dqlock_nowait(dqp))
- continue;
- } else {
- xfs_dqlock(dqp);
- }
-
- /*
- * Now, find out for sure if this dquot is dirty or not.
- */
- if (! XFS_DQ_IS_DIRTY(dqp)) {
- xfs_dqunlock(dqp);
- continue;
- }
-
- /* XXX a sentinel would be better */
- recl = XFS_QI_MPLRECLAIMS(mp);
- if (! xfs_qm_dqflock_nowait(dqp)) {
- if (nowait) {
- xfs_dqunlock(dqp);
- continue;
- }
- /*
- * If we can't grab the flush lock then if the caller
- * really wanted us to give this our best shot, so
- * see if we can give a push to the buffer before we wait
- * on the flush lock. At this point, we know that
- * even though the dquot is being flushed,
- * it has (new) dirty data.
- */
- xfs_qm_dqflock_pushbuf_wait(dqp);
- }
- /*
- * Let go of the mplist lock. We don't want to hold it
- * across a disk write
- */
- flush_flags = (nowait) ? XFS_QMOPT_DELWRI : XFS_QMOPT_SYNC;
- xfs_qm_mplist_unlock(mp);
- xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
- error = xfs_qm_dqflush(dqp, flush_flags);
- xfs_dqunlock(dqp);
- if (error && XFS_FORCED_SHUTDOWN(mp))
- return 0; /* Need to prevent umount failure */
- else if (error)
- return error;
-
- xfs_qm_mplist_lock(mp);
- if (recl != XFS_QI_MPLRECLAIMS(mp)) {
- if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
- break;
-
- xfs_qm_mplist_unlock(mp);
- goto again;
- }
- }
-
- xfs_qm_mplist_unlock(mp);
- return 0;
-}
-
-
-/*
- * This initializes all the quota information that's kept in the
- * mount structure
- */
-STATIC int
-xfs_qm_init_quotainfo(
- xfs_mount_t *mp)
-{
- xfs_quotainfo_t *qinf;
- int error;
- xfs_dquot_t *dqp;
-
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
-
- /*
- * Tell XQM that we exist as soon as possible.
- */
- if ((error = xfs_qm_hold_quotafs_ref(mp))) {
- return error;
- }
-
- qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
-
- /*
- * See if quotainodes are setup, and if not, allocate them,
- * and change the superblock accordingly.
- */
- if ((error = xfs_qm_init_quotainos(mp))) {
- kmem_free(qinf, sizeof(xfs_quotainfo_t));
- mp->m_quotainfo = NULL;
- return error;
- }
-
- spin_lock_init(&qinf->qi_pinlock);
- xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0);
- qinf->qi_dqreclaims = 0;
-
- /* mutex used to serialize quotaoffs */
- mutex_init(&qinf->qi_quotaofflock);
-
- /* Precalc some constants */
- qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
- ASSERT(qinf->qi_dqchunklen);
- qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
- do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));
-
- mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
-
- /*
- * We try to get the limits from the superuser's limits fields.
- * This is quite hacky, but it is standard quota practice.
- * We look at the USR dquot with id == 0 first, but if user quotas
- * are not enabled we go to the GRP dquot with id == 0.
- * We don't really care to keep separate default limits for user
- * and group quotas, at least not at this point.
- */
- error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0,
- XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
- (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
- XFS_DQ_PROJ),
- XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN,
- &dqp);
- if (! error) {
- xfs_disk_dquot_t *ddqp = &dqp->q_core;
-
- /*
- * The warnings and timers set the grace period given to
- * a user or group before they can no longer perform any
- * more writes. If it is zero, a default is used.
- */
- qinf->qi_btimelimit = ddqp->d_btimer ?
- be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
- qinf->qi_itimelimit = ddqp->d_itimer ?
- be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
- qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
- be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
- qinf->qi_bwarnlimit = ddqp->d_bwarns ?
- be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
- qinf->qi_iwarnlimit = ddqp->d_iwarns ?
- be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
- qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
- be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
- qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
- qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
- qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
- qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
- qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
- qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
-
- /*
- * We sent the XFS_QMOPT_DQSUSER flag to dqget because
- * we don't want this dquot cached. We haven't done a
- * quotacheck yet, and quotacheck doesn't like incore dquots.
- */
- xfs_qm_dqdestroy(dqp);
- } else {
- qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
- qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
- qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
- qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
- qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
- qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
- }
-
- return 0;
-}
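
Putting rough numbers on the precalculated constants above: with the usual one-FSB dquot cluster and a 4 KiB filesystem block, qi_dqchunklen is 8 basic blocks, BBTOB(8) is 4096 bytes, and assuming the 136-byte xfs_dqblk_t of this era, qi_dqperchunk works out to 4096 / 136 = 30 dquots per chunk (integer division). The values naturally differ for other block sizes.
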
-
-
-/*
- * Gets called when unmounting a filesystem or when all quotas get
- * turned off.
- * This purges the quota inodes, destroys locks and frees itself.
- */
-void
-xfs_qm_destroy_quotainfo(
- xfs_mount_t *mp)
-{
- xfs_quotainfo_t *qi;
-
- qi = mp->m_quotainfo;
- ASSERT(qi != NULL);
- ASSERT(xfs_Gqm != NULL);
-
- /*
- * Release the reference that XQM kept, so that we know
- * when the XQM structure should be freed. We cannot assume
- * that xfs_Gqm is non-null after this point.
- */
- xfs_qm_rele_quotafs_ref(mp);
-
- spinlock_destroy(&qi->qi_pinlock);
- xfs_qm_list_destroy(&qi->qi_dqlist);
-
- if (qi->qi_uquotaip) {
- XFS_PURGE_INODE(qi->qi_uquotaip);
- qi->qi_uquotaip = NULL; /* paranoia */
- }
- if (qi->qi_gquotaip) {
- XFS_PURGE_INODE(qi->qi_gquotaip);
- qi->qi_gquotaip = NULL;
- }
- mutex_destroy(&qi->qi_quotaofflock);
- kmem_free(qi, sizeof(xfs_quotainfo_t));
- mp->m_quotainfo = NULL;
-}
-
-
-
-/* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */
-
-/* ARGSUSED */
-STATIC void
-xfs_qm_list_init(
- xfs_dqlist_t *list,
- char *str,
- int n)
-{
- mutex_init(&list->qh_lock);
- list->qh_next = NULL;
- list->qh_version = 0;
- list->qh_nelems = 0;
-}
-
-STATIC void
-xfs_qm_list_destroy(
- xfs_dqlist_t *list)
-{
- mutex_destroy(&(list->qh_lock));
-}
-
-
-/*
- * Stripped down version of dqattach. This doesn't attach, or even look at the
- * dquots attached to the inode. The rationale is that there won't be any
- * attached at the time this is called from quotacheck.
- */
-STATIC int
-xfs_qm_dqget_noattach(
- xfs_inode_t *ip,
- xfs_dquot_t **O_udqpp,
- xfs_dquot_t **O_gdqpp)
-{
- int error;
- xfs_mount_t *mp;
- xfs_dquot_t *udqp, *gdqp;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- mp = ip->i_mount;
- udqp = NULL;
- gdqp = NULL;
-
- if (XFS_IS_UQUOTA_ON(mp)) {
- ASSERT(ip->i_udquot == NULL);
- /*
- * We want the dquot allocated if it doesn't exist.
- */
- if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
- XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN,
- &udqp))) {
- /*
- * Shouldn't be able to turn off quotas here.
- */
- ASSERT(error != ESRCH);
- ASSERT(error != ENOENT);
- return error;
- }
- ASSERT(udqp);
- }
-
- if (XFS_IS_OQUOTA_ON(mp)) {
- ASSERT(ip->i_gdquot == NULL);
- if (udqp)
- xfs_dqunlock(udqp);
- error = XFS_IS_GQUOTA_ON(mp) ?
- xfs_qm_dqget(mp, ip,
- ip->i_d.di_gid, XFS_DQ_GROUP,
- XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
- &gdqp) :
- xfs_qm_dqget(mp, ip,
- ip->i_d.di_projid, XFS_DQ_PROJ,
- XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
- &gdqp);
- if (error) {
- if (udqp)
- xfs_qm_dqrele(udqp);
- ASSERT(error != ESRCH);
- ASSERT(error != ENOENT);
- return error;
- }
- ASSERT(gdqp);
-
- /* Reacquire the locks in the right order */
- if (udqp) {
- if (! xfs_qm_dqlock_nowait(udqp)) {
- xfs_dqunlock(gdqp);
- xfs_dqlock(udqp);
- xfs_dqlock(gdqp);
- }
- }
- }
-
- *O_udqpp = udqp;
- *O_gdqpp = gdqp;
-
-#ifdef QUOTADEBUG
- if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp));
- if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp));
-#endif
- return 0;
-}
-
-/*
- * Create an inode and return with a reference already taken, but unlocked
- * This is how we create quota inodes
- */
-STATIC int
-xfs_qm_qino_alloc(
- xfs_mount_t *mp,
- xfs_inode_t **ip,
- __int64_t sbfields,
- uint flags)
-{
- xfs_trans_t *tp;
- int error;
- int committed;
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
- if ((error = xfs_trans_reserve(tp,
- XFS_QM_QINOCREATE_SPACE_RES(mp),
- XFS_CREATE_LOG_RES(mp), 0,
- XFS_TRANS_PERM_LOG_RES,
- XFS_CREATE_LOG_COUNT))) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
-
- if ((error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0,
- &xfs_zerocr, 0, 1, ip, &committed))) {
- xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
- XFS_TRANS_ABORT);
- return error;
- }
-
- /*
- * Keep an extra reference to this quota inode. This inode is
- * locked exclusively and joined to the transaction already.
- */
- ASSERT(xfs_isilocked(*ip, XFS_ILOCK_EXCL));
- VN_HOLD(XFS_ITOV((*ip)));
-
- /*
- * Make the changes in the superblock, and log those too.
- * sbfields arg may contain fields other than *QUOTINO;
- * VERSIONNUM for example.
- */
- spin_lock(&mp->m_sb_lock);
- if (flags & XFS_QMOPT_SBVERSION) {
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
- unsigned oldv = mp->m_sb.sb_versionnum;
-#endif
- ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
- ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
- XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
- (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
- XFS_SB_GQUOTINO | XFS_SB_QFLAGS));
-
- xfs_sb_version_addquota(&mp->m_sb);
- mp->m_sb.sb_uquotino = NULLFSINO;
- mp->m_sb.sb_gquotino = NULLFSINO;
-
- /* qflags will get updated _after_ quotacheck */
- mp->m_sb.sb_qflags = 0;
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
- cmn_err(CE_NOTE,
- "Old superblock version %x, converting to %x.",
- oldv, mp->m_sb.sb_versionnum);
-#endif
- }
- if (flags & XFS_QMOPT_UQUOTA)
- mp->m_sb.sb_uquotino = (*ip)->i_ino;
- else
- mp->m_sb.sb_gquotino = (*ip)->i_ino;
- spin_unlock(&mp->m_sb_lock);
- xfs_mod_sb(tp, sbfields);
-
- if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
- xfs_fs_cmn_err(CE_ALERT, mp, "XFS qino_alloc failed!");
- return error;
- }
- return 0;
-}
-
-
-STATIC void
-xfs_qm_reset_dqcounts(
- xfs_mount_t *mp,
- xfs_buf_t *bp,
- xfs_dqid_t id,
- uint type)
-{
- xfs_disk_dquot_t *ddq;
- int j;
-
- xfs_buftrace("RESET DQUOTS", bp);
- /*
- * Reset all counters and timers. They'll be
- * started afresh by xfs_qm_quotacheck.
- */
-#ifdef DEBUG
- j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
- do_div(j, sizeof(xfs_dqblk_t));
- ASSERT(XFS_QM_DQPERBLK(mp) == j);
-#endif
- ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp);
- for (j = 0; j < XFS_QM_DQPERBLK(mp); j++) {
- /*
- * Do a sanity check, and if needed, repair the dqblk. Don't
- * output any warnings because it's perfectly possible to
- * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
- */
- (void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR,
- "xfs_quotacheck");
- ddq->d_bcount = 0;
- ddq->d_icount = 0;
- ddq->d_rtbcount = 0;
- ddq->d_btimer = 0;
- ddq->d_itimer = 0;
- ddq->d_rtbtimer = 0;
- ddq->d_bwarns = 0;
- ddq->d_iwarns = 0;
- ddq->d_rtbwarns = 0;
- ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
- }
-}
-
-STATIC int
-xfs_qm_dqiter_bufs(
- xfs_mount_t *mp,
- xfs_dqid_t firstid,
- xfs_fsblock_t bno,
- xfs_filblks_t blkcnt,
- uint flags)
-{
- xfs_buf_t *bp;
- int error;
- int notcommitted;
- int incr;
- int type;
-
- ASSERT(blkcnt > 0);
- notcommitted = 0;
- incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
- XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
- type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
- (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
- error = 0;
-
- /*
- * Blkcnt arg can be a very big number, and might even be
- * larger than the log itself. So, we have to break it up into
- * manageable-sized transactions.
- * Note that we don't start a permanent transaction here; we might
- * not be able to get a log reservation for the whole thing up front,
- * and we don't really care to either, because we just discard
- * everything if we were to crash in the middle of this loop.
- */
- while (blkcnt--) {
- error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
- XFS_FSB_TO_DADDR(mp, bno),
- (int)XFS_QI_DQCHUNKLEN(mp), 0, &bp);
- if (error)
- break;
-
- xfs_qm_reset_dqcounts(mp, bp, firstid, type);
- xfs_bdwrite(mp, bp);
- /*
-		 * Go to the next block.
- */
- bno++;
- firstid += XFS_QM_DQPERBLK(mp);
- }
- return error;
-}
-
-/*
- * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
- * caller supplied function for every chunk of dquots that we find.
- */
-STATIC int
-xfs_qm_dqiterate(
- xfs_mount_t *mp,
- xfs_inode_t *qip,
- uint flags)
-{
- xfs_bmbt_irec_t *map;
- int i, nmaps; /* number of map entries */
- int error; /* return value */
- xfs_fileoff_t lblkno;
- xfs_filblks_t maxlblkcnt;
- xfs_dqid_t firstid;
- xfs_fsblock_t rablkno;
- xfs_filblks_t rablkcnt;
-
- error = 0;
- /*
- * This looks racy, but we can't keep an inode lock across a
- * trans_reserve. But, this gets called during quotacheck, and that
- * happens only at mount time which is single threaded.
- */
- if (qip->i_d.di_nblocks == 0)
- return 0;
-
- map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
-
- lblkno = 0;
- maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
- do {
- nmaps = XFS_DQITER_MAP_SIZE;
- /*
- * We aren't changing the inode itself. Just changing
- * some of its data. No new blocks are added here, and
- * the inode is never added to the transaction.
- */
- xfs_ilock(qip, XFS_ILOCK_SHARED);
- error = xfs_bmapi(NULL, qip, lblkno,
- maxlblkcnt - lblkno,
- XFS_BMAPI_METADATA,
- NULL,
- 0, map, &nmaps, NULL, NULL);
- xfs_iunlock(qip, XFS_ILOCK_SHARED);
- if (error)
- break;
-
- ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
- for (i = 0; i < nmaps; i++) {
- ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
- ASSERT(map[i].br_blockcount);
-
-
- lblkno += map[i].br_blockcount;
-
- if (map[i].br_startblock == HOLESTARTBLOCK)
- continue;
-
- firstid = (xfs_dqid_t) map[i].br_startoff *
- XFS_QM_DQPERBLK(mp);
- /*
- * Do a read-ahead on the next extent.
- */
- if ((i+1 < nmaps) &&
- (map[i+1].br_startblock != HOLESTARTBLOCK)) {
- rablkcnt = map[i+1].br_blockcount;
- rablkno = map[i+1].br_startblock;
- while (rablkcnt--) {
- xfs_baread(mp->m_ddev_targp,
- XFS_FSB_TO_DADDR(mp, rablkno),
- (int)XFS_QI_DQCHUNKLEN(mp));
- rablkno++;
- }
- }
- /*
- * Iterate thru all the blks in the extent and
- * reset the counters of all the dquots inside them.
- */
- if ((error = xfs_qm_dqiter_bufs(mp,
- firstid,
- map[i].br_startblock,
- map[i].br_blockcount,
- flags))) {
- break;
- }
- }
-
- if (error)
- break;
- } while (nmaps > 0);
-
- kmem_free(map, XFS_DQITER_MAP_SIZE * sizeof(*map));
-
- return error;
-}
-
-/*
- * Called by dqusage_adjust in doing a quotacheck.
- * Given the inode, and a dquot (either USR or GRP, doesn't matter),
- * this updates its incore copy as well as the buffer copy. This is
- * so that once the quotacheck is done, we can just log all the buffers,
- * as opposed to logging numerous updates to individual dquots.
- */
-STATIC void
-xfs_qm_quotacheck_dqadjust(
- xfs_dquot_t *dqp,
- xfs_qcnt_t nblks,
- xfs_qcnt_t rtblks)
-{
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
- xfs_dqtrace_entry(dqp, "QCHECK DQADJUST");
- /*
- * Adjust the inode count and the block count to reflect this inode's
- * resource usage.
- */
- be64_add_cpu(&dqp->q_core.d_icount, 1);
- dqp->q_res_icount++;
- if (nblks) {
- be64_add_cpu(&dqp->q_core.d_bcount, nblks);
- dqp->q_res_bcount += nblks;
- }
- if (rtblks) {
- be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
- dqp->q_res_rtbcount += rtblks;
- }
-
- /*
- * Set default limits, adjust timers (since we changed usages)
- */
- if (! XFS_IS_SUSER_DQUOT(dqp)) {
- xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core);
- xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core);
- }
-
- dqp->dq_flags |= XFS_DQ_DIRTY;
-}
-
-STATIC int
-xfs_qm_get_rtblks(
- xfs_inode_t *ip,
- xfs_qcnt_t *O_rtblks)
-{
- xfs_filblks_t rtblks; /* total rt blks */
- xfs_extnum_t idx; /* extent record index */
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_extnum_t nextents; /* number of extent entries */
- int error;
-
- ASSERT(XFS_IS_REALTIME_INODE(ip));
- ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
- if (!(ifp->if_flags & XFS_IFEXTENTS)) {
- if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
- return error;
- }
- rtblks = 0;
- nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
- for (idx = 0; idx < nextents; idx++)
- rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
- *O_rtblks = (xfs_qcnt_t)rtblks;
- return 0;
-}
-
-/*
- * callback routine supplied to bulkstat(). Given an inumber, find its
- * dquots and update them to account for resources taken by that inode.
- */
-/* ARGSUSED */
-STATIC int
-xfs_qm_dqusage_adjust(
- xfs_mount_t *mp, /* mount point for filesystem */
- xfs_ino_t ino, /* inode number to get data for */
- void __user *buffer, /* not used */
- int ubsize, /* not used */
- void *private_data, /* not used */
- xfs_daddr_t bno, /* starting block of inode cluster */
- int *ubused, /* not used */
- void *dip, /* on-disk inode pointer (not used) */
- int *res) /* result code value */
-{
- xfs_inode_t *ip;
- xfs_dquot_t *udqp, *gdqp;
- xfs_qcnt_t nblks, rtblks;
- int error;
-
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
-
- /*
- * rootino must have its resources accounted for, not so with the quota
- * inodes.
- */
- if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
- *res = BULKSTAT_RV_NOTHING;
- return XFS_ERROR(EINVAL);
- }
-
- /*
- * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
- * interface expects the inode to be exclusively locked because that's
- * the case in all other instances. It's OK that we do this because
- * quotacheck is done only at mount time.
- */
- if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) {
- *res = BULKSTAT_RV_NOTHING;
- return error;
- }
-
- /*
-	 * Obtain the locked dquots. In case of an error (e.g. allocation
-	 * fails for ENOSPC), we return the negative of the error number
-	 * to bulkstat, so that it can get propagated to quotacheck() and
-	 * cause us to disable quotas for the file system.
- */
- if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
- xfs_iput(ip, XFS_ILOCK_EXCL);
- *res = BULKSTAT_RV_GIVEUP;
- return error;
- }
-
- rtblks = 0;
- if (! XFS_IS_REALTIME_INODE(ip)) {
- nblks = (xfs_qcnt_t)ip->i_d.di_nblocks;
- } else {
- /*
- * Walk thru the extent list and count the realtime blocks.
- */
- if ((error = xfs_qm_get_rtblks(ip, &rtblks))) {
- xfs_iput(ip, XFS_ILOCK_EXCL);
- if (udqp)
- xfs_qm_dqput(udqp);
- if (gdqp)
- xfs_qm_dqput(gdqp);
- *res = BULKSTAT_RV_GIVEUP;
- return error;
- }
- nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
- }
- ASSERT(ip->i_delayed_blks == 0);
-
- /*
- * We can't release the inode while holding its dquot locks.
-	 * The inode can go into inactive and might try to acquire the dquot locks.
- * So, just unlock here and do a vn_rele at the end.
- */
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
- /*
- * Add the (disk blocks and inode) resources occupied by this
- * inode to its dquots. We do this adjustment in the incore dquot,
- * and also copy the changes to its buffer.
- * We don't care about putting these changes in a transaction
- * envelope because if we crash in the middle of a 'quotacheck'
- * we have to start from the beginning anyway.
- * Once we're done, we'll log all the dquot bufs.
- *
- * The *QUOTA_ON checks below may look pretty racy, but quotachecks
- * and quotaoffs don't race. (Quotachecks happen at mount time only).
- */
- if (XFS_IS_UQUOTA_ON(mp)) {
- ASSERT(udqp);
- xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks);
- xfs_qm_dqput(udqp);
- }
- if (XFS_IS_OQUOTA_ON(mp)) {
- ASSERT(gdqp);
- xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks);
- xfs_qm_dqput(gdqp);
- }
- /*
- * Now release the inode. This will send it to 'inactive', and
- * possibly even free blocks.
- */
- IRELE(ip);
-
- /*
- * Goto next inode.
- */
- *res = BULKSTAT_RV_DIDONE;
- return 0;
-}
-
-/*
- * Walk thru all the filesystem inodes and construct a consistent view
- * of the disk quota world. If the quotacheck fails, disable quotas.
- */
-int
-xfs_qm_quotacheck(
- xfs_mount_t *mp)
-{
- int done, count, error;
- xfs_ino_t lastino;
- size_t structsz;
- xfs_inode_t *uip, *gip;
- uint flags;
-
- count = INT_MAX;
- structsz = 1;
- lastino = 0;
- flags = 0;
-
- ASSERT(XFS_QI_UQIP(mp) || XFS_QI_GQIP(mp));
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
-
- /*
- * There should be no cached dquots. The (simplistic) quotacheck
- * algorithm doesn't like that.
- */
- ASSERT(XFS_QI_MPLNDQUOTS(mp) == 0);
-
- cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname);
-
- /*
- * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
- * their counters to zero. We need a clean slate.
- * We don't log our changes till later.
- */
- if ((uip = XFS_QI_UQIP(mp))) {
- if ((error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA)))
- goto error_return;
- flags |= XFS_UQUOTA_CHKD;
- }
-
- if ((gip = XFS_QI_GQIP(mp))) {
- if ((error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
- XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA)))
- goto error_return;
- flags |= XFS_OQUOTA_CHKD;
- }
-
- do {
- /*
- * Iterate thru all the inodes in the file system,
- * adjusting the corresponding dquot counters in core.
- */
- if ((error = xfs_bulkstat(mp, &lastino, &count,
- xfs_qm_dqusage_adjust, NULL,
- structsz, NULL, BULKSTAT_FG_IGET, &done)))
- break;
-
- } while (! done);
-
- /*
- * We've made all the changes that we need to make incore.
- * Flush them down to disk buffers if everything was updated
- * successfully.
- */
- if (!error)
- error = xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI);
-
- /*
- * We can get this error if we couldn't do a dquot allocation inside
- * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
- * dirty dquots that might be cached, we just want to get rid of them
- * and turn quotaoff. The dquots won't be attached to any of the inodes
- * at this point (because we intentionally didn't in dqget_noattach).
- */
- if (error) {
- xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF);
- goto error_return;
- }
-
- /*
- * We didn't log anything, because if we crashed, we'll have to
- * start the quotacheck from scratch anyway. However, we must make
- * sure that our dquot changes are secure before we put the
- * quotacheck'd stamp on the superblock. So, here we do a synchronous
- * flush.
- */
- XFS_bflush(mp->m_ddev_targp);
-
- /*
- * If one type of quotas is off, then it will lose its
- * quotachecked status, since we won't be doing accounting for
- * that type anymore.
- */
- mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
- mp->m_qflags |= flags;
-
- XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");
-
- error_return:
- if (error) {
- cmn_err(CE_WARN, "XFS quotacheck %s: Unsuccessful (Error %d): "
- "Disabling quotas.",
- mp->m_fsname, error);
- /*
- * We must turn off quotas.
- */
- ASSERT(mp->m_quotainfo != NULL);
- ASSERT(xfs_Gqm != NULL);
- xfs_qm_destroy_quotainfo(mp);
- if (xfs_mount_reset_sbqflags(mp)) {
- cmn_err(CE_WARN, "XFS quotacheck %s: "
- "Failed to reset quota flags.", mp->m_fsname);
- }
- } else {
- cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
- }
- return (error);
-}
-
-/*
- * This is called after the superblock has been read in and we're ready to
- * iget the quota inodes.
- */
-STATIC int
-xfs_qm_init_quotainos(
- xfs_mount_t *mp)
-{
- xfs_inode_t *uip, *gip;
- int error;
- __int64_t sbflags;
- uint flags;
-
- ASSERT(mp->m_quotainfo);
- uip = gip = NULL;
- sbflags = 0;
- flags = 0;
-
- /*
- * Get the uquota and gquota inodes
- */
- if (xfs_sb_version_hasquota(&mp->m_sb)) {
- if (XFS_IS_UQUOTA_ON(mp) &&
- mp->m_sb.sb_uquotino != NULLFSINO) {
- ASSERT(mp->m_sb.sb_uquotino > 0);
- if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
- 0, 0, &uip, 0)))
- return XFS_ERROR(error);
- }
- if (XFS_IS_OQUOTA_ON(mp) &&
- mp->m_sb.sb_gquotino != NULLFSINO) {
- ASSERT(mp->m_sb.sb_gquotino > 0);
- if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
- 0, 0, &gip, 0))) {
- if (uip)
- IRELE(uip);
- return XFS_ERROR(error);
- }
- }
- } else {
- flags |= XFS_QMOPT_SBVERSION;
- sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
- XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
- }
-
- /*
- * Create the two inodes, if they don't exist already. The changes
- * made above will get added to a transaction and logged in one of
- * the qino_alloc calls below. If the device is readonly,
- * temporarily switch to read-write to do this.
- */
- if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
- if ((error = xfs_qm_qino_alloc(mp, &uip,
- sbflags | XFS_SB_UQUOTINO,
- flags | XFS_QMOPT_UQUOTA)))
- return XFS_ERROR(error);
-
- flags &= ~XFS_QMOPT_SBVERSION;
- }
- if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
- flags |= (XFS_IS_GQUOTA_ON(mp) ?
- XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
- error = xfs_qm_qino_alloc(mp, &gip,
- sbflags | XFS_SB_GQUOTINO, flags);
- if (error) {
- if (uip)
- IRELE(uip);
-
- return XFS_ERROR(error);
- }
- }
-
- XFS_QI_UQIP(mp) = uip;
- XFS_QI_GQIP(mp) = gip;
-
- return 0;
-}
-
-
-/*
- * Traverse the freelist of dquots and attempt to reclaim a maximum of
- * 'howmany' dquots. This operation races with dqlookup(), and attempts to
- * favor the lookup function ...
- * XXXsup merge this with qm_reclaim_one().
- */
-STATIC int
-xfs_qm_shake_freelist(
- int howmany)
-{
- int nreclaimed;
- xfs_dqhash_t *hash;
- xfs_dquot_t *dqp, *nextdqp;
- int restarts;
- int nflushes;
-
- if (howmany <= 0)
- return 0;
-
- nreclaimed = 0;
- restarts = 0;
- nflushes = 0;
-
-#ifdef QUOTADEBUG
- cmn_err(CE_DEBUG, "Shake free 0x%x", howmany);
-#endif
-	/* lock order is: hashchainlock, freelistlock, mplistlock */
- tryagain:
- xfs_qm_freelist_lock(xfs_Gqm);
-
- for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
- ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) &&
- nreclaimed < howmany); ) {
- xfs_dqlock(dqp);
-
- /*
- * We are racing with dqlookup here. Naturally we don't
- * want to reclaim a dquot that lookup wants.
- */
- if (dqp->dq_flags & XFS_DQ_WANT) {
- xfs_dqunlock(dqp);
- xfs_qm_freelist_unlock(xfs_Gqm);
- if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
- return nreclaimed;
- XQM_STATS_INC(xqmstats.xs_qm_dqwants);
- goto tryagain;
- }
-
- /*
- * If the dquot is inactive, we are assured that it is
- * not on the mplist or the hashlist, and that makes our
- * life easier.
- */
- if (dqp->dq_flags & XFS_DQ_INACTIVE) {
- ASSERT(dqp->q_mount == NULL);
- ASSERT(! XFS_DQ_IS_DIRTY(dqp));
- ASSERT(dqp->HL_PREVP == NULL);
- ASSERT(dqp->MPL_PREVP == NULL);
- XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
- nextdqp = dqp->dq_flnext;
- goto off_freelist;
- }
-
- ASSERT(dqp->MPL_PREVP);
- /*
- * Try to grab the flush lock. If this dquot is in the process of
- * getting flushed to disk, we don't want to reclaim it.
- */
- if (! xfs_qm_dqflock_nowait(dqp)) {
- xfs_dqunlock(dqp);
- dqp = dqp->dq_flnext;
- continue;
- }
-
- /*
- * We have the flush lock so we know that this is not in the
- * process of being flushed. So, if this is dirty, flush it
- * DELWRI so that we don't get a freelist infested with
- * dirty dquots.
- */
- if (XFS_DQ_IS_DIRTY(dqp)) {
- int error;
- xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY");
- /*
- * We flush it delayed write, so don't bother
- * releasing the mplock.
- */
- error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
- if (error) {
- xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
- "xfs_qm_dqflush_all: dquot %p flush failed", dqp);
- }
- xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
- dqp = dqp->dq_flnext;
- continue;
- }
- /*
- * We're trying to get the hashlock out of order. This races
-		 * with dqlookup; so we give up and go on to the next dquot if
-		 * we couldn't get the hashlock. This way, we won't starve
-		 * a dqlookup process that holds the hashlock while it is
-		 * waiting for the freelist lock.
- */
- if (! xfs_qm_dqhashlock_nowait(dqp)) {
- xfs_dqfunlock(dqp);
- xfs_dqunlock(dqp);
- dqp = dqp->dq_flnext;
- continue;
- }
- /*
- * This races with dquot allocation code as well as dqflush_all
- * and reclaim code. So, if we failed to grab the mplist lock,
-		 * give up everything and start over.
- */
- hash = dqp->q_hash;
- ASSERT(hash);
- if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
- /* XXX put a sentinel so that we can come back here */
- xfs_dqfunlock(dqp);
- xfs_dqunlock(dqp);
- XFS_DQ_HASH_UNLOCK(hash);
- xfs_qm_freelist_unlock(xfs_Gqm);
- if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
- return nreclaimed;
- goto tryagain;
- }
- xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING");
-#ifdef QUOTADEBUG
- cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
- dqp, be32_to_cpu(dqp->q_core.d_id));
-#endif
- ASSERT(dqp->q_nrefs == 0);
- nextdqp = dqp->dq_flnext;
- XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
- XQM_HASHLIST_REMOVE(hash, dqp);
- xfs_dqfunlock(dqp);
- xfs_qm_mplist_unlock(dqp->q_mount);
- XFS_DQ_HASH_UNLOCK(hash);
-
- off_freelist:
- XQM_FREELIST_REMOVE(dqp);
- xfs_dqunlock(dqp);
- nreclaimed++;
- XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims);
- xfs_qm_dqdestroy(dqp);
- dqp = nextdqp;
- }
- xfs_qm_freelist_unlock(xfs_Gqm);
- return nreclaimed;
-}
-
-
-/*
- * The kmem_shake interface is invoked when memory is running low.
- */
-/* ARGSUSED */
-STATIC int
-xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
-{
- int ndqused, nfree, n;
-
- if (!kmem_shake_allow(gfp_mask))
- return 0;
- if (!xfs_Gqm)
- return 0;
-
- nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */
- /* incore dquots in all f/s's */
- ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;
-
- ASSERT(ndqused >= 0);
-
- if (nfree <= ndqused && nfree < ndquot)
- return 0;
-
- ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */
- n = nfree - ndqused - ndquot; /* # over target */
-
- return xfs_qm_shake_freelist(MAX(nfree, n));
-}
-
-
-/*
- * Just pop the least recently used dquot off the freelist and
- * recycle it. The returned dquot is locked.
- */
-STATIC xfs_dquot_t *
-xfs_qm_dqreclaim_one(void)
-{
- xfs_dquot_t *dqpout;
- xfs_dquot_t *dqp;
- int restarts;
- int nflushes;
-
- restarts = 0;
- dqpout = NULL;
- nflushes = 0;
-
- /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
- startagain:
- xfs_qm_freelist_lock(xfs_Gqm);
-
- FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) {
- xfs_dqlock(dqp);
-
- /*
- * We are racing with dqlookup here. Naturally we don't
- * want to reclaim a dquot that lookup wants. We release the
- * freelist lock and start over, so that lookup will grab
- * both the dquot and the freelistlock.
- */
- if (dqp->dq_flags & XFS_DQ_WANT) {
- ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
- xfs_dqtrace_entry(dqp, "DQRECLAIM: DQWANT");
- xfs_dqunlock(dqp);
- xfs_qm_freelist_unlock(xfs_Gqm);
- if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
- return NULL;
- XQM_STATS_INC(xqmstats.xs_qm_dqwants);
- goto startagain;
- }
-
- /*
- * If the dquot is inactive, we are assured that it is
- * not on the mplist or the hashlist, and that makes our
- * life easier.
- */
- if (dqp->dq_flags & XFS_DQ_INACTIVE) {
- ASSERT(dqp->q_mount == NULL);
- ASSERT(! XFS_DQ_IS_DIRTY(dqp));
- ASSERT(dqp->HL_PREVP == NULL);
- ASSERT(dqp->MPL_PREVP == NULL);
- XQM_FREELIST_REMOVE(dqp);
- xfs_dqunlock(dqp);
- dqpout = dqp;
- XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
- break;
- }
-
- ASSERT(dqp->q_hash);
- ASSERT(dqp->MPL_PREVP);
-
- /*
- * Try to grab the flush lock. If this dquot is in the process of
- * getting flushed to disk, we don't want to reclaim it.
- */
- if (! xfs_qm_dqflock_nowait(dqp)) {
- xfs_dqunlock(dqp);
- continue;
- }
-
- /*
- * We have the flush lock so we know that this is not in the
- * process of being flushed. So, if this is dirty, flush it
- * DELWRI so that we don't get a freelist infested with
- * dirty dquots.
- */
- if (XFS_DQ_IS_DIRTY(dqp)) {
- int error;
- xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY");
- /*
- * We flush it delayed write, so don't bother
- * releasing the freelist lock.
- */
- error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
- if (error) {
- xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
- "xfs_qm_dqreclaim: dquot %p flush failed", dqp);
- }
- xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
- continue;
- }
-
- if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
- xfs_dqfunlock(dqp);
- xfs_dqunlock(dqp);
- continue;
- }
-
- if (! xfs_qm_dqhashlock_nowait(dqp))
- goto mplistunlock;
-
- ASSERT(dqp->q_nrefs == 0);
- xfs_dqtrace_entry(dqp, "DQRECLAIM: UNLINKING");
- XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
- XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
- XQM_FREELIST_REMOVE(dqp);
- dqpout = dqp;
- XFS_DQ_HASH_UNLOCK(dqp->q_hash);
- mplistunlock:
- xfs_qm_mplist_unlock(dqp->q_mount);
- xfs_dqfunlock(dqp);
- xfs_dqunlock(dqp);
- if (dqpout)
- break;
- }
-
- xfs_qm_freelist_unlock(xfs_Gqm);
- return dqpout;
-}
-
-
-/*------------------------------------------------------------------*/
-
-/*
- * Return a new incore dquot. Depending on the number of
- * dquots in the system, we either allocate a new one on the kernel heap,
- * or reclaim a free one.
- * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed
- * to reclaim an existing one from the freelist.
- */
-boolean_t
-xfs_qm_dqalloc_incore(
- xfs_dquot_t **O_dqpp)
-{
- xfs_dquot_t *dqp;
-
- /*
- * Check against high water mark to see if we want to pop
- * a nincompoop dquot off the freelist.
- */
- if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) {
- /*
- * Try to recycle a dquot from the freelist.
- */
- if ((dqp = xfs_qm_dqreclaim_one())) {
- XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
- /*
- * Just zero the core here. The rest will get
- * reinitialized by caller. XXX we shouldn't even
- * do this zero ...
- */
- memset(&dqp->q_core, 0, sizeof(dqp->q_core));
- *O_dqpp = dqp;
- return B_FALSE;
- }
- XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
- }
-
- /*
- * Allocate a brand new dquot on the kernel heap and return it
- * to the caller to initialize.
- */
- ASSERT(xfs_Gqm->qm_dqzone != NULL);
- *O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);
- atomic_inc(&xfs_Gqm->qm_totaldquots);
-
- return B_TRUE;
-}
-
-
-/*
- * Start a transaction and write the incore superblock changes to
- * disk. flags parameter indicates which fields have changed.
- */
-int
-xfs_qm_write_sb_changes(
- xfs_mount_t *mp,
- __int64_t flags)
-{
- xfs_trans_t *tp;
- int error;
-
-#ifdef QUOTADEBUG
- cmn_err(CE_NOTE, "Writing superblock quota changes :%s", mp->m_fsname);
-#endif
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
- if ((error = xfs_trans_reserve(tp, 0,
- mp->m_sb.sb_sectsize + 128, 0,
- 0,
- XFS_DEFAULT_LOG_COUNT))) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
-
- xfs_mod_sb(tp, flags);
- error = xfs_trans_commit(tp, 0);
-
- return error;
-}
-
-
-/* --------------- utility functions for vnodeops ---------------- */
-
-
-/*
- * Given an inode, a uid and gid (from cred_t) make sure that we have
- * allocated relevant dquot(s) on disk, and that we won't exceed inode
- * quotas by creating this file.
- * This also attaches dquot(s) to the given inode after locking it,
- * and returns the dquots corresponding to the uid and/or gid.
- *
- * in : inode (unlocked)
- * out : udquot, gdquot with references taken and unlocked
- */
-int
-xfs_qm_vop_dqalloc(
- xfs_mount_t *mp,
- xfs_inode_t *ip,
- uid_t uid,
- gid_t gid,
- prid_t prid,
- uint flags,
- xfs_dquot_t **O_udqpp,
- xfs_dquot_t **O_gdqpp)
-{
- int error;
- xfs_dquot_t *uq, *gq;
- uint lockflags;
-
- if (!XFS_IS_QUOTA_ON(mp))
- return 0;
-
- lockflags = XFS_ILOCK_EXCL;
- xfs_ilock(ip, lockflags);
-
- if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
- gid = ip->i_d.di_gid;
-
- /*
- * Attach the dquot(s) to this inode, doing a dquot allocation
- * if necessary. The dquot(s) will not be locked.
- */
- if (XFS_NOT_DQATTACHED(mp, ip)) {
- if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC |
- XFS_QMOPT_ILOCKED))) {
- xfs_iunlock(ip, lockflags);
- return error;
- }
- }
-
- uq = gq = NULL;
- if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
- if (ip->i_d.di_uid != uid) {
- /*
- * What we need is the dquot that has this uid, and
- * if we send the inode to dqget, the uid of the inode
- * takes priority over what's sent in the uid argument.
- * We must unlock inode here before calling dqget if
- * we're not sending the inode, because otherwise
- * we'll deadlock by doing trans_reserve while
- * holding ilock.
- */
- xfs_iunlock(ip, lockflags);
- if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
- XFS_DQ_USER,
- XFS_QMOPT_DQALLOC |
- XFS_QMOPT_DOWARN,
- &uq))) {
- ASSERT(error != ENOENT);
- return error;
- }
- /*
- * Get the ilock in the right order.
- */
- xfs_dqunlock(uq);
- lockflags = XFS_ILOCK_SHARED;
- xfs_ilock(ip, lockflags);
- } else {
- /*
- * Take an extra reference, because we'll return
-			 * this to the caller
- */
- ASSERT(ip->i_udquot);
- uq = ip->i_udquot;
- xfs_dqlock(uq);
- XFS_DQHOLD(uq);
- xfs_dqunlock(uq);
- }
- }
- if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
- if (ip->i_d.di_gid != gid) {
- xfs_iunlock(ip, lockflags);
- if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
- XFS_DQ_GROUP,
- XFS_QMOPT_DQALLOC |
- XFS_QMOPT_DOWARN,
- &gq))) {
- if (uq)
- xfs_qm_dqrele(uq);
- ASSERT(error != ENOENT);
- return error;
- }
- xfs_dqunlock(gq);
- lockflags = XFS_ILOCK_SHARED;
- xfs_ilock(ip, lockflags);
- } else {
- ASSERT(ip->i_gdquot);
- gq = ip->i_gdquot;
- xfs_dqlock(gq);
- XFS_DQHOLD(gq);
- xfs_dqunlock(gq);
- }
- } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
- if (ip->i_d.di_projid != prid) {
- xfs_iunlock(ip, lockflags);
- if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
- XFS_DQ_PROJ,
- XFS_QMOPT_DQALLOC |
- XFS_QMOPT_DOWARN,
- &gq))) {
- if (uq)
- xfs_qm_dqrele(uq);
- ASSERT(error != ENOENT);
- return (error);
- }
- xfs_dqunlock(gq);
- lockflags = XFS_ILOCK_SHARED;
- xfs_ilock(ip, lockflags);
- } else {
- ASSERT(ip->i_gdquot);
- gq = ip->i_gdquot;
- xfs_dqlock(gq);
- XFS_DQHOLD(gq);
- xfs_dqunlock(gq);
- }
- }
- if (uq)
- xfs_dqtrace_entry_ino(uq, "DQALLOC", ip);
-
- xfs_iunlock(ip, lockflags);
- if (O_udqpp)
- *O_udqpp = uq;
- else if (uq)
- xfs_qm_dqrele(uq);
- if (O_gdqpp)
- *O_gdqpp = gq;
- else if (gq)
- xfs_qm_dqrele(gq);
- return 0;
-}
-
-/*
- * Actually transfer ownership, and do dquot modifications.
- * These were already reserved.
- */
-xfs_dquot_t *
-xfs_qm_vop_chown(
- xfs_trans_t *tp,
- xfs_inode_t *ip,
- xfs_dquot_t **IO_olddq,
- xfs_dquot_t *newdq)
-{
- xfs_dquot_t *prevdq;
- uint bfield = XFS_IS_REALTIME_INODE(ip) ?
- XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
-
- /* old dquot */
- prevdq = *IO_olddq;
- ASSERT(prevdq);
- ASSERT(prevdq != newdq);
-
- xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
- xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
-
- /* the sparkling new dquot */
- xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
- xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
-
- /*
- * Take an extra reference, because the inode
- * is going to keep this dquot pointer even
- * after the trans_commit.
- */
- xfs_dqlock(newdq);
- XFS_DQHOLD(newdq);
- xfs_dqunlock(newdq);
- *IO_olddq = newdq;
-
- return prevdq;
-}
-
-/*
- * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
- */
-int
-xfs_qm_vop_chown_reserve(
- xfs_trans_t *tp,
- xfs_inode_t *ip,
- xfs_dquot_t *udqp,
- xfs_dquot_t *gdqp,
- uint flags)
-{
- int error;
- xfs_mount_t *mp;
- uint delblks, blkflags, prjflags = 0;
- xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
- mp = ip->i_mount;
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
-
- delblks = ip->i_delayed_blks;
- delblksudq = delblksgdq = unresudq = unresgdq = NULL;
- blkflags = XFS_IS_REALTIME_INODE(ip) ?
- XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
-
- if (XFS_IS_UQUOTA_ON(mp) && udqp &&
- ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
- delblksudq = udqp;
- /*
- * If there are delayed allocation blocks, then we have to
- * unreserve those from the old dquot, and add them to the
- * new dquot.
- */
- if (delblks) {
- ASSERT(ip->i_udquot);
- unresudq = ip->i_udquot;
- }
- }
- if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
- if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
- ip->i_d.di_projid != be32_to_cpu(gdqp->q_core.d_id))
- prjflags = XFS_QMOPT_ENOSPC;
-
- if (prjflags ||
- (XFS_IS_GQUOTA_ON(ip->i_mount) &&
- ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
- delblksgdq = gdqp;
- if (delblks) {
- ASSERT(ip->i_gdquot);
- unresgdq = ip->i_gdquot;
- }
- }
- }
-
- if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
- delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
- flags | blkflags | prjflags)))
- return (error);
-
- /*
-	 * Do the delayed blks reservations/unreservations now. Since these
- * are done without the help of a transaction, if a reservation fails
- * its previous reservations won't be automatically undone by trans
- * code. So, we have to do it manually here.
- */
- if (delblks) {
- /*
- * Do the reservations first. Unreservation can't fail.
- */
- ASSERT(delblksudq || delblksgdq);
- ASSERT(unresudq || unresgdq);
- if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
- delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
- flags | blkflags | prjflags)))
- return (error);
- xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
- unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
- blkflags);
- }
-
- return (0);
-}
-
-int
-xfs_qm_vop_rename_dqattach(
- xfs_inode_t **i_tab)
-{
- xfs_inode_t *ip;
- int i;
- int error;
-
- ip = i_tab[0];
-
- if (! XFS_IS_QUOTA_ON(ip->i_mount))
- return 0;
-
- if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
- error = xfs_qm_dqattach(ip, 0);
- if (error)
- return error;
- }
- for (i = 1; (i < 4 && i_tab[i]); i++) {
- /*
- * Watch out for duplicate entries in the table.
- */
- if ((ip = i_tab[i]) != i_tab[i-1]) {
- if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
- error = xfs_qm_dqattach(ip, 0);
- if (error)
- return error;
- }
- }
- }
- return 0;
-}
-
-void
-xfs_qm_vop_dqattach_and_dqmod_newinode(
- xfs_trans_t *tp,
- xfs_inode_t *ip,
- xfs_dquot_t *udqp,
- xfs_dquot_t *gdqp)
-{
- if (!XFS_IS_QUOTA_ON(tp->t_mountp))
- return;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
-
- if (udqp) {
- xfs_dqlock(udqp);
- XFS_DQHOLD(udqp);
- xfs_dqunlock(udqp);
- ASSERT(ip->i_udquot == NULL);
- ip->i_udquot = udqp;
- ASSERT(XFS_IS_UQUOTA_ON(tp->t_mountp));
- ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
- xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
- }
- if (gdqp) {
- xfs_dqlock(gdqp);
- XFS_DQHOLD(gdqp);
- xfs_dqunlock(gdqp);
- ASSERT(ip->i_gdquot == NULL);
- ip->i_gdquot = gdqp;
- ASSERT(XFS_IS_OQUOTA_ON(tp->t_mountp));
- ASSERT((XFS_IS_GQUOTA_ON(tp->t_mountp) ?
- ip->i_d.di_gid : ip->i_d.di_projid) ==
- be32_to_cpu(gdqp->q_core.d_id));
- xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
- }
-}
-
-/* ------------- list stuff -----------------*/
-STATIC void
-xfs_qm_freelist_init(xfs_frlist_t *ql)
-{
- ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql;
- mutex_init(&ql->qh_lock);
- ql->qh_version = 0;
- ql->qh_nelems = 0;
-}
-
-STATIC void
-xfs_qm_freelist_destroy(xfs_frlist_t *ql)
-{
- xfs_dquot_t *dqp, *nextdqp;
-
- mutex_lock(&ql->qh_lock);
- for (dqp = ql->qh_next;
- dqp != (xfs_dquot_t *)ql; ) {
- xfs_dqlock(dqp);
- nextdqp = dqp->dq_flnext;
-#ifdef QUOTADEBUG
- cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
-#endif
- XQM_FREELIST_REMOVE(dqp);
- xfs_dqunlock(dqp);
- xfs_qm_dqdestroy(dqp);
- dqp = nextdqp;
- }
- mutex_unlock(&ql->qh_lock);
- mutex_destroy(&ql->qh_lock);
-
- ASSERT(ql->qh_nelems == 0);
-}
-
-STATIC void
-xfs_qm_freelist_insert(xfs_frlist_t *ql, xfs_dquot_t *dq)
-{
- dq->dq_flnext = ql->qh_next;
- dq->dq_flprev = (xfs_dquot_t *)ql;
- ql->qh_next = dq;
- dq->dq_flnext->dq_flprev = dq;
- xfs_Gqm->qm_dqfreelist.qh_nelems++;
- xfs_Gqm->qm_dqfreelist.qh_version++;
-}
-
-void
-xfs_qm_freelist_unlink(xfs_dquot_t *dq)
-{
- xfs_dquot_t *next = dq->dq_flnext;
- xfs_dquot_t *prev = dq->dq_flprev;
-
- next->dq_flprev = prev;
- prev->dq_flnext = next;
- dq->dq_flnext = dq->dq_flprev = dq;
- xfs_Gqm->qm_dqfreelist.qh_nelems--;
- xfs_Gqm->qm_dqfreelist.qh_version++;
-}
-
-void
-xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq)
-{
- xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq);
-}
-
-STATIC int
-xfs_qm_dqhashlock_nowait(
- xfs_dquot_t *dqp)
-{
- int locked;
-
- locked = mutex_trylock(&((dqp)->q_hash->qh_lock));
- return locked;
-}
-
-int
-xfs_qm_freelist_lock_nowait(
- xfs_qm_t *xqm)
-{
- int locked;
-
- locked = mutex_trylock(&(xqm->qm_dqfreelist.qh_lock));
- return locked;
-}
-
-STATIC int
-xfs_qm_mplist_nowait(
- xfs_mount_t *mp)
-{
- int locked;
-
- ASSERT(mp->m_quotainfo);
- locked = mutex_trylock(&(XFS_QI_MPLLOCK(mp)));
- return locked;
-}
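
The freelist helpers deleted above (xfs_qm_freelist_init/insert/unlink/append) implement a circular doubly-linked list whose head acts as a sentinel: the head's first two fields line up with a dquot's dq_flnext/dq_flprev, so an empty list simply points back at itself. The snippet below is a minimal standalone sketch of that pattern, not the kernel code; the structure and function names (node, list_head, list_insert_head, list_unlink) are invented for illustration, and it relies on the same head-mimics-a-node layout trick that the comment in xfs_qm.h calls out.

#include <stdio.h>

struct node {
	struct node *next;		/* plays the role of dq_flnext */
	struct node *prev;		/* plays the role of dq_flprev */
	int id;
};

/*
 * The list head mirrors the first two fields of a node, so it can be
 * cast to one and used as a sentinel; an empty list points at itself.
 */
struct list_head {
	struct node *next;
	struct node *prev;
	unsigned int nelems;
};

static void list_init(struct list_head *ql)
{
	ql->next = ql->prev = (struct node *)ql;
	ql->nelems = 0;
}

static void list_insert_head(struct list_head *ql, struct node *n)
{
	n->next = ql->next;
	n->prev = (struct node *)ql;
	ql->next = n;
	n->next->prev = n;		/* old first element (or sentinel) points back at n */
	ql->nelems++;
}

static void list_unlink(struct list_head *ql, struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n->prev = n;		/* off-list nodes point at themselves */
	ql->nelems--;
}

int main(void)
{
	struct list_head ql;
	struct node a = { .id = 1 }, b = { .id = 2 };

	list_init(&ql);
	list_insert_head(&ql, &a);
	list_insert_head(&ql, &b);	/* order is now b, a */
	list_unlink(&ql, &a);
	printf("%u element(s), first id %d\n", ql.nelems, ql.next->id);
	return 0;
}

Tail insertion (xfs_qm_freelist_append) falls out of the same scheme: it simply inserts after the element that qh_prev points at, i.e. after the current last element.
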
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
deleted file mode 100644
index cd2300e374a..00000000000
--- a/fs/xfs/quota/xfs_qm.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_QM_H__
-#define __XFS_QM_H__
-
-#include "xfs_dquot_item.h"
-#include "xfs_dquot.h"
-#include "xfs_quota_priv.h"
-#include "xfs_qm_stats.h"
-
-struct xfs_qm;
-struct xfs_inode;
-
-extern uint ndquot;
-extern mutex_t xfs_Gqm_lock;
-extern struct xfs_qm *xfs_Gqm;
-extern kmem_zone_t *qm_dqzone;
-extern kmem_zone_t *qm_dqtrxzone;
-
-/*
- * Used in xfs_qm_sync called by xfs_sync to count the max times that it can
- * iterate over the mountpt's dquot list in one call.
- */
-#define XFS_QM_SYNC_MAX_RESTARTS 7
-
-/*
- * Ditto, for xfs_qm_dqreclaim_one.
- */
-#define XFS_QM_RECLAIM_MAX_RESTARTS 4
-
-/*
- * Ideal ratio of free to in use dquots. Quota manager makes an attempt
- * to keep this balance.
- */
-#define XFS_QM_DQFREE_RATIO 2
-
-/*
- * Dquot hashtable constants/threshold values.
- */
-#define XFS_QM_HASHSIZE_LOW (PAGE_SIZE / sizeof(xfs_dqhash_t))
-#define XFS_QM_HASHSIZE_HIGH ((PAGE_SIZE * 4) / sizeof(xfs_dqhash_t))
-
-/*
- * This defines the unit of allocation of dquots.
- * Currently, it is just one file system block, and a 4K blk contains 30
- * (136 * 30 = 4080) dquots. It's probably not worth trying to make
- * this more dynamic.
- * XXXsup However, if this number is changed, we have to make sure that we don't
- * implicitly assume that we do allocations in chunks of a single filesystem
- * block in the dquot/xqm code.
- */
-#define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1
-/*
- * When doing a quotacheck, we log dquot clusters of this many FSBs at most
- * in a single transaction. We don't want to ask for too huge a log reservation.
- */
-#define XFS_QM_MAX_DQCLUSTER_LOGSZ 3
-
-typedef xfs_dqhash_t xfs_dqlist_t;
-/*
- * The freelist head. The first two fields match the first two in the
- * xfs_dquot_t structure (in xfs_dqmarker_t)
- */
-typedef struct xfs_frlist {
- struct xfs_dquot *qh_next;
- struct xfs_dquot *qh_prev;
- mutex_t qh_lock;
- uint qh_version;
- uint qh_nelems;
-} xfs_frlist_t;
-
-/*
- * Quota Manager (global) structure. Lives only in core.
- */
-typedef struct xfs_qm {
- xfs_dqlist_t *qm_usr_dqhtable;/* udquot hash table */
- xfs_dqlist_t *qm_grp_dqhtable;/* gdquot hash table */
- uint qm_dqhashmask; /* # buckets in dq hashtab - 1 */
- xfs_frlist_t qm_dqfreelist; /* freelist of dquots */
- atomic_t qm_totaldquots; /* total incore dquots */
- uint qm_nrefs; /* file systems with quota on */
- int qm_dqfree_ratio;/* ratio of free to inuse dquots */
- kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */
- kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */
-} xfs_qm_t;
-
-/*
- * Various quota information for individual filesystems.
- * The mount structure keeps a pointer to this.
- */
-typedef struct xfs_quotainfo {
- xfs_inode_t *qi_uquotaip; /* user quota inode */
- xfs_inode_t *qi_gquotaip; /* group quota inode */
- spinlock_t qi_pinlock; /* dquot pinning lock */
- xfs_dqlist_t qi_dqlist; /* all dquots in filesys */
- int qi_dqreclaims; /* a change here indicates
- a removal in the dqlist */
- time_t qi_btimelimit; /* limit for blks timer */
- time_t qi_itimelimit; /* limit for inodes timer */
- time_t qi_rtbtimelimit;/* limit for rt blks timer */
- xfs_qwarncnt_t qi_bwarnlimit; /* limit for blks warnings */
- xfs_qwarncnt_t qi_iwarnlimit; /* limit for inodes warnings */
- xfs_qwarncnt_t qi_rtbwarnlimit;/* limit for rt blks warnings */
- mutex_t qi_quotaofflock;/* to serialize quotaoff */
- xfs_filblks_t qi_dqchunklen; /* # BBs in a chunk of dqs */
- uint qi_dqperchunk; /* # ondisk dqs in above chunk */
- xfs_qcnt_t qi_bhardlimit; /* default data blk hard limit */
- xfs_qcnt_t qi_bsoftlimit; /* default data blk soft limit */
- xfs_qcnt_t qi_ihardlimit; /* default inode count hard limit */
- xfs_qcnt_t qi_isoftlimit; /* default inode count soft limit */
- xfs_qcnt_t qi_rtbhardlimit;/* default realtime blk hard limit */
- xfs_qcnt_t qi_rtbsoftlimit;/* default realtime blk soft limit */
-} xfs_quotainfo_t;
-
-
-extern xfs_dqtrxops_t xfs_trans_dquot_ops;
-
-extern void xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long);
-extern int xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *,
- xfs_dquot_t *, xfs_dquot_t *, long, long, uint);
-extern void xfs_trans_dqjoin(xfs_trans_t *, xfs_dquot_t *);
-extern void xfs_trans_log_dquot(xfs_trans_t *, xfs_dquot_t *);
-
-/*
- * We keep the usr and grp dquots separately so that locking will be easier
- * to do at commit time. All transactions that we know of at this point
- * affect no more than two dquots of one type. Hence, the TRANS_MAXDQS value.
- */
-#define XFS_QM_TRANS_MAXDQS 2
-typedef struct xfs_dquot_acct {
- xfs_dqtrx_t dqa_usrdquots[XFS_QM_TRANS_MAXDQS];
- xfs_dqtrx_t dqa_grpdquots[XFS_QM_TRANS_MAXDQS];
-} xfs_dquot_acct_t;
-
-/*
- * Users are allowed to have a usage exceeding their softlimit for
- * a period this long.
- */
-#define XFS_QM_BTIMELIMIT (7 * 24*60*60) /* 1 week */
-#define XFS_QM_RTBTIMELIMIT (7 * 24*60*60) /* 1 week */
-#define XFS_QM_ITIMELIMIT (7 * 24*60*60) /* 1 week */
-
-#define XFS_QM_BWARNLIMIT 5
-#define XFS_QM_IWARNLIMIT 5
-#define XFS_QM_RTBWARNLIMIT 5
-
-#define XFS_QM_LOCK(xqm) (mutex_lock(&xqm##_lock))
-#define XFS_QM_UNLOCK(xqm) (mutex_unlock(&xqm##_lock))
-#define XFS_QM_HOLD(xqm) ((xqm)->qm_nrefs++)
-#define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--)
-
-extern void xfs_qm_destroy_quotainfo(xfs_mount_t *);
-extern void xfs_qm_mount_quotas(xfs_mount_t *, int);
-extern int xfs_qm_quotacheck(xfs_mount_t *);
-extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *);
-extern int xfs_qm_unmount_quotas(xfs_mount_t *);
-extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t);
-extern int xfs_qm_sync(xfs_mount_t *, int);
-
-/* dquot stuff */
-extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **);
-extern int xfs_qm_dqattach(xfs_inode_t *, uint);
-extern void xfs_qm_dqdetach(xfs_inode_t *);
-extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint);
-extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint);
-
-/* vop stuff */
-extern int xfs_qm_vop_dqalloc(xfs_mount_t *, xfs_inode_t *,
- uid_t, gid_t, prid_t, uint,
- xfs_dquot_t **, xfs_dquot_t **);
-extern void xfs_qm_vop_dqattach_and_dqmod_newinode(
- xfs_trans_t *, xfs_inode_t *,
- xfs_dquot_t *, xfs_dquot_t *);
-extern int xfs_qm_vop_rename_dqattach(xfs_inode_t **);
-extern xfs_dquot_t * xfs_qm_vop_chown(xfs_trans_t *, xfs_inode_t *,
- xfs_dquot_t **, xfs_dquot_t *);
-extern int xfs_qm_vop_chown_reserve(xfs_trans_t *, xfs_inode_t *,
- xfs_dquot_t *, xfs_dquot_t *, uint);
-
-/* list stuff */
-extern void xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *);
-extern void xfs_qm_freelist_unlink(xfs_dquot_t *);
-extern int xfs_qm_freelist_lock_nowait(xfs_qm_t *);
-
-/* system call interface */
-extern int xfs_qm_quotactl(struct xfs_mount *, int, int,
- xfs_caddr_t);
-
-#ifdef DEBUG
-extern int xfs_qm_internalqcheck(xfs_mount_t *);
-#else
-#define xfs_qm_internalqcheck(mp) (0)
-#endif
-
-#endif /* __XFS_QM_H__ */
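
To put the cluster-size comment above in concrete terms, here is a tiny standalone calculation of how many on-disk dquots fit in one allocation unit. The 4096-byte block and 136-byte dquot sizes are taken from that comment and are assumptions for illustration only; the real code derives qi_dqperchunk and qi_dqchunklen from the superblock block size and sizeof(xfs_dqblk_t) at mount time.

#include <stdio.h>

int main(void)
{
	const unsigned int blocksize = 4096;	/* assumed filesystem block size */
	const unsigned int dqblk_size = 136;	/* assumed on-disk dquot block size */

	unsigned int dqperchunk = blocksize / dqblk_size;	/* 30 dquots */
	unsigned int bytes_used = dqperchunk * dqblk_size;	/* 4080 bytes */

	printf("%u dquots per block, %u bytes used, %u bytes left over\n",
	       dqperchunk, bytes_used, blocksize - bytes_used);
	return 0;
}
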
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
deleted file mode 100644
index f4f6c4c861d..00000000000
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright (c) 2000-2006 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_clnt.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
-#include "xfs_quota.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_ialloc.h"
-#include "xfs_itable.h"
-#include "xfs_btree.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
-#include "xfs_error.h"
-#include "xfs_rw.h"
-#include "xfs_acl.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
-#include "xfs_qm.h"
-
-
-STATIC void
-xfs_fill_statvfs_from_dquot(
- bhv_statvfs_t *statp,
- xfs_disk_dquot_t *dp)
-{
- __uint64_t limit;
-
- limit = dp->d_blk_softlimit ?
- be64_to_cpu(dp->d_blk_softlimit) :
- be64_to_cpu(dp->d_blk_hardlimit);
- if (limit && statp->f_blocks > limit) {
- statp->f_blocks = limit;
- statp->f_bfree =
- (statp->f_blocks > be64_to_cpu(dp->d_bcount)) ?
- (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0;
- }
-
- limit = dp->d_ino_softlimit ?
- be64_to_cpu(dp->d_ino_softlimit) :
- be64_to_cpu(dp->d_ino_hardlimit);
- if (limit && statp->f_files > limit) {
- statp->f_files = limit;
- statp->f_ffree =
- (statp->f_files > be64_to_cpu(dp->d_icount)) ?
- (statp->f_ffree - be64_to_cpu(dp->d_icount)) : 0;
- }
-}
-
-
-/*
- * Directory tree accounting is implemented using project quotas, where
- * the project identifier is inherited from parent directories.
- * A statvfs (df, etc.) of a directory that is using project quota should
- * return a statvfs of the project, not the entire filesystem.
- * This makes such trees appear as if they are filesystems in themselves.
- */
-STATIC void
-xfs_qm_statvfs(
- xfs_inode_t *ip,
- bhv_statvfs_t *statp)
-{
- xfs_mount_t *mp = ip->i_mount;
- xfs_dquot_t *dqp;
-
- if (!(ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
- !((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
- (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
- return;
-
- if (!xfs_qm_dqget(mp, NULL, ip->i_d.di_projid, XFS_DQ_PROJ, 0, &dqp)) {
- xfs_disk_dquot_t *dp = &dqp->q_core;
-
- xfs_fill_statvfs_from_dquot(statp, dp);
- xfs_qm_dqput(dqp);
- }
-}
-
-STATIC int
-xfs_qm_newmount(
- xfs_mount_t *mp,
- uint *needquotamount,
- uint *quotaflags)
-{
- uint quotaondisk;
- uint uquotaondisk = 0, gquotaondisk = 0, pquotaondisk = 0;
-
- *quotaflags = 0;
- *needquotamount = B_FALSE;
-
- quotaondisk = xfs_sb_version_hasquota(&mp->m_sb) &&
- (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT);
-
- if (quotaondisk) {
- uquotaondisk = mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT;
- pquotaondisk = mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT;
- gquotaondisk = mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT;
- }
-
- /*
- * If the device itself is read-only, we can't allow
- * the user to change the state of quota on the mount -
- * this would generate a transaction on the ro device,
- * which would lead to an I/O error and shutdown
- */
-
- if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) ||
- (!uquotaondisk && XFS_IS_UQUOTA_ON(mp)) ||
- (pquotaondisk && !XFS_IS_PQUOTA_ON(mp)) ||
- (!pquotaondisk && XFS_IS_PQUOTA_ON(mp)) ||
- (gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) ||
- (!gquotaondisk && XFS_IS_OQUOTA_ON(mp))) &&
- xfs_dev_is_read_only(mp, "changing quota state")) {
- cmn_err(CE_WARN,
- "XFS: please mount with%s%s%s%s.",
- (!quotaondisk ? "out quota" : ""),
- (uquotaondisk ? " usrquota" : ""),
- (pquotaondisk ? " prjquota" : ""),
- (gquotaondisk ? " grpquota" : ""));
- return XFS_ERROR(EPERM);
- }
-
- if (XFS_IS_QUOTA_ON(mp) || quotaondisk) {
- /*
- * Call mount_quotas at this point only if we won't have to do
- * a quotacheck.
- */
- if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) {
- /*
-			 * If an error occurred, qm_mount_quotas code
- * has already disabled quotas. So, just finish
- * mounting, and get on with the boring life
- * without disk quotas.
- */
- xfs_qm_mount_quotas(mp, 0);
- } else {
- /*
- * Clear the quota flags, but remember them. This
- * is so that the quota code doesn't get invoked
- * before we're ready. This can happen when an
- * inode goes inactive and wants to free blocks,
- * or via xfs_log_mount_finish.
- */
- *needquotamount = B_TRUE;
- *quotaflags = mp->m_qflags;
- mp->m_qflags = 0;
- }
- }
-
- return 0;
-}
-
-STATIC int
-xfs_qm_endmount(
- xfs_mount_t *mp,
- uint needquotamount,
- uint quotaflags,
- int mfsi_flags)
-{
- if (needquotamount) {
- ASSERT(mp->m_qflags == 0);
- mp->m_qflags = quotaflags;
- xfs_qm_mount_quotas(mp, mfsi_flags);
- }
-
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
- if (! (XFS_IS_QUOTA_ON(mp)))
- xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on");
- else
- xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on");
-#endif
-
-#ifdef QUOTADEBUG
- if (XFS_IS_QUOTA_ON(mp) && xfs_qm_internalqcheck(mp))
- cmn_err(CE_WARN, "XFS: mount internalqcheck failed");
-#endif
-
- return 0;
-}
-
-STATIC void
-xfs_qm_dqrele_null(
- xfs_dquot_t *dq)
-{
- /*
- * Called from XFS, where we always check first for a NULL dquot.
- */
- if (!dq)
- return;
- xfs_qm_dqrele(dq);
-}
-
-
-struct xfs_qmops xfs_qmcore_xfs = {
- .xfs_qminit = xfs_qm_newmount,
- .xfs_qmdone = xfs_qm_unmount_quotadestroy,
- .xfs_qmmount = xfs_qm_endmount,
- .xfs_qmunmount = xfs_qm_unmount_quotas,
- .xfs_dqrele = xfs_qm_dqrele_null,
- .xfs_dqattach = xfs_qm_dqattach,
- .xfs_dqdetach = xfs_qm_dqdetach,
- .xfs_dqpurgeall = xfs_qm_dqpurge_all,
- .xfs_dqvopalloc = xfs_qm_vop_dqalloc,
- .xfs_dqvopcreate = xfs_qm_vop_dqattach_and_dqmod_newinode,
- .xfs_dqvoprename = xfs_qm_vop_rename_dqattach,
- .xfs_dqvopchown = xfs_qm_vop_chown,
- .xfs_dqvopchownresv = xfs_qm_vop_chown_reserve,
- .xfs_dqstatvfs = xfs_qm_statvfs,
- .xfs_dqsync = xfs_qm_sync,
- .xfs_quotactl = xfs_qm_quotactl,
- .xfs_dqtrxops = &xfs_trans_dquot_ops,
-};
-EXPORT_SYMBOL(xfs_qmcore_xfs);
-
-void __init
-xfs_qm_init(void)
-{
- printk(KERN_INFO "SGI XFS Quota Management subsystem\n");
- mutex_init(&xfs_Gqm_lock);
- xfs_qm_init_procfs();
-}
-
-void __exit
-xfs_qm_exit(void)
-{
- xfs_qm_cleanup_procfs();
- if (qm_dqzone)
- kmem_zone_destroy(qm_dqzone);
- if (qm_dqtrxzone)
- kmem_zone_destroy(qm_dqtrxzone);
-}
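
xfs_qmcore_xfs above wires the quota manager into the rest of XFS through a table of function pointers, and xfs_qm_dqrele_null exists so the registered release hook can be called unconditionally, even with a NULL dquot. The sketch below shows only that general pattern under invented names (quota_ops, demo_release, demo_ops); it is an illustration, not XFS code.

#include <stdio.h>

struct dquot {
	int refcount;
};

struct quota_ops {
	void (*release)(struct dquot *dq);	/* callers go through this pointer */
};

static void demo_release(struct dquot *dq)
{
	if (!dq)		/* tolerate NULL so callers need no check */
		return;
	dq->refcount--;
	printf("refcount now %d\n", dq->refcount);
}

static const struct quota_ops demo_ops = {
	.release = demo_release,
};

int main(void)
{
	struct dquot dq = { .refcount = 1 };

	demo_ops.release(&dq);	/* drops the reference */
	demo_ops.release(NULL);	/* safe no-op */
	return 0;
}
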
diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/quota/xfs_qm_stats.c
deleted file mode 100644
index 709f5f545cf..00000000000
--- a/fs/xfs/quota/xfs_qm_stats.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2000-2003 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
-#include "xfs_quota.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_ialloc.h"
-#include "xfs_itable.h"
-#include "xfs_bmap.h"
-#include "xfs_btree.h"
-#include "xfs_rtalloc.h"
-#include "xfs_error.h"
-#include "xfs_rw.h"
-#include "xfs_acl.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
-#include "xfs_qm.h"
-
-struct xqmstats xqmstats;
-
-STATIC int
-xfs_qm_read_xfsquota(
- char *buffer,
- char **start,
- off_t offset,
- int count,
- int *eof,
- void *data)
-{
- int len;
-
- /* maximum; incore; ratio free to inuse; freelist */
- len = sprintf(buffer, "%d\t%d\t%d\t%u\n",
- ndquot,
- xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
- xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0,
- xfs_Gqm? xfs_Gqm->qm_dqfreelist.qh_nelems : 0);
-
- if (offset >= len) {
- *start = buffer;
- *eof = 1;
- return 0;
- }
- *start = buffer + offset;
- if ((len -= offset) > count)
- return count;
- *eof = 1;
-
- return len;
-}
-
-STATIC int
-xfs_qm_read_stats(
- char *buffer,
- char **start,
- off_t offset,
- int count,
- int *eof,
- void *data)
-{
- int len;
-
- /* quota performance statistics */
- len = sprintf(buffer, "qm %u %u %u %u %u %u %u %u\n",
- xqmstats.xs_qm_dqreclaims,
- xqmstats.xs_qm_dqreclaim_misses,
- xqmstats.xs_qm_dquot_dups,
- xqmstats.xs_qm_dqcachemisses,
- xqmstats.xs_qm_dqcachehits,
- xqmstats.xs_qm_dqwants,
- xqmstats.xs_qm_dqshake_reclaims,
- xqmstats.xs_qm_dqinact_reclaims);
-
- if (offset >= len) {
- *start = buffer;
- *eof = 1;
- return 0;
- }
- *start = buffer + offset;
- if ((len -= offset) > count)
- return count;
- *eof = 1;
-
- return len;
-}
-
-void
-xfs_qm_init_procfs(void)
-{
- create_proc_read_entry("fs/xfs/xqmstat", 0, NULL, xfs_qm_read_stats, NULL);
- create_proc_read_entry("fs/xfs/xqm", 0, NULL, xfs_qm_read_xfsquota, NULL);
-}
-
-void
-xfs_qm_cleanup_procfs(void)
-{
- remove_proc_entry("fs/xfs/xqm", NULL);
- remove_proc_entry("fs/xfs/xqmstat", NULL);
-}
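
Both read handlers above repeat the same offset/count bookkeeping required by the legacy create_proc_read_entry() interface. The standalone sketch below isolates just that slicing logic so the contract is easier to see; the helper name slice() and the 100-byte/64-byte figures are made up for illustration.

#include <stdio.h>

/*
 * How many bytes a read at 'offset' for up to 'count' bytes should
 * return, given 'len' bytes of formatted output; sets *eof when the
 * remainder fits entirely in this read.
 */
static int slice(int len, long offset, int count, int *eof)
{
	if (offset >= len) {
		*eof = 1;
		return 0;
	}
	len -= offset;
	if (len > count)
		return count;	/* more data remains; eof stays clear */
	*eof = 1;
	return len;
}

int main(void)
{
	long offset = 0;
	int eof = 0;

	/* 100 bytes of output consumed in 64-byte reads */
	while (!eof) {
		int got = slice(100, offset, 64, &eof);

		printf("read %d bytes at offset %ld, eof=%d\n", got, offset, eof);
		offset += got;
	}
	return 0;
}
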
diff --git a/fs/xfs/quota/xfs_qm_stats.h b/fs/xfs/quota/xfs_qm_stats.h
deleted file mode 100644
index 5b964fc0dc0..00000000000
--- a/fs/xfs/quota/xfs_qm_stats.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2002 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_QM_STATS_H__
-#define __XFS_QM_STATS_H__
-
-#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF)
-
-/*
- * XQM global statistics
- */
-struct xqmstats {
- __uint32_t xs_qm_dqreclaims;
- __uint32_t xs_qm_dqreclaim_misses;
- __uint32_t xs_qm_dquot_dups;
- __uint32_t xs_qm_dqcachemisses;
- __uint32_t xs_qm_dqcachehits;
- __uint32_t xs_qm_dqwants;
- __uint32_t xs_qm_dqshake_reclaims;
- __uint32_t xs_qm_dqinact_reclaims;
-};
-
-extern struct xqmstats xqmstats;
-
-# define XQM_STATS_INC(count) ( (count)++ )
-
-extern void xfs_qm_init_procfs(void);
-extern void xfs_qm_cleanup_procfs(void);
-
-#else
-
-# define XQM_STATS_INC(count) do { } while (0)
-
-static inline void xfs_qm_init_procfs(void) { };
-static inline void xfs_qm_cleanup_procfs(void) { };
-
-#endif
-
-#endif /* __XFS_QM_STATS_H__ */
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
deleted file mode 100644
index 768a3b27d2b..00000000000
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ /dev/null
@@ -1,1476 +0,0 @@
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <linux/capability.h>
-
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
-#include "xfs_quota.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_ialloc.h"
-#include "xfs_itable.h"
-#include "xfs_bmap.h"
-#include "xfs_btree.h"
-#include "xfs_rtalloc.h"
-#include "xfs_error.h"
-#include "xfs_rw.h"
-#include "xfs_acl.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
-#include "xfs_utils.h"
-#include "xfs_qm.h"
-
-#ifdef DEBUG
-# define qdprintk(s, args...) cmn_err(CE_DEBUG, s, ## args)
-#else
-# define qdprintk(s, args...) do { } while (0)
-#endif
-
-STATIC int xfs_qm_scall_trunc_qfiles(xfs_mount_t *, uint);
-STATIC int xfs_qm_scall_getquota(xfs_mount_t *, xfs_dqid_t, uint,
- fs_disk_quota_t *);
-STATIC int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *);
-STATIC int xfs_qm_scall_setqlim(xfs_mount_t *, xfs_dqid_t, uint,
- fs_disk_quota_t *);
-STATIC int xfs_qm_scall_quotaon(xfs_mount_t *, uint);
-STATIC int xfs_qm_scall_quotaoff(xfs_mount_t *, uint, boolean_t);
-STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
-STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
- uint);
-STATIC uint xfs_qm_import_flags(uint);
-STATIC uint xfs_qm_export_flags(uint);
-STATIC uint xfs_qm_import_qtype_flags(uint);
-STATIC uint xfs_qm_export_qtype_flags(uint);
-STATIC void xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *,
- fs_disk_quota_t *);
-
-
-/*
- * The main distribution switch of all XFS quotactl system calls.
- */
-int
-xfs_qm_quotactl(
- xfs_mount_t *mp,
- int cmd,
- int id,
- xfs_caddr_t addr)
-{
- int error;
-
- ASSERT(addr != NULL || cmd == Q_XQUOTASYNC);
-
- /*
- * The following commands are valid even when quotaoff.
- */
- switch (cmd) {
- case Q_XQUOTARM:
- /*
- * Truncate quota files. Quota must be off.
- */
- if (XFS_IS_QUOTA_ON(mp))
- return XFS_ERROR(EINVAL);
- if (mp->m_flags & XFS_MOUNT_RDONLY)
- return XFS_ERROR(EROFS);
- return (xfs_qm_scall_trunc_qfiles(mp,
- xfs_qm_import_qtype_flags(*(uint *)addr)));
-
- case Q_XGETQSTAT:
- /*
- * Get quota status information.
- */
- return (xfs_qm_scall_getqstat(mp, (fs_quota_stat_t *)addr));
-
- case Q_XQUOTAON:
- /*
- * QUOTAON - enabling quota enforcement.
- * Quota accounting must be turned on at mount time.
- */
- if (mp->m_flags & XFS_MOUNT_RDONLY)
- return XFS_ERROR(EROFS);
- return (xfs_qm_scall_quotaon(mp,
- xfs_qm_import_flags(*(uint *)addr)));
-
- case Q_XQUOTAOFF:
- if (mp->m_flags & XFS_MOUNT_RDONLY)
- return XFS_ERROR(EROFS);
- break;
-
- case Q_XQUOTASYNC:
- return (xfs_sync_inodes(mp, SYNC_DELWRI, NULL));
-
- default:
- break;
- }
-
- if (! XFS_IS_QUOTA_ON(mp))
- return XFS_ERROR(ESRCH);
-
- switch (cmd) {
- case Q_XQUOTAOFF:
- if (mp->m_flags & XFS_MOUNT_RDONLY)
- return XFS_ERROR(EROFS);
- error = xfs_qm_scall_quotaoff(mp,
- xfs_qm_import_flags(*(uint *)addr),
- B_FALSE);
- break;
-
- case Q_XGETQUOTA:
- error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_USER,
- (fs_disk_quota_t *)addr);
- break;
- case Q_XGETGQUOTA:
- error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_GROUP,
- (fs_disk_quota_t *)addr);
- break;
- case Q_XGETPQUOTA:
- error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_PROJ,
- (fs_disk_quota_t *)addr);
- break;
-
- case Q_XSETQLIM:
- if (mp->m_flags & XFS_MOUNT_RDONLY)
- return XFS_ERROR(EROFS);
- error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_USER,
- (fs_disk_quota_t *)addr);
- break;
- case Q_XSETGQLIM:
- if (mp->m_flags & XFS_MOUNT_RDONLY)
- return XFS_ERROR(EROFS);
- error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_GROUP,
- (fs_disk_quota_t *)addr);
- break;
- case Q_XSETPQLIM:
- if (mp->m_flags & XFS_MOUNT_RDONLY)
- return XFS_ERROR(EROFS);
- error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_PROJ,
- (fs_disk_quota_t *)addr);
- break;
-
- default:
- error = XFS_ERROR(EINVAL);
- break;
- }
-
- return (error);
-}
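
The dispatcher above is reached from user space through the quotactl(2) system call: the Q_X* subcommand and quota type are packed with QCMD(), and addr carries an XFS-specific structure. A minimal user-space sketch, assuming the fs_disk_quota definitions from <linux/dqblk_xfs.h>; the device path and uid are placeholders:

/* Illustrative: read the XFS user quota for one uid via quotactl(2). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/quota.h>
#include <linux/dqblk_xfs.h>	/* struct fs_disk_quota, Q_XGETQUOTA */

int main(int argc, char **argv)
{
	struct fs_disk_quota dq;
	const char *dev = argc > 1 ? argv[1] : "/dev/sda1";	/* placeholder device */
	int id = argc > 2 ? atoi(argv[2]) : 1000;		/* uid to query */

	memset(&dq, 0, sizeof(dq));
	if (quotactl(QCMD(Q_XGETQUOTA, USRQUOTA), dev, id, (caddr_t)&dq) < 0) {
		perror("quotactl(Q_XGETQUOTA)");
		return 1;
	}
	/* counters and limits are reported in 512-byte basic blocks */
	printf("uid %d: bcount=%llu bsoft=%llu bhard=%llu icount=%llu\n",
		id,
		(unsigned long long)dq.d_bcount,
		(unsigned long long)dq.d_blk_softlimit,
		(unsigned long long)dq.d_blk_hardlimit,
		(unsigned long long)dq.d_icount);
	return 0;
}
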
-
-/*
- * Turn off quota accounting and/or enforcement for all udquots and/or
- * gdquots. Called only at unmount time.
- *
- * This assumes that there are no dquots of this file system cached
- * incore, and modifies the ondisk dquot directly. Therefore, for example,
- * it is an error to call this twice, without purging the cache.
- */
-STATIC int
-xfs_qm_scall_quotaoff(
- xfs_mount_t *mp,
- uint flags,
- boolean_t force)
-{
- uint dqtype;
- int error;
- uint inactivate_flags;
- xfs_qoff_logitem_t *qoffstart;
- int nculprits;
-
- if (!force && !capable(CAP_SYS_ADMIN))
- return XFS_ERROR(EPERM);
- /*
- * No file system can have quotas enabled on disk but not in core.
- * Note that quota utilities (like quotaoff) _expect_
- * errno == EEXIST here.
- */
- if ((mp->m_qflags & flags) == 0)
- return XFS_ERROR(EEXIST);
- error = 0;
-
- flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
-
- /*
- * We don't want to deal with two quotaoffs messing up each other,
- * so we're going to serialize it. quotaoff isn't exactly a performance
- * critical thing.
- * If quotaoff, then we must be dealing with the root filesystem.
- */
- ASSERT(mp->m_quotainfo);
- if (mp->m_quotainfo)
- mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
-
- ASSERT(mp->m_quotainfo);
-
- /*
- * If we're just turning off quota enforcement, change mp and go.
- */
- if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
- mp->m_qflags &= ~(flags);
-
- spin_lock(&mp->m_sb_lock);
- mp->m_sb.sb_qflags = mp->m_qflags;
- spin_unlock(&mp->m_sb_lock);
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
-
- /* XXX what to do if error ? Revert back to old vals incore ? */
- error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
- return (error);
- }
-
- dqtype = 0;
- inactivate_flags = 0;
- /*
- * If accounting is off, we must turn enforcement off and clear the
- * quota 'CHKD' certificate to make it known that we have to
- * do a quotacheck the next time this quota is turned on.
- */
- if (flags & XFS_UQUOTA_ACCT) {
- dqtype |= XFS_QMOPT_UQUOTA;
- flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
- inactivate_flags |= XFS_UQUOTA_ACTIVE;
- }
- if (flags & XFS_GQUOTA_ACCT) {
- dqtype |= XFS_QMOPT_GQUOTA;
- flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
- inactivate_flags |= XFS_GQUOTA_ACTIVE;
- } else if (flags & XFS_PQUOTA_ACCT) {
- dqtype |= XFS_QMOPT_PQUOTA;
- flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
- inactivate_flags |= XFS_PQUOTA_ACTIVE;
- }
-
- /*
- * Nothing to do? Don't complain. This happens when we're just
- * turning off quota enforcement.
- */
- if ((mp->m_qflags & flags) == 0) {
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
- return (0);
- }
-
- /*
- * Write the LI_QUOTAOFF log record, and do SB changes atomically,
- * and synchronously. If we fail to write, we should abort the
- * operation as it cannot be recovered safely if we crash.
- */
- error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
- if (error)
- goto out_error;
-
- /*
- * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
- * to take care of the race between dqget and quotaoff. We don't take
- * any special locks to reset these bits. All processes need to check
- * these bits *after* taking inode lock(s) to see if the particular
- * quota type is in the process of being turned off. If *ACTIVE, it is
- * guaranteed that all dquot structures and all quotainode ptrs will all
- * guaranteed that all dquot structures and quotainode ptrs will
- *
- * There is no turning back after this.
- */
- mp->m_qflags &= ~inactivate_flags;
-
- /*
- * Give back all the dquot reference(s) held by inodes.
- * Here we go thru every single incore inode in this file system, and
- * do a dqrele on the i_udquot/i_gdquot that it may have.
- * Essentially, as long as somebody has an inode locked, this guarantees
- * that quotas will not be turned off. This is handy because in a
- * transaction once we lock the inode(s) and check for quotaon, we can
- * depend on the quota inodes (and other things) being valid as long as
- * we keep the lock(s).
- */
- xfs_qm_dqrele_all_inodes(mp, flags);
-
- /*
- * Next we make the changes in the quota flag in the mount struct.
- * This isn't protected by a particular lock directly, because we
- * don't want to take an mrlock every time we depend on quotas being on.
- */
- mp->m_qflags &= ~(flags);
-
- /*
- * Go through all the dquots of this file system and purge them,
- * according to what was turned off. We may not be able to get rid
- * of all dquots, because dquots can have temporary references that
- * are not attached to inodes, e.g. xfs_setattr, xfs_create.
- * So, if we couldn't purge all the dquots from the filesystem,
- * we can't get rid of the incore data structures.
- */
- while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype|XFS_QMOPT_QUOTAOFF)))
- delay(10 * nculprits);
-
- /*
- * Transactions that had started before ACTIVE state bit was cleared
- * could have logged many dquots, so they'd have higher LSNs than
- * the first QUOTAOFF log record does. If we happen to crash when
- * the tail of the log has gone past the QUOTAOFF record, but
- * before the last dquot modification, those dquots __will__
- * recover, and that's not good.
- *
- * So, we have QUOTAOFF start and end logitems; the start
- * logitem won't get overwritten until the end logitem appears...
- */
- error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
- if (error) {
- /* We're screwed now. Shutdown is the only option. */
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- goto out_error;
- }
-
- /*
- * If quotas are completely disabled, close shop.
- */
- if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) ||
- ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) {
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
- xfs_qm_destroy_quotainfo(mp);
- return (0);
- }
-
- /*
- * Release our quotainode references, and vn_purge them,
- * if we don't need them anymore.
- */
- if ((dqtype & XFS_QMOPT_UQUOTA) && XFS_QI_UQIP(mp)) {
- XFS_PURGE_INODE(XFS_QI_UQIP(mp));
- XFS_QI_UQIP(mp) = NULL;
- }
- if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && XFS_QI_GQIP(mp)) {
- XFS_PURGE_INODE(XFS_QI_GQIP(mp));
- XFS_QI_GQIP(mp) = NULL;
- }
-out_error:
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
-
- return (error);
-}
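
When only enforcement bits are passed in, the function above takes the early path that clears the ENFD flags and rewrites sb_qflags without logging a quotaoff item. From user space that corresponds to a Q_XQUOTAOFF call whose flag word names only enforcement; a small sketch using the XFS_QUOTA_*_ENFD flag names that xfs_qm_import_flags() below translates (flag names as defined in this era's <linux/dqblk_xfs.h>; the helper name is illustrative):

/* Illustrative: drop user-quota enforcement only; accounting stays on. */
#include <sys/types.h>
#include <sys/quota.h>
#include <linux/dqblk_xfs.h>	/* XFS_QUOTA_UDQ_ENFD, Q_XQUOTAOFF */

static int xfs_drop_user_enforcement(const char *dev)
{
	unsigned int flags = XFS_QUOTA_UDQ_ENFD;	/* which types/bits to turn off */

	/* requires CAP_SYS_ADMIN, mirroring the capable() check above */
	return quotactl(QCMD(Q_XQUOTAOFF, USRQUOTA), dev, 0, (caddr_t)&flags);
}
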
-
-STATIC int
-xfs_qm_scall_trunc_qfiles(
- xfs_mount_t *mp,
- uint flags)
-{
- int error = 0, error2 = 0;
- xfs_inode_t *qip;
-
- if (!capable(CAP_SYS_ADMIN))
- return XFS_ERROR(EPERM);
- if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
- qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags);
- return XFS_ERROR(EINVAL);
- }
-
- if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) {
- error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0);
- if (!error) {
- error = xfs_truncate_file(mp, qip);
- IRELE(qip);
- }
- }
-
- if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) &&
- mp->m_sb.sb_gquotino != NULLFSINO) {
- error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0);
- if (!error2) {
- error2 = xfs_truncate_file(mp, qip);
- IRELE(qip);
- }
- }
-
- return error ? error : error2;
-}
-
-
-/*
- * Switch on (a given) quota enforcement for a filesystem. This takes
- * effect immediately.
- * (Switching on quota accounting must be done at mount time.)
- */
-STATIC int
-xfs_qm_scall_quotaon(
- xfs_mount_t *mp,
- uint flags)
-{
- int error;
- uint qf;
- uint accflags;
- __int64_t sbflags;
-
- if (!capable(CAP_SYS_ADMIN))
- return XFS_ERROR(EPERM);
-
- flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
- /*
- * Switching on quota accounting must be done at mount time.
- */
- accflags = flags & XFS_ALL_QUOTA_ACCT;
- flags &= ~(XFS_ALL_QUOTA_ACCT);
-
- sbflags = 0;
-
- if (flags == 0) {
- qdprintk("quotaon: zero flags, m_qflags=%x\n", mp->m_qflags);
- return XFS_ERROR(EINVAL);
- }
-
- /* No fs can turn on quotas with a delayed effect */
- ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);
-
- /*
- * Can't enforce without accounting. We check the superblock
- * qflags here instead of m_qflags because rootfs can have
- * quota acct on disk without m_qflags knowing about it.
- */
- if (((flags & XFS_UQUOTA_ACCT) == 0 &&
- (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
- (flags & XFS_UQUOTA_ENFD))
- ||
- ((flags & XFS_PQUOTA_ACCT) == 0 &&
- (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
- (flags & XFS_GQUOTA_ACCT) == 0 &&
- (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
- (flags & XFS_OQUOTA_ENFD))) {
- qdprintk("Can't enforce without acct, flags=%x sbflags=%x\n",
- flags, mp->m_sb.sb_qflags);
- return XFS_ERROR(EINVAL);
- }
- /*
- * If everything's up to date incore, then don't waste time.
- */
- if ((mp->m_qflags & flags) == flags)
- return XFS_ERROR(EEXIST);
-
- /*
- * Change sb_qflags on disk but not incore mp->qflags
- * if this is the root filesystem.
- */
- spin_lock(&mp->m_sb_lock);
- qf = mp->m_sb.sb_qflags;
- mp->m_sb.sb_qflags = qf | flags;
- spin_unlock(&mp->m_sb_lock);
-
- /*
- * There's nothing to change if it's the same.
- */
- if ((qf & flags) == flags && sbflags == 0)
- return XFS_ERROR(EEXIST);
- sbflags |= XFS_SB_QFLAGS;
-
- if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
- return (error);
- /*
- * If we aren't trying to switch on quota enforcement, we are done.
- */
- if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
- (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
- ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
- (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
- ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
- (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
- (flags & XFS_ALL_QUOTA_ENFD) == 0)
- return (0);
-
- if (! XFS_IS_QUOTA_RUNNING(mp))
- return XFS_ERROR(ESRCH);
-
- /*
- * Switch on quota enforcement in core.
- */
- mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
- mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
-
- return (0);
-}
-
-
-/*
- * Return quota status information, such as uquota-off, enforcements, etc.
- */
-STATIC int
-xfs_qm_scall_getqstat(
- xfs_mount_t *mp,
- fs_quota_stat_t *out)
-{
- xfs_inode_t *uip, *gip;
- boolean_t tempuqip, tempgqip;
-
- uip = gip = NULL;
- tempuqip = tempgqip = B_FALSE;
- memset(out, 0, sizeof(fs_quota_stat_t));
-
- out->qs_version = FS_QSTAT_VERSION;
- if (!xfs_sb_version_hasquota(&mp->m_sb)) {
- out->qs_uquota.qfs_ino = NULLFSINO;
- out->qs_gquota.qfs_ino = NULLFSINO;
- return (0);
- }
- out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
- (XFS_ALL_QUOTA_ACCT|
- XFS_ALL_QUOTA_ENFD));
- out->qs_pad = 0;
- out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
- out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
-
- if (mp->m_quotainfo) {
- uip = mp->m_quotainfo->qi_uquotaip;
- gip = mp->m_quotainfo->qi_gquotaip;
- }
- if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
- if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
- 0, 0, &uip, 0) == 0)
- tempuqip = B_TRUE;
- }
- if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
- if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
- 0, 0, &gip, 0) == 0)
- tempgqip = B_TRUE;
- }
- if (uip) {
- out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
- out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
- if (tempuqip)
- IRELE(uip);
- }
- if (gip) {
- out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
- out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
- if (tempgqip)
- IRELE(gip);
- }
- if (mp->m_quotainfo) {
- out->qs_incoredqs = XFS_QI_MPLNDQUOTS(mp);
- out->qs_btimelimit = XFS_QI_BTIMELIMIT(mp);
- out->qs_itimelimit = XFS_QI_ITIMELIMIT(mp);
- out->qs_rtbtimelimit = XFS_QI_RTBTIMELIMIT(mp);
- out->qs_bwarnlimit = XFS_QI_BWARNLIMIT(mp);
- out->qs_iwarnlimit = XFS_QI_IWARNLIMIT(mp);
- }
- return (0);
-}
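
The status block filled in above is what user space receives from a Q_XGETQSTAT request; a short sketch of reading it (device path illustrative, structure as declared in <linux/dqblk_xfs.h>):

/* Illustrative: fetch the quota status block for a mounted XFS filesystem. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>
#include <linux/dqblk_xfs.h>	/* struct fs_quota_stat, Q_XGETQSTAT */

static void print_quota_status(const char *dev)
{
	struct fs_quota_stat qs;

	if (quotactl(QCMD(Q_XGETQSTAT, USRQUOTA), dev, 0, (caddr_t)&qs) < 0) {
		perror("quotactl(Q_XGETQSTAT)");
		return;
	}
	printf("version=%d flags=0x%x uquota ino=%llu btimelimit=%d\n",
		qs.qs_version, qs.qs_flags,
		(unsigned long long)qs.qs_uquota.qfs_ino, qs.qs_btimelimit);
}
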
-
-/*
- * Adjust quota limits, and start/stop timers accordingly.
- */
-STATIC int
-xfs_qm_scall_setqlim(
- xfs_mount_t *mp,
- xfs_dqid_t id,
- uint type,
- fs_disk_quota_t *newlim)
-{
- xfs_disk_dquot_t *ddq;
- xfs_dquot_t *dqp;
- xfs_trans_t *tp;
- int error;
- xfs_qcnt_t hard, soft;
-
- if (!capable(CAP_SYS_ADMIN))
- return XFS_ERROR(EPERM);
-
- if ((newlim->d_fieldmask &
- (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK|FS_DQ_WARNS_MASK)) == 0)
- return (0);
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
- if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
- 0, 0, XFS_DEFAULT_LOG_COUNT))) {
- xfs_trans_cancel(tp, 0);
- return (error);
- }
-
- /*
- * We don't want to race with a quotaoff so take the quotaoff lock.
- * (We don't hold an inode lock, so there's nothing else to stop
- * a quotaoff from happening). (XXX This doesn't currently happen
- * because we take the vfslock before calling xfs_qm_sysent).
- */
- mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
-
- /*
- * Get the dquot (locked), and join it to the transaction.
- * Allocate the dquot if this doesn't exist.
- */
- if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
- xfs_trans_cancel(tp, XFS_TRANS_ABORT);
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
- ASSERT(error != ENOENT);
- return (error);
- }
- xfs_dqtrace_entry(dqp, "Q_SETQLIM: AFT DQGET");
- xfs_trans_dqjoin(tp, dqp);
- ddq = &dqp->q_core;
-
- /*
- * Make sure that hard limits are >= soft limits before changing.
- */
- hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
- be64_to_cpu(ddq->d_blk_hardlimit);
- soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
- be64_to_cpu(ddq->d_blk_softlimit);
- if (hard == 0 || hard >= soft) {
- ddq->d_blk_hardlimit = cpu_to_be64(hard);
- ddq->d_blk_softlimit = cpu_to_be64(soft);
- if (id == 0) {
- mp->m_quotainfo->qi_bhardlimit = hard;
- mp->m_quotainfo->qi_bsoftlimit = soft;
- }
- } else {
- qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft);
- }
- hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
- be64_to_cpu(ddq->d_rtb_hardlimit);
- soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
- be64_to_cpu(ddq->d_rtb_softlimit);
- if (hard == 0 || hard >= soft) {
- ddq->d_rtb_hardlimit = cpu_to_be64(hard);
- ddq->d_rtb_softlimit = cpu_to_be64(soft);
- if (id == 0) {
- mp->m_quotainfo->qi_rtbhardlimit = hard;
- mp->m_quotainfo->qi_rtbsoftlimit = soft;
- }
- } else {
- qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
- }
-
- hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
- (xfs_qcnt_t) newlim->d_ino_hardlimit :
- be64_to_cpu(ddq->d_ino_hardlimit);
- soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
- (xfs_qcnt_t) newlim->d_ino_softlimit :
- be64_to_cpu(ddq->d_ino_softlimit);
- if (hard == 0 || hard >= soft) {
- ddq->d_ino_hardlimit = cpu_to_be64(hard);
- ddq->d_ino_softlimit = cpu_to_be64(soft);
- if (id == 0) {
- mp->m_quotainfo->qi_ihardlimit = hard;
- mp->m_quotainfo->qi_isoftlimit = soft;
- }
- } else {
- qdprintk("ihard %Ld < isoft %Ld\n", hard, soft);
- }
-
- /*
- * Update warnings counter(s) if requested
- */
- if (newlim->d_fieldmask & FS_DQ_BWARNS)
- ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
- if (newlim->d_fieldmask & FS_DQ_IWARNS)
- ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
- if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
- ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
-
- if (id == 0) {
- /*
- * Timelimits for the super user set the relative time
- * the other users can be over quota for this file system.
- * If it is zero a default is used. Ditto for the default
- * soft and hard limit values (already done, above), and
- * for warnings.
- */
- if (newlim->d_fieldmask & FS_DQ_BTIMER) {
- mp->m_quotainfo->qi_btimelimit = newlim->d_btimer;
- ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
- }
- if (newlim->d_fieldmask & FS_DQ_ITIMER) {
- mp->m_quotainfo->qi_itimelimit = newlim->d_itimer;
- ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
- }
- if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
- mp->m_quotainfo->qi_rtbtimelimit = newlim->d_rtbtimer;
- ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
- }
- if (newlim->d_fieldmask & FS_DQ_BWARNS)
- mp->m_quotainfo->qi_bwarnlimit = newlim->d_bwarns;
- if (newlim->d_fieldmask & FS_DQ_IWARNS)
- mp->m_quotainfo->qi_iwarnlimit = newlim->d_iwarns;
- if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
- mp->m_quotainfo->qi_rtbwarnlimit = newlim->d_rtbwarns;
- } else {
- /*
- * If the user is now over quota, start the timelimit.
- * The user will not be 'warned'.
- * Note that we keep the timers ticking, whether enforcement
- * is on or off. We don't really want to bother with iterating
- * over all ondisk dquots and turning the timers on/off.
- */
- xfs_qm_adjust_dqtimers(mp, ddq);
- }
- dqp->dq_flags |= XFS_DQ_DIRTY;
- xfs_trans_log_dquot(tp, dqp);
-
- xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT");
- error = xfs_trans_commit(tp, 0);
- xfs_qm_dqprint(dqp);
- xfs_qm_dqrele(dqp);
- mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
-
- return error;
-}
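
Callers supply limits in 512-byte basic blocks; the handler above converts them with XFS_BB_TO_FSB() and applies only the fields named in d_fieldmask. A user-space sketch of setting soft/hard block limits for one uid (header and flag names as in this era's <linux/dqblk_xfs.h>; the byte-based helper interface is illustrative):

/* Illustrative: set soft/hard block limits, expressed here in bytes. */
#include <string.h>
#include <sys/types.h>
#include <sys/quota.h>
#include <linux/dqblk_xfs.h>

static int set_block_limits(const char *dev, int uid,
			unsigned long long soft_bytes,
			unsigned long long hard_bytes)
{
	struct fs_disk_quota dq;

	memset(&dq, 0, sizeof(dq));
	dq.d_version = FS_DQUOT_VERSION;
	dq.d_flags = XFS_USER_QUOTA;		/* quota type, era-specific name */
	dq.d_id = uid;
	dq.d_fieldmask = FS_DQ_BSOFT | FS_DQ_BHARD;
	dq.d_blk_softlimit = soft_bytes / 512;	/* bytes -> 512-byte basic blocks */
	dq.d_blk_hardlimit = hard_bytes / 512;

	return quotactl(QCMD(Q_XSETQLIM, USRQUOTA), dev, uid, (caddr_t)&dq);
}
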
-
-STATIC int
-xfs_qm_scall_getquota(
- xfs_mount_t *mp,
- xfs_dqid_t id,
- uint type,
- fs_disk_quota_t *out)
-{
- xfs_dquot_t *dqp;
- int error;
-
- /*
- * Try to get the dquot. We don't want it allocated on disk, so
- * we aren't passing the XFS_QMOPT_DQALLOC flag. If it doesn't
- * exist, we'll get ENOENT back.
- */
- if ((error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp))) {
- return (error);
- }
-
- xfs_dqtrace_entry(dqp, "Q_GETQUOTA SUCCESS");
- /*
- * If everything's NULL, this dquot doesn't quite exist as far as
- * our utility programs are concerned.
- */
- if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
- xfs_qm_dqput(dqp);
- return XFS_ERROR(ENOENT);
- }
- /* xfs_qm_dqprint(dqp); */
- /*
- * Convert the disk dquot to the exportable format
- */
- xfs_qm_export_dquot(mp, &dqp->q_core, out);
- xfs_qm_dqput(dqp);
- return (error ? XFS_ERROR(EFAULT) : 0);
-}
-
-
-STATIC int
-xfs_qm_log_quotaoff_end(
- xfs_mount_t *mp,
- xfs_qoff_logitem_t *startqoff,
- uint flags)
-{
- xfs_trans_t *tp;
- int error;
- xfs_qoff_logitem_t *qoffi;
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);
-
- if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2,
- 0, 0, XFS_DEFAULT_LOG_COUNT))) {
- xfs_trans_cancel(tp, 0);
- return (error);
- }
-
- qoffi = xfs_trans_get_qoff_item(tp, startqoff,
- flags & XFS_ALL_QUOTA_ACCT);
- xfs_trans_log_quotaoff_item(tp, qoffi);
-
- /*
- * We have to make sure that the transaction is secure on disk before we
- * return and actually stop quota accounting. So, make it synchronous.
- * We don't care about quotaoff's performance.
- */
- xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp, 0);
- return (error);
-}
-
-
-STATIC int
-xfs_qm_log_quotaoff(
- xfs_mount_t *mp,
- xfs_qoff_logitem_t **qoffstartp,
- uint flags)
-{
- xfs_trans_t *tp;
- int error;
- xfs_qoff_logitem_t *qoffi=NULL;
- uint oldsbqflag=0;
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
- if ((error = xfs_trans_reserve(tp, 0,
- sizeof(xfs_qoff_logitem_t) * 2 +
- mp->m_sb.sb_sectsize + 128,
- 0,
- 0,
- XFS_DEFAULT_LOG_COUNT))) {
- goto error0;
- }
-
- qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
- xfs_trans_log_quotaoff_item(tp, qoffi);
-
- spin_lock(&mp->m_sb_lock);
- oldsbqflag = mp->m_sb.sb_qflags;
- mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
- spin_unlock(&mp->m_sb_lock);
-
- xfs_mod_sb(tp, XFS_SB_QFLAGS);
-
- /*
- * We have to make sure that the transaction is secure on disk before we
- * return and actually stop quota accounting. So, make it synchronous.
- * We don't care about quotaoff's performance.
- */
- xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp, 0);
-
-error0:
- if (error) {
- xfs_trans_cancel(tp, 0);
- /*
- * No one else is modifying sb_qflags, so this is OK.
- * We still hold the quotaofflock.
- */
- spin_lock(&mp->m_sb_lock);
- mp->m_sb.sb_qflags = oldsbqflag;
- spin_unlock(&mp->m_sb_lock);
- }
- *qoffstartp = qoffi;
- return (error);
-}
-
-
-/*
- * Translate an internal-style on-disk dquot to the exportable format.
- * The main differences are that the counters/limits are all in Basic
- * Blocks (BBs) instead of the internal FSBs, and all on-disk data has
- * to be converted to the native endianness.
- */
-STATIC void
-xfs_qm_export_dquot(
- xfs_mount_t *mp,
- xfs_disk_dquot_t *src,
- struct fs_disk_quota *dst)
-{
- memset(dst, 0, sizeof(*dst));
- dst->d_version = FS_DQUOT_VERSION; /* different from src->d_version */
- dst->d_flags = xfs_qm_export_qtype_flags(src->d_flags);
- dst->d_id = be32_to_cpu(src->d_id);
- dst->d_blk_hardlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_hardlimit));
- dst->d_blk_softlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_softlimit));
- dst->d_ino_hardlimit = be64_to_cpu(src->d_ino_hardlimit);
- dst->d_ino_softlimit = be64_to_cpu(src->d_ino_softlimit);
- dst->d_bcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_bcount));
- dst->d_icount = be64_to_cpu(src->d_icount);
- dst->d_btimer = be32_to_cpu(src->d_btimer);
- dst->d_itimer = be32_to_cpu(src->d_itimer);
- dst->d_iwarns = be16_to_cpu(src->d_iwarns);
- dst->d_bwarns = be16_to_cpu(src->d_bwarns);
- dst->d_rtb_hardlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_hardlimit));
- dst->d_rtb_softlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_softlimit));
- dst->d_rtbcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtbcount));
- dst->d_rtbtimer = be32_to_cpu(src->d_rtbtimer);
- dst->d_rtbwarns = be16_to_cpu(src->d_rtbwarns);
-
- /*
- * Internally, we don't reset all the timers when quota enforcement
- * gets turned off. No need to confuse the user level code,
- * so return zeroes in that case.
- */
- if ((!XFS_IS_UQUOTA_ENFORCED(mp) && src->d_flags == XFS_DQ_USER) ||
- (!XFS_IS_OQUOTA_ENFORCED(mp) &&
- (src->d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) {
- dst->d_btimer = 0;
- dst->d_itimer = 0;
- dst->d_rtbtimer = 0;
- }
-
-#ifdef DEBUG
- if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == XFS_USER_QUOTA) ||
- (XFS_IS_OQUOTA_ENFORCED(mp) &&
- (dst->d_flags & (XFS_PROJ_QUOTA | XFS_GROUP_QUOTA)))) &&
- dst->d_id != 0) {
- if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) &&
- (dst->d_blk_softlimit > 0)) {
- ASSERT(dst->d_btimer != 0);
- }
- if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) &&
- (dst->d_ino_softlimit > 0)) {
- ASSERT(dst->d_itimer != 0);
- }
- }
-#endif
-}
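
A basic block (BB) is 512 bytes, so the XFS_FSB_TO_BB() conversions above amount to a left shift by (filesystem block log - 9). A toy, self-contained illustration of that arithmetic with the shift written out by hand; it sketches what the macro does in principle, not the macro itself:

/* Toy illustration of the FSB -> BB conversion used above. */
#include <stdio.h>

#define BBSHIFT	9	/* 512-byte basic blocks */

static unsigned long long fsb_to_bb(unsigned long long fsb, int blocklog)
{
	return fsb << (blocklog - BBSHIFT);
}

int main(void)
{
	/* 4096-byte filesystem blocks => blocklog 12 => 8 BBs per FSB */
	printf("100 FSBs = %llu BBs\n", fsb_to_bb(100, 12));	/* prints 800 */
	return 0;
}
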
-
-STATIC uint
-xfs_qm_import_qtype_flags(
- uint uflags)
-{
- uint oflags = 0;
-
- /*
- * Can't be more than one, or none.
- */
- if (((uflags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) ==
- (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) ||
- ((uflags & (XFS_GROUP_QUOTA | XFS_PROJ_QUOTA)) ==
- (XFS_GROUP_QUOTA | XFS_PROJ_QUOTA)) ||
- ((uflags & (XFS_USER_QUOTA | XFS_PROJ_QUOTA)) ==
- (XFS_USER_QUOTA | XFS_PROJ_QUOTA)) ||
- ((uflags & (XFS_GROUP_QUOTA|XFS_USER_QUOTA|XFS_PROJ_QUOTA)) == 0))
- return (0);
-
- oflags |= (uflags & XFS_USER_QUOTA) ? XFS_DQ_USER : 0;
- oflags |= (uflags & XFS_PROJ_QUOTA) ? XFS_DQ_PROJ : 0;
- oflags |= (uflags & XFS_GROUP_QUOTA) ? XFS_DQ_GROUP: 0;
- return oflags;
-}
-
-STATIC uint
-xfs_qm_export_qtype_flags(
- uint flags)
-{
- /*
- * Can't be more than one, or none.
- */
- ASSERT((flags & (XFS_PROJ_QUOTA | XFS_USER_QUOTA)) !=
- (XFS_PROJ_QUOTA | XFS_USER_QUOTA));
- ASSERT((flags & (XFS_PROJ_QUOTA | XFS_GROUP_QUOTA)) !=
- (XFS_PROJ_QUOTA | XFS_GROUP_QUOTA));
- ASSERT((flags & (XFS_USER_QUOTA | XFS_GROUP_QUOTA)) !=
- (XFS_USER_QUOTA | XFS_GROUP_QUOTA));
- ASSERT((flags & (XFS_PROJ_QUOTA|XFS_USER_QUOTA|XFS_GROUP_QUOTA)) != 0);
-
- return (flags & XFS_DQ_USER) ?
- XFS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
- XFS_PROJ_QUOTA : XFS_GROUP_QUOTA;
-}
-
-STATIC uint
-xfs_qm_import_flags(
- uint uflags)
-{
- uint flags = 0;
-
- if (uflags & XFS_QUOTA_UDQ_ACCT)
- flags |= XFS_UQUOTA_ACCT;
- if (uflags & XFS_QUOTA_PDQ_ACCT)
- flags |= XFS_PQUOTA_ACCT;
- if (uflags & XFS_QUOTA_GDQ_ACCT)
- flags |= XFS_GQUOTA_ACCT;
- if (uflags & XFS_QUOTA_UDQ_ENFD)
- flags |= XFS_UQUOTA_ENFD;
- if (uflags & (XFS_QUOTA_PDQ_ENFD|XFS_QUOTA_GDQ_ENFD))
- flags |= XFS_OQUOTA_ENFD;
- return (flags);
-}
-
-
-STATIC uint
-xfs_qm_export_flags(
- uint flags)
-{
- uint uflags;
-
- uflags = 0;
- if (flags & XFS_UQUOTA_ACCT)
- uflags |= XFS_QUOTA_UDQ_ACCT;
- if (flags & XFS_PQUOTA_ACCT)
- uflags |= XFS_QUOTA_PDQ_ACCT;
- if (flags & XFS_GQUOTA_ACCT)
- uflags |= XFS_QUOTA_GDQ_ACCT;
- if (flags & XFS_UQUOTA_ENFD)
- uflags |= XFS_QUOTA_UDQ_ENFD;
- if (flags & (XFS_OQUOTA_ENFD)) {
- uflags |= (flags & XFS_GQUOTA_ACCT) ?
- XFS_QUOTA_GDQ_ENFD : XFS_QUOTA_PDQ_ENFD;
- }
- return (uflags);
-}
-
-
-/*
- * Go thru all the inodes in the file system, releasing their dquots.
- * Note that the mount structure gets modified to indicate that quotas are off
- * AFTER this, in the case of quotaoff. This also gets called from
- * xfs_rootumount.
- */
-void
-xfs_qm_dqrele_all_inodes(
- struct xfs_mount *mp,
- uint flags)
-{
- xfs_inode_t *ip, *topino;
- uint ireclaims;
- bhv_vnode_t *vp;
- boolean_t vnode_refd;
-
- ASSERT(mp->m_quotainfo);
-
- XFS_MOUNT_ILOCK(mp);
-again:
- ip = mp->m_inodes;
- if (ip == NULL) {
- XFS_MOUNT_IUNLOCK(mp);
- return;
- }
- do {
- /* Skip markers inserted by xfs_sync */
- if (ip->i_mount == NULL) {
- ip = ip->i_mnext;
- continue;
- }
- /* Root inode, rbmip and rsumip have associated blocks */
- if (ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) {
- ASSERT(ip->i_udquot == NULL);
- ASSERT(ip->i_gdquot == NULL);
- ip = ip->i_mnext;
- continue;
- }
- vp = XFS_ITOV_NULL(ip);
- if (!vp) {
- ASSERT(ip->i_udquot == NULL);
- ASSERT(ip->i_gdquot == NULL);
- ip = ip->i_mnext;
- continue;
- }
- vnode_refd = B_FALSE;
- if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) {
- ireclaims = mp->m_ireclaims;
- topino = mp->m_inodes;
- vp = vn_grab(vp);
- if (!vp)
- goto again;
-
- XFS_MOUNT_IUNLOCK(mp);
- /* XXX restart limit ? */
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- vnode_refd = B_TRUE;
- } else {
- ireclaims = mp->m_ireclaims;
- topino = mp->m_inodes;
- XFS_MOUNT_IUNLOCK(mp);
- }
-
- /*
- * We don't keep the mountlock across the dqrele() call,
- * since it can take a while.
- */
- if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
- xfs_qm_dqrele(ip->i_udquot);
- ip->i_udquot = NULL;
- }
- if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
- xfs_qm_dqrele(ip->i_gdquot);
- ip->i_gdquot = NULL;
- }
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- /*
- * Wait until we've dropped the ilock and mountlock to
- * do the vn_rele. Or be condemned to an eternity in the
- * inactive code in hell.
- */
- if (vnode_refd)
- IRELE(ip);
- XFS_MOUNT_ILOCK(mp);
- /*
- * If an inode was inserted or removed, we gotta
- * start over again.
- */
- if (topino != mp->m_inodes || mp->m_ireclaims != ireclaims) {
- /* XXX use a sentinel */
- goto again;
- }
- ip = ip->i_mnext;
- } while (ip != mp->m_inodes);
-
- XFS_MOUNT_IUNLOCK(mp);
-}
-
-/*------------------------------------------------------------------------*/
-#ifdef DEBUG
-/*
- * This contains all the test functions for XFS disk quotas.
- * Currently it does a quota accounting check, i.e. it walks through
- * all inodes in the file system, calculating the dquot accounting fields,
- * and printing out any inconsistencies.
- */
-xfs_dqhash_t *qmtest_udqtab;
-xfs_dqhash_t *qmtest_gdqtab;
-int qmtest_hashmask;
-int qmtest_nfails;
-mutex_t qcheck_lock;
-
-#define DQTEST_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \
- (__psunsigned_t)(id)) & \
- (qmtest_hashmask - 1))
-
-#define DQTEST_HASH(mp, id, type) ((type & XFS_DQ_USER) ? \
- (qmtest_udqtab + \
- DQTEST_HASHVAL(mp, id)) : \
- (qmtest_gdqtab + \
- DQTEST_HASHVAL(mp, id)))
-
-#define DQTEST_LIST_PRINT(l, NXT, title) \
-{ \
- xfs_dqtest_t *dqp; int i = 0;\
- cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
- for (dqp = (xfs_dqtest_t *)(l)->qh_next; dqp != NULL; \
- dqp = (xfs_dqtest_t *)dqp->NXT) { \
- cmn_err(CE_DEBUG, " %d. \"%d (%s)\" bcnt = %d, icnt = %d", \
- ++i, dqp->d_id, DQFLAGTO_TYPESTR(dqp), \
- dqp->d_bcount, dqp->d_icount); } \
-}
-
-typedef struct dqtest {
- xfs_dqmarker_t q_lists;
- xfs_dqhash_t *q_hash; /* the hashchain header */
- xfs_mount_t *q_mount; /* filesystem this relates to */
- xfs_dqid_t d_id; /* user id or group id */
- xfs_qcnt_t d_bcount; /* # disk blocks owned by the user */
- xfs_qcnt_t d_icount; /* # inodes owned by the user */
-} xfs_dqtest_t;
-
-STATIC void
-xfs_qm_hashinsert(xfs_dqhash_t *h, xfs_dqtest_t *dqp)
-{
- xfs_dquot_t *d;
- if (((d) = (h)->qh_next))
- (d)->HL_PREVP = &((dqp)->HL_NEXT);
- (dqp)->HL_NEXT = d;
- (dqp)->HL_PREVP = &((h)->qh_next);
- (h)->qh_next = (xfs_dquot_t *)dqp;
- (h)->qh_version++;
- (h)->qh_nelems++;
-}
-STATIC void
-xfs_qm_dqtest_print(
- xfs_dqtest_t *d)
-{
- cmn_err(CE_DEBUG, "-----------DQTEST DQUOT----------------");
- cmn_err(CE_DEBUG, "---- dquot ID = %d", d->d_id);
- cmn_err(CE_DEBUG, "---- fs = 0x%p", d->q_mount);
- cmn_err(CE_DEBUG, "---- bcount = %Lu (0x%x)",
- d->d_bcount, (int)d->d_bcount);
- cmn_err(CE_DEBUG, "---- icount = %Lu (0x%x)",
- d->d_icount, (int)d->d_icount);
- cmn_err(CE_DEBUG, "---------------------------");
-}
-
-STATIC void
-xfs_qm_dqtest_failed(
- xfs_dqtest_t *d,
- xfs_dquot_t *dqp,
- char *reason,
- xfs_qcnt_t a,
- xfs_qcnt_t b,
- int error)
-{
- qmtest_nfails++;
- if (error)
- cmn_err(CE_DEBUG, "quotacheck failed id=%d, err=%d\nreason: %s",
- d->d_id, error, reason);
- else
- cmn_err(CE_DEBUG, "quotacheck failed id=%d (%s) [%d != %d]",
- d->d_id, reason, (int)a, (int)b);
- xfs_qm_dqtest_print(d);
- if (dqp)
- xfs_qm_dqprint(dqp);
-}
-
-STATIC int
-xfs_dqtest_cmp2(
- xfs_dqtest_t *d,
- xfs_dquot_t *dqp)
-{
- int err = 0;
- if (be64_to_cpu(dqp->q_core.d_icount) != d->d_icount) {
- xfs_qm_dqtest_failed(d, dqp, "icount mismatch",
- be64_to_cpu(dqp->q_core.d_icount),
- d->d_icount, 0);
- err++;
- }
- if (be64_to_cpu(dqp->q_core.d_bcount) != d->d_bcount) {
- xfs_qm_dqtest_failed(d, dqp, "bcount mismatch",
- be64_to_cpu(dqp->q_core.d_bcount),
- d->d_bcount, 0);
- err++;
- }
- if (dqp->q_core.d_blk_softlimit &&
- be64_to_cpu(dqp->q_core.d_bcount) >=
- be64_to_cpu(dqp->q_core.d_blk_softlimit)) {
- if (!dqp->q_core.d_btimer && dqp->q_core.d_id) {
- cmn_err(CE_DEBUG,
- "%d [%s] [0x%p] BLK TIMER NOT STARTED",
- d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount);
- err++;
- }
- }
- if (dqp->q_core.d_ino_softlimit &&
- be64_to_cpu(dqp->q_core.d_icount) >=
- be64_to_cpu(dqp->q_core.d_ino_softlimit)) {
- if (!dqp->q_core.d_itimer && dqp->q_core.d_id) {
- cmn_err(CE_DEBUG,
- "%d [%s] [0x%p] INO TIMER NOT STARTED",
- d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount);
- err++;
- }
- }
-#ifdef QUOTADEBUG
- if (!err) {
- cmn_err(CE_DEBUG, "%d [%s] [0x%p] qchecked",
- d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount);
- }
-#endif
- return (err);
-}
-
-STATIC void
-xfs_dqtest_cmp(
- xfs_dqtest_t *d)
-{
- xfs_dquot_t *dqp;
- int error;
-
- /* xfs_qm_dqtest_print(d); */
- if ((error = xfs_qm_dqget(d->q_mount, NULL, d->d_id, d->dq_flags, 0,
- &dqp))) {
- xfs_qm_dqtest_failed(d, NULL, "dqget failed", 0, 0, error);
- return;
- }
- xfs_dqtest_cmp2(d, dqp);
- xfs_qm_dqput(dqp);
-}
-
-STATIC int
-xfs_qm_internalqcheck_dqget(
- xfs_mount_t *mp,
- xfs_dqid_t id,
- uint type,
- xfs_dqtest_t **O_dq)
-{
- xfs_dqtest_t *d;
- xfs_dqhash_t *h;
-
- h = DQTEST_HASH(mp, id, type);
- for (d = (xfs_dqtest_t *) h->qh_next; d != NULL;
- d = (xfs_dqtest_t *) d->HL_NEXT) {
- /* DQTEST_LIST_PRINT(h, HL_NEXT, "@@@@@ dqtestlist @@@@@"); */
- if (d->d_id == id && mp == d->q_mount) {
- *O_dq = d;
- return (0);
- }
- }
- d = kmem_zalloc(sizeof(xfs_dqtest_t), KM_SLEEP);
- d->dq_flags = type;
- d->d_id = id;
- d->q_mount = mp;
- d->q_hash = h;
- xfs_qm_hashinsert(h, d);
- *O_dq = d;
- return (0);
-}
-
-STATIC void
-xfs_qm_internalqcheck_get_dquots(
- xfs_mount_t *mp,
- xfs_dqid_t uid,
- xfs_dqid_t projid,
- xfs_dqid_t gid,
- xfs_dqtest_t **ud,
- xfs_dqtest_t **gd)
-{
- if (XFS_IS_UQUOTA_ON(mp))
- xfs_qm_internalqcheck_dqget(mp, uid, XFS_DQ_USER, ud);
- if (XFS_IS_GQUOTA_ON(mp))
- xfs_qm_internalqcheck_dqget(mp, gid, XFS_DQ_GROUP, gd);
- else if (XFS_IS_PQUOTA_ON(mp))
- xfs_qm_internalqcheck_dqget(mp, projid, XFS_DQ_PROJ, gd);
-}
-
-
-STATIC void
-xfs_qm_internalqcheck_dqadjust(
- xfs_inode_t *ip,
- xfs_dqtest_t *d)
-{
- d->d_icount++;
- d->d_bcount += (xfs_qcnt_t)ip->i_d.di_nblocks;
-}
-
-STATIC int
-xfs_qm_internalqcheck_adjust(
- xfs_mount_t *mp, /* mount point for filesystem */
- xfs_ino_t ino, /* inode number to get data for */
- void __user *buffer, /* not used */
- int ubsize, /* not used */
- void *private_data, /* not used */
- xfs_daddr_t bno, /* starting block of inode cluster */
- int *ubused, /* not used */
- void *dip, /* not used */
- int *res) /* bulkstat result code */
-{
- xfs_inode_t *ip;
- xfs_dqtest_t *ud, *gd;
- uint lock_flags;
- boolean_t ipreleased;
- int error;
-
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
-
- if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
- *res = BULKSTAT_RV_NOTHING;
- qdprintk("internalqcheck: ino=%llu, uqino=%llu, gqino=%llu\n",
- (unsigned long long) ino,
- (unsigned long long) mp->m_sb.sb_uquotino,
- (unsigned long long) mp->m_sb.sb_gquotino);
- return XFS_ERROR(EINVAL);
- }
- ipreleased = B_FALSE;
- again:
- lock_flags = XFS_ILOCK_SHARED;
- if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip, bno))) {
- *res = BULKSTAT_RV_NOTHING;
- return (error);
- }
-
- /*
- * This inode can have blocks after eof which can get released
- * when we send it to inactive. Since we don't check the dquot
- * until after all our calculations are done, we must get rid
- * of those now.
- */
- if (! ipreleased) {
- xfs_iput(ip, lock_flags);
- ipreleased = B_TRUE;
- goto again;
- }
- xfs_qm_internalqcheck_get_dquots(mp,
- (xfs_dqid_t) ip->i_d.di_uid,
- (xfs_dqid_t) ip->i_d.di_projid,
- (xfs_dqid_t) ip->i_d.di_gid,
- &ud, &gd);
- if (XFS_IS_UQUOTA_ON(mp)) {
- ASSERT(ud);
- xfs_qm_internalqcheck_dqadjust(ip, ud);
- }
- if (XFS_IS_OQUOTA_ON(mp)) {
- ASSERT(gd);
- xfs_qm_internalqcheck_dqadjust(ip, gd);
- }
- xfs_iput(ip, lock_flags);
- *res = BULKSTAT_RV_DIDONE;
- return (0);
-}
-
-
-/* PRIVATE, debugging */
-int
-xfs_qm_internalqcheck(
- xfs_mount_t *mp)
-{
- xfs_ino_t lastino;
- int done, count;
- int i;
- xfs_dqtest_t *d, *e;
- xfs_dqhash_t *h1;
- int error;
-
- lastino = 0;
- qmtest_hashmask = 32;
- count = 5;
- done = 0;
- qmtest_nfails = 0;
-
- if (! XFS_IS_QUOTA_ON(mp))
- return XFS_ERROR(ESRCH);
-
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
- XFS_bflush(mp->m_ddev_targp);
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
- XFS_bflush(mp->m_ddev_targp);
-
- mutex_lock(&qcheck_lock);
- /* There should be absolutely no quota activity while this
- is going on. */
- qmtest_udqtab = kmem_zalloc(qmtest_hashmask *
- sizeof(xfs_dqhash_t), KM_SLEEP);
- qmtest_gdqtab = kmem_zalloc(qmtest_hashmask *
- sizeof(xfs_dqhash_t), KM_SLEEP);
- do {
- /*
- * Iterate thru all the inodes in the file system,
- * adjusting the corresponding dquot counters
- */
- if ((error = xfs_bulkstat(mp, &lastino, &count,
- xfs_qm_internalqcheck_adjust, NULL,
- 0, NULL, BULKSTAT_FG_IGET, &done))) {
- break;
- }
- } while (! done);
- if (error) {
- cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
- }
- cmn_err(CE_DEBUG, "Checking results against system dquots");
- for (i = 0; i < qmtest_hashmask; i++) {
- h1 = &qmtest_udqtab[i];
- for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
- xfs_dqtest_cmp(d);
- e = (xfs_dqtest_t *) d->HL_NEXT;
- kmem_free(d, sizeof(xfs_dqtest_t));
- d = e;
- }
- h1 = &qmtest_gdqtab[i];
- for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
- xfs_dqtest_cmp(d);
- e = (xfs_dqtest_t *) d->HL_NEXT;
- kmem_free(d, sizeof(xfs_dqtest_t));
- d = e;
- }
- }
-
- if (qmtest_nfails) {
- cmn_err(CE_DEBUG, "******** quotacheck failed ********");
- cmn_err(CE_DEBUG, "failures = %d", qmtest_nfails);
- } else {
- cmn_err(CE_DEBUG, "******** quotacheck successful! ********");
- }
- kmem_free(qmtest_udqtab, qmtest_hashmask * sizeof(xfs_dqhash_t));
- kmem_free(qmtest_gdqtab, qmtest_hashmask * sizeof(xfs_dqhash_t));
- mutex_unlock(&qcheck_lock);
- return (qmtest_nfails);
-}
-
-#endif /* DEBUG */
diff --git a/fs/xfs/quota/xfs_quota_priv.h b/fs/xfs/quota/xfs_quota_priv.h
deleted file mode 100644
index 5e4a40b1c56..00000000000
--- a/fs/xfs/quota/xfs_quota_priv.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (c) 2000-2003 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_QUOTA_PRIV_H__
-#define __XFS_QUOTA_PRIV_H__
-
-/*
- * Number of bmaps that we ask from bmapi when doing a quotacheck.
- * We make this restriction to keep the memory usage to a minimum.
- */
-#define XFS_DQITER_MAP_SIZE 10
-
-/* Number of dquots that fit in to a dquot block */
-#define XFS_QM_DQPERBLK(mp) ((mp)->m_quotainfo->qi_dqperchunk)
-
-#define XFS_DQ_IS_ADDEDTO_TRX(t, d) ((d)->q_transp == (t))
-
-#define XFS_QI_MPLRECLAIMS(mp) ((mp)->m_quotainfo->qi_dqreclaims)
-#define XFS_QI_UQIP(mp) ((mp)->m_quotainfo->qi_uquotaip)
-#define XFS_QI_GQIP(mp) ((mp)->m_quotainfo->qi_gquotaip)
-#define XFS_QI_DQCHUNKLEN(mp) ((mp)->m_quotainfo->qi_dqchunklen)
-#define XFS_QI_BTIMELIMIT(mp) ((mp)->m_quotainfo->qi_btimelimit)
-#define XFS_QI_RTBTIMELIMIT(mp) ((mp)->m_quotainfo->qi_rtbtimelimit)
-#define XFS_QI_ITIMELIMIT(mp) ((mp)->m_quotainfo->qi_itimelimit)
-#define XFS_QI_BWARNLIMIT(mp) ((mp)->m_quotainfo->qi_bwarnlimit)
-#define XFS_QI_RTBWARNLIMIT(mp) ((mp)->m_quotainfo->qi_rtbwarnlimit)
-#define XFS_QI_IWARNLIMIT(mp) ((mp)->m_quotainfo->qi_iwarnlimit)
-#define XFS_QI_QOFFLOCK(mp) ((mp)->m_quotainfo->qi_quotaofflock)
-
-#define XFS_QI_MPL_LIST(mp) ((mp)->m_quotainfo->qi_dqlist)
-#define XFS_QI_MPLLOCK(mp) ((mp)->m_quotainfo->qi_dqlist.qh_lock)
-#define XFS_QI_MPLNEXT(mp) ((mp)->m_quotainfo->qi_dqlist.qh_next)
-#define XFS_QI_MPLNDQUOTS(mp) ((mp)->m_quotainfo->qi_dqlist.qh_nelems)
-
-#define XQMLCK(h) (mutex_lock(&((h)->qh_lock)))
-#define XQMUNLCK(h) (mutex_unlock(&((h)->qh_lock)))
-#ifdef DEBUG
-struct xfs_dqhash;
-static inline int XQMISLCKD(struct xfs_dqhash *h)
-{
- if (mutex_trylock(&h->qh_lock)) {
- mutex_unlock(&h->qh_lock);
- return 0;
- }
- return 1;
-}
-#endif
-
-#define XFS_DQ_HASH_LOCK(h) XQMLCK(h)
-#define XFS_DQ_HASH_UNLOCK(h) XQMUNLCK(h)
-#define XFS_DQ_IS_HASH_LOCKED(h) XQMISLCKD(h)
-
-#define xfs_qm_mplist_lock(mp) XQMLCK(&(XFS_QI_MPL_LIST(mp)))
-#define xfs_qm_mplist_unlock(mp) XQMUNLCK(&(XFS_QI_MPL_LIST(mp)))
-#define XFS_QM_IS_MPLIST_LOCKED(mp) XQMISLCKD(&(XFS_QI_MPL_LIST(mp)))
-
-#define xfs_qm_freelist_lock(qm) XQMLCK(&((qm)->qm_dqfreelist))
-#define xfs_qm_freelist_unlock(qm) XQMUNLCK(&((qm)->qm_dqfreelist))
-
-/*
- * Hash into a bucket in the dquot hash table, based on <mp, id>.
- */
-#define XFS_DQ_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \
- (__psunsigned_t)(id)) & \
- (xfs_Gqm->qm_dqhashmask - 1))
-#define XFS_DQ_HASH(mp, id, type) (type == XFS_DQ_USER ? \
- (xfs_Gqm->qm_usr_dqhtable + \
- XFS_DQ_HASHVAL(mp, id)) : \
- (xfs_Gqm->qm_grp_dqhtable + \
- XFS_DQ_HASHVAL(mp, id)))
-#define XFS_IS_DQTYPE_ON(mp, type) (type == XFS_DQ_USER ? \
- XFS_IS_UQUOTA_ON(mp) : \
- XFS_IS_OQUOTA_ON(mp))
-#define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \
- !dqp->q_core.d_blk_hardlimit && \
- !dqp->q_core.d_blk_softlimit && \
- !dqp->q_core.d_rtb_hardlimit && \
- !dqp->q_core.d_rtb_softlimit && \
- !dqp->q_core.d_ino_hardlimit && \
- !dqp->q_core.d_ino_softlimit && \
- !dqp->q_core.d_bcount && \
- !dqp->q_core.d_rtbcount && \
- !dqp->q_core.d_icount)
-
-#define HL_PREVP dq_hashlist.ql_prevp
-#define HL_NEXT dq_hashlist.ql_next
-#define MPL_PREVP dq_mplist.ql_prevp
-#define MPL_NEXT dq_mplist.ql_next
-
-
-#define _LIST_REMOVE(h, dqp, PVP, NXT) \
- { \
- xfs_dquot_t *d; \
- if (((d) = (dqp)->NXT)) \
- (d)->PVP = (dqp)->PVP; \
- *((dqp)->PVP) = d; \
- (dqp)->NXT = NULL; \
- (dqp)->PVP = NULL; \
- (h)->qh_version++; \
- (h)->qh_nelems--; \
- }
-
-#define _LIST_INSERT(h, dqp, PVP, NXT) \
- { \
- xfs_dquot_t *d; \
- if (((d) = (h)->qh_next)) \
- (d)->PVP = &((dqp)->NXT); \
- (dqp)->NXT = d; \
- (dqp)->PVP = &((h)->qh_next); \
- (h)->qh_next = dqp; \
- (h)->qh_version++; \
- (h)->qh_nelems++; \
- }
-
-#define FOREACH_DQUOT_IN_MP(dqp, mp) \
- for ((dqp) = XFS_QI_MPLNEXT(mp); (dqp) != NULL; (dqp) = (dqp)->MPL_NEXT)
-
-#define FOREACH_DQUOT_IN_FREELIST(dqp, qlist) \
-for ((dqp) = (qlist)->qh_next; (dqp) != (xfs_dquot_t *)(qlist); \
- (dqp) = (dqp)->dq_flnext)
-
-#define XQM_HASHLIST_INSERT(h, dqp) \
- _LIST_INSERT(h, dqp, HL_PREVP, HL_NEXT)
-
-#define XQM_FREELIST_INSERT(h, dqp) \
- xfs_qm_freelist_append(h, dqp)
-
-#define XQM_MPLIST_INSERT(h, dqp) \
- _LIST_INSERT(h, dqp, MPL_PREVP, MPL_NEXT)
-
-#define XQM_HASHLIST_REMOVE(h, dqp) \
- _LIST_REMOVE(h, dqp, HL_PREVP, HL_NEXT)
-#define XQM_FREELIST_REMOVE(dqp) \
- xfs_qm_freelist_unlink(dqp)
-#define XQM_MPLIST_REMOVE(h, dqp) \
- { _LIST_REMOVE(h, dqp, MPL_PREVP, MPL_NEXT); \
- XFS_QI_MPLRECLAIMS((dqp)->q_mount)++; }
-
-#define XFS_DQ_IS_LOGITEM_INITD(dqp) ((dqp)->q_logitem.qli_dquot == (dqp))
-
-#define XFS_QM_DQP_TO_DQACCT(tp, dqp) (XFS_QM_ISUDQ(dqp) ? \
- (tp)->t_dqinfo->dqa_usrdquots : \
- (tp)->t_dqinfo->dqa_grpdquots)
-#define XFS_IS_SUSER_DQUOT(dqp) \
- (!((dqp)->q_core.d_id))
-
-#define XFS_PURGE_INODE(ip) \
- IRELE(ip);
-
-#define DQFLAGTO_TYPESTR(d) (((d)->dq_flags & XFS_DQ_USER) ? "USR" : \
- (((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : \
- (((d)->dq_flags & XFS_DQ_PROJ) ? "PRJ":"???")))
-
-#endif /* __XFS_QUOTA_PRIV_H__ */
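
The _LIST_INSERT/_LIST_REMOVE macros above implement a doubly linked list with the "pointer to the previous next pointer" idiom: PVP stores the address of whatever currently points at the element, so removal is O(1) with no special case for the list head. A standalone sketch of the same idiom as plain functions, with illustrative names:

/* Standalone sketch of the prev-pointer-of-pointer list idiom. */
#include <stddef.h>

struct node {
	struct node	*next;
	struct node	**prevp;	/* address of whoever points at us */
};

struct list_head_sketch {
	struct node	*first;
	unsigned int	nelems;
};

static void list_insert(struct list_head_sketch *h, struct node *n)
{
	if ((n->next = h->first))
		h->first->prevp = &n->next;
	n->prevp = &h->first;
	h->first = n;
	h->nelems++;
}

static void list_remove(struct list_head_sketch *h, struct node *n)
{
	if (n->next)
		n->next->prevp = n->prevp;
	*n->prevp = n->next;	/* works whether n was first or mid-list */
	n->next = NULL;
	n->prevp = NULL;
	h->nelems--;
}
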
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
deleted file mode 100644
index c27abef7b84..00000000000
--- a/fs/xfs/support/debug.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include <xfs.h>
-#include "debug.h"
-
-static char message[1024]; /* keep it off the stack */
-static DEFINE_SPINLOCK(xfs_err_lock);
-
-/* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */
-#define XFS_MAX_ERR_LEVEL 7
-#define XFS_ERR_MASK ((1 << 3) - 1)
-static const char * const err_level[XFS_MAX_ERR_LEVEL+1] =
- {KERN_EMERG, KERN_ALERT, KERN_CRIT,
- KERN_ERR, KERN_WARNING, KERN_NOTICE,
- KERN_INFO, KERN_DEBUG};
-
-void
-cmn_err(register int level, char *fmt, ...)
-{
- char *fp = fmt;
- int len;
- ulong flags;
- va_list ap;
-
- level &= XFS_ERR_MASK;
- if (level > XFS_MAX_ERR_LEVEL)
- level = XFS_MAX_ERR_LEVEL;
- spin_lock_irqsave(&xfs_err_lock,flags);
- va_start(ap, fmt);
- if (*fmt == '!') fp++;
- len = vsnprintf(message, sizeof(message), fp, ap);
- if (len >= sizeof(message))
- len = sizeof(message) - 1;
- if (message[len-1] == '\n')
- message[len-1] = 0;
- printk("%s%s\n", err_level[level], message);
- va_end(ap);
- spin_unlock_irqrestore(&xfs_err_lock,flags);
- BUG_ON(level == CE_PANIC);
-}
-
-void
-icmn_err(register int level, char *fmt, va_list ap)
-{
- ulong flags;
- int len;
-
- level &= XFS_ERR_MASK;
- if(level > XFS_MAX_ERR_LEVEL)
- level = XFS_MAX_ERR_LEVEL;
- spin_lock_irqsave(&xfs_err_lock,flags);
- len = vsnprintf(message, sizeof(message), fmt, ap);
- if (len >= sizeof(message))
- len = sizeof(message) - 1;
- if (message[len-1] == '\n')
- message[len-1] = 0;
- printk("%s%s\n", err_level[level], message);
- spin_unlock_irqrestore(&xfs_err_lock,flags);
- BUG_ON(level == CE_PANIC);
-}
-
-void
-assfail(char *expr, char *file, int line)
-{
- printk("Assertion failed: %s, file: %s, line: %d\n", expr, file, line);
- BUG();
-}
-
-void
-xfs_hex_dump(void *p, int length)
-{
- print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1);
-}
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
deleted file mode 100644
index 75845f95081..00000000000
--- a/fs/xfs/support/debug.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_SUPPORT_DEBUG_H__
-#define __XFS_SUPPORT_DEBUG_H__
-
-#include <stdarg.h>
-
-#define CE_DEBUG 7 /* debug */
-#define CE_CONT 6 /* continuation */
-#define CE_NOTE 5 /* notice */
-#define CE_WARN 4 /* warning */
-#define CE_ALERT 1 /* alert */
-#define CE_PANIC 0 /* panic */
-
-extern void icmn_err(int, char *, va_list)
- __attribute__ ((format (printf, 2, 0)));
-extern void cmn_err(int, char *, ...)
- __attribute__ ((format (printf, 2, 3)));
-extern void assfail(char *expr, char *f, int l);
-
-#define ASSERT_ALWAYS(expr) \
- (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
-
-#ifndef DEBUG
-#define ASSERT(expr) ((void)0)
-
-#ifndef STATIC
-# define STATIC static noinline
-#endif
-
-#ifndef STATIC_INLINE
-# define STATIC_INLINE static inline
-#endif
-
-#else /* DEBUG */
-
-#define ASSERT(expr) \
- (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
-
-#ifndef STATIC
-# define STATIC noinline
-#endif
-
-/*
- * We stop inlining of inline functions in debug mode.
- * Unfortunately, this means static inline functions in header files
- * get multiple definitions, so they need to remain static.
- * This then gives tonnes of warnings about unused but defined
- * functions, so we need to add the unused attribute to prevent
- * these spurious warnings.
- */
-#ifndef STATIC_INLINE
-# define STATIC_INLINE static __attribute__ ((unused)) noinline
-#endif
-
-#endif /* DEBUG */
-
-
-#endif /* __XFS_SUPPORT_DEBUG_H__ */
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
deleted file mode 100644
index 0b75d302508..00000000000
--- a/fs/xfs/support/ktrace.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include <xfs.h>
-
-static kmem_zone_t *ktrace_hdr_zone;
-static kmem_zone_t *ktrace_ent_zone;
-static int ktrace_zentries;
-
-void __init
-ktrace_init(int zentries)
-{
- ktrace_zentries = roundup_pow_of_two(zentries);
-
- ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
- "ktrace_hdr");
- ASSERT(ktrace_hdr_zone);
-
- ktrace_ent_zone = kmem_zone_init(ktrace_zentries
- * sizeof(ktrace_entry_t),
- "ktrace_ent");
- ASSERT(ktrace_ent_zone);
-}
-
-void __exit
-ktrace_uninit(void)
-{
- kmem_zone_destroy(ktrace_hdr_zone);
- kmem_zone_destroy(ktrace_ent_zone);
-}
-
-/*
- * ktrace_alloc()
- *
- * Allocate a ktrace header and enough buffering for the given
- * number of entries. Round the number of entries up to a
- * power of 2 so we can do fast masking to get the index from
- * the atomic index counter.
- */
-ktrace_t *
-ktrace_alloc(int nentries, unsigned int __nocast sleep)
-{
- ktrace_t *ktp;
- ktrace_entry_t *ktep;
- int entries;
-
- ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);
-
- if (ktp == (ktrace_t*)NULL) {
- /*
- * KM_SLEEP callers don't expect failure.
- */
- if (sleep & KM_SLEEP)
- panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
-
- return NULL;
- }
-
- /*
- * Special treatment for buffers with the ktrace_zentries entries
- */
- entries = roundup_pow_of_two(nentries);
- if (entries == ktrace_zentries) {
- ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
- sleep);
- } else {
- ktep = (ktrace_entry_t*)kmem_zalloc((entries * sizeof(*ktep)),
- sleep | KM_LARGE);
- }
-
- if (ktep == NULL) {
- /*
- * KM_SLEEP callers don't expect failure.
- */
- if (sleep & KM_SLEEP)
- panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
-
- kmem_free(ktp, sizeof(*ktp));
-
- return NULL;
- }
-
- ktp->kt_entries = ktep;
- ktp->kt_nentries = entries;
- ASSERT(is_power_of_2(entries));
- ktp->kt_index_mask = entries - 1;
- atomic_set(&ktp->kt_index, 0);
- ktp->kt_rollover = 0;
- return ktp;
-}
-
-
-/*
- * ktrace_free()
- *
- * Free up the ktrace header and buffer. It is up to the caller
- * to ensure that no-one is referencing it.
- */
-void
-ktrace_free(ktrace_t *ktp)
-{
- int entries_size;
-
- if (ktp == (ktrace_t *)NULL)
- return;
-
- /*
- * Special treatment for the Vnode trace buffer.
- */
- if (ktp->kt_nentries == ktrace_zentries) {
- kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
- } else {
- entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t));
-
- kmem_free(ktp->kt_entries, entries_size);
- }
-
- kmem_zone_free(ktrace_hdr_zone, ktp);
-}
-
-
-/*
- * Enter the given values into the "next" entry in the trace buffer.
- * kt_index is always the index of the next entry to be filled.
- */
-void
-ktrace_enter(
- ktrace_t *ktp,
- void *val0,
- void *val1,
- void *val2,
- void *val3,
- void *val4,
- void *val5,
- void *val6,
- void *val7,
- void *val8,
- void *val9,
- void *val10,
- void *val11,
- void *val12,
- void *val13,
- void *val14,
- void *val15)
-{
- int index;
- ktrace_entry_t *ktep;
-
- ASSERT(ktp != NULL);
-
- /*
- * Grab an entry by pushing the index up to the next one.
- */
- index = atomic_add_return(1, &ktp->kt_index);
- index = (index - 1) & ktp->kt_index_mask;
- if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
- ktp->kt_rollover = 1;
-
- ASSERT((index >= 0) && (index < ktp->kt_nentries));
-
- ktep = &(ktp->kt_entries[index]);
-
- ktep->val[0] = val0;
- ktep->val[1] = val1;
- ktep->val[2] = val2;
- ktep->val[3] = val3;
- ktep->val[4] = val4;
- ktep->val[5] = val5;
- ktep->val[6] = val6;
- ktep->val[7] = val7;
- ktep->val[8] = val8;
- ktep->val[9] = val9;
- ktep->val[10] = val10;
- ktep->val[11] = val11;
- ktep->val[12] = val12;
- ktep->val[13] = val13;
- ktep->val[14] = val14;
- ktep->val[15] = val15;
-}
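
ktrace_enter() never takes a lock: a free-running atomic counter hands out slots, and the slot is mapped onto the buffer with a power-of-two mask, which is why ktrace_alloc() rounds the entry count up. A compact user-space sketch of the same scheme (names and entry layout illustrative):

/* Compact sketch of the same power-of-two ring used by ktrace. */
#include <stdatomic.h>

#define TRACE_ENTRIES	256			/* must be a power of two */
#define TRACE_MASK	(TRACE_ENTRIES - 1)

struct trace_entry {
	void	*val[4];
};

static struct trace_entry	trace_buf[TRACE_ENTRIES];
static atomic_uint		trace_index;

static void trace_enter(void *a, void *b, void *c, void *d)
{
	/* fetch_add gives each caller a unique slot; the mask wraps it */
	unsigned int idx = atomic_fetch_add(&trace_index, 1) & TRACE_MASK;
	struct trace_entry *e = &trace_buf[idx];

	e->val[0] = a;
	e->val[1] = b;
	e->val[2] = c;
	e->val[3] = d;
}
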
-
-/*
- * Return the number of entries in the trace buffer.
- */
-int
-ktrace_nentries(
- ktrace_t *ktp)
-{
- int index;
- if (ktp == NULL)
- return 0;
-
- index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
- return (ktp->kt_rollover ? ktp->kt_nentries : index);
-}
-
-/*
- * ktrace_first()
- *
- * This is used to find the start of the trace buffer.
- * In conjunction with ktrace_next() it can be used to
- * iterate through the entire trace buffer. This code does
- * not do any locking because it is assumed that it is called
- * from the debugger.
- *
- * The caller must pass in a pointer to a ktrace_snap
- * structure in which we will keep some state used to
- * iterate through the buffer. This state must not be touched
- * by any code outside of this module.
- */
-ktrace_entry_t *
-ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
-{
- ktrace_entry_t *ktep;
- int index;
- int nentries;
-
- if (ktp->kt_rollover)
- index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
- else
- index = 0;
-
- ktsp->ks_start = index;
- ktep = &(ktp->kt_entries[index]);
-
- nentries = ktrace_nentries(ktp);
- index++;
- if (index < nentries) {
- ktsp->ks_index = index;
- } else {
- ktsp->ks_index = 0;
- if (index > nentries)
- ktep = NULL;
- }
- return ktep;
-}
-
-/*
- * ktrace_next()
- *
- * This is used to iterate through the entries of the given
- * trace buffer. The caller must pass in the ktrace_snap_t
- * structure initialized by ktrace_first(). The return value
- * will be either a pointer to the next ktrace_entry or NULL
- * if all of the entries have been traversed.
- */
-ktrace_entry_t *
-ktrace_next(
- ktrace_t *ktp,
- ktrace_snap_t *ktsp)
-{
- int index;
- ktrace_entry_t *ktep;
-
- index = ktsp->ks_index;
- if (index == ktsp->ks_start) {
- ktep = NULL;
- } else {
- ktep = &ktp->kt_entries[index];
- }
-
- index++;
- if (index == ktrace_nentries(ktp)) {
- ktsp->ks_index = 0;
- } else {
- ktsp->ks_index = index;
- }
-
- return ktep;
-}
-
-/*
- * ktrace_skip()
- *
- * Skip the next "count" entries and return the entry after that.
- * Return NULL if this causes us to iterate past the beginning again.
- */
-ktrace_entry_t *
-ktrace_skip(
- ktrace_t *ktp,
- int count,
- ktrace_snap_t *ktsp)
-{
- int index;
- int new_index;
- ktrace_entry_t *ktep;
- int nentries = ktrace_nentries(ktp);
-
- index = ktsp->ks_index;
- new_index = index + count;
- while (new_index >= nentries) {
- new_index -= nentries;
- }
- if (index == ktsp->ks_start) {
- /*
- * We've iterated around to the start, so we're done.
- */
- ktep = NULL;
- } else if ((new_index < index) && (index < ktsp->ks_index)) {
- /*
- * We've skipped past the start again, so we're done.
- */
- ktep = NULL;
- ktsp->ks_index = ktsp->ks_start;
- } else {
- ktep = &(ktp->kt_entries[new_index]);
- new_index++;
- if (new_index == nentries) {
- ktsp->ks_index = 0;
- } else {
- ktsp->ks_index = new_index;
- }
- }
- return ktep;
-}
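The ktrace buffer removed above rounds its size up to a power of two so the atomic index counter can be wrapped with a cheap mask instead of a modulo. A minimal sketch of that indexing scheme, using C11 atomics and illustrative names (trace_buf, trace_enter) that are not part of the kernel sources:

#include <stdatomic.h>

struct trace_buf {
	int		nentries;	/* rounded up to a power of two */
	unsigned int	index_mask;	/* nentries - 1 */
	atomic_uint	index;		/* next slot to fill */
	void		*entries[];	/* flexible array of slots */
};

static void trace_enter(struct trace_buf *tb, void *val)
{
	/* atomically claim the next slot, then mask to wrap around */
	unsigned int slot = atomic_fetch_add(&tb->index, 1) & tb->index_mask;

	tb->entries[slot] = val;
}

Because nentries is a power of two, the mask produces exactly the same result as taking the counter modulo nentries, without a division on the hot path.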
diff --git a/fs/xfs/support/ktrace.h b/fs/xfs/support/ktrace.h
deleted file mode 100644
index 741d6947ca6..00000000000
--- a/fs/xfs/support/ktrace.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_SUPPORT_KTRACE_H__
-#define __XFS_SUPPORT_KTRACE_H__
-
-/*
- * Trace buffer entry structure.
- */
-typedef struct ktrace_entry {
- void *val[16];
-} ktrace_entry_t;
-
-/*
- * Trace buffer header structure.
- */
-typedef struct ktrace {
- int kt_nentries; /* number of entries in trace buf */
- atomic_t kt_index; /* current index in entries */
- unsigned int kt_index_mask;
- int kt_rollover;
- ktrace_entry_t *kt_entries; /* buffer of entries */
-} ktrace_t;
-
-/*
- * Trace buffer snapshot structure.
- */
-typedef struct ktrace_snap {
- int ks_start; /* kt_index at time of snap */
- int ks_index; /* current index */
-} ktrace_snap_t;
-
-
-#ifdef CONFIG_XFS_TRACE
-
-extern void ktrace_init(int zentries);
-extern void ktrace_uninit(void);
-
-extern ktrace_t *ktrace_alloc(int, unsigned int __nocast);
-extern void ktrace_free(ktrace_t *);
-
-extern void ktrace_enter(
- ktrace_t *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *);
-
-extern ktrace_entry_t *ktrace_first(ktrace_t *, ktrace_snap_t *);
-extern int ktrace_nentries(ktrace_t *);
-extern ktrace_entry_t *ktrace_next(ktrace_t *, ktrace_snap_t *);
-extern ktrace_entry_t *ktrace_skip(ktrace_t *, int, ktrace_snap_t *);
-
-#else
-#define ktrace_init(x) do { } while (0)
-#define ktrace_uninit() do { } while (0)
-#endif /* CONFIG_XFS_TRACE */
-
-#endif /* __XFS_SUPPORT_KTRACE_H__ */
diff --git a/fs/xfs/linux-2.6/time.h b/fs/xfs/time.h
index 387e695a184..387e695a184 100644
--- a/fs/xfs/linux-2.6/time.h
+++ b/fs/xfs/time.h
diff --git a/fs/xfs/support/uuid.c b/fs/xfs/uuid.c
index 493a6ecf859..b83f76b6d41 100644
--- a/fs/xfs/support/uuid.c
+++ b/fs/xfs/uuid.c
@@ -17,10 +17,6 @@
*/
#include <xfs.h>
-static mutex_t uuid_monitor;
-static int uuid_table_size;
-static uuid_t *uuid_table;
-
/* IRIX interpretation of an uuid_t */
typedef struct {
__be32 uu_timelow;
@@ -46,12 +42,6 @@ uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
fsid[1] = be32_to_cpu(uup->uu_timelow);
}
-void
-uuid_create_nil(uuid_t *uuid)
-{
- memset(uuid, 0, sizeof(*uuid));
-}
-
int
uuid_is_nil(uuid_t *uuid)
{
@@ -71,70 +61,3 @@ uuid_equal(uuid_t *uuid1, uuid_t *uuid2)
{
return memcmp(uuid1, uuid2, sizeof(uuid_t)) ? 0 : 1;
}
-
-/*
- * Given a 128-bit uuid, return a 64-bit value by adding the top and bottom
- * 64-bit words. NOTE: This function can not be changed EVER. Although
- * brain-dead, some applications depend on this 64-bit value remaining
- * persistent. Specifically, DMI vendors store the value as a persistent
- * filehandle.
- */
-__uint64_t
-uuid_hash64(uuid_t *uuid)
-{
- __uint64_t *sp = (__uint64_t *)uuid;
-
- return sp[0] + sp[1];
-}
-
-int
-uuid_table_insert(uuid_t *uuid)
-{
- int i, hole;
-
- mutex_lock(&uuid_monitor);
- for (i = 0, hole = -1; i < uuid_table_size; i++) {
- if (uuid_is_nil(&uuid_table[i])) {
- hole = i;
- continue;
- }
- if (uuid_equal(uuid, &uuid_table[i])) {
- mutex_unlock(&uuid_monitor);
- return 0;
- }
- }
- if (hole < 0) {
- uuid_table = kmem_realloc(uuid_table,
- (uuid_table_size + 1) * sizeof(*uuid_table),
- uuid_table_size * sizeof(*uuid_table),
- KM_SLEEP);
- hole = uuid_table_size++;
- }
- uuid_table[hole] = *uuid;
- mutex_unlock(&uuid_monitor);
- return 1;
-}
-
-void
-uuid_table_remove(uuid_t *uuid)
-{
- int i;
-
- mutex_lock(&uuid_monitor);
- for (i = 0; i < uuid_table_size; i++) {
- if (uuid_is_nil(&uuid_table[i]))
- continue;
- if (!uuid_equal(uuid, &uuid_table[i]))
- continue;
- uuid_create_nil(&uuid_table[i]);
- break;
- }
- ASSERT(i < uuid_table_size);
- mutex_unlock(&uuid_monitor);
-}
-
-void __init
-uuid_init(void)
-{
- mutex_init(&uuid_monitor);
-}
diff --git a/fs/xfs/support/uuid.h b/fs/xfs/uuid.h
index b6f5922199b..104db0f3bed 100644
--- a/fs/xfs/support/uuid.h
+++ b/fs/xfs/uuid.h
@@ -22,13 +22,14 @@ typedef struct {
unsigned char __u_bits[16];
} uuid_t;
-extern void uuid_init(void);
-extern void uuid_create_nil(uuid_t *uuid);
extern int uuid_is_nil(uuid_t *uuid);
extern int uuid_equal(uuid_t *uuid1, uuid_t *uuid2);
extern void uuid_getnodeuniq(uuid_t *uuid, int fsid [2]);
-extern __uint64_t uuid_hash64(uuid_t *uuid);
-extern int uuid_table_insert(uuid_t *uuid);
-extern void uuid_table_remove(uuid_t *uuid);
+
+static inline void
+uuid_copy(uuid_t *dst, uuid_t *src)
+{
+ memcpy(dst, src, sizeof(uuid_t));
+}
#endif /* __XFS_SUPPORT_UUID_H__ */
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
index 540e4c98982..a742c47f7d5 100644
--- a/fs/xfs/xfs.h
+++ b/fs/xfs/xfs.h
@@ -22,24 +22,13 @@
#define STATIC
#define DEBUG 1
#define XFS_BUF_LOCK_TRACKING 1
-/* #define QUOTADEBUG 1 */
#endif
-#ifdef CONFIG_XFS_TRACE
-#define XFS_ALLOC_TRACE 1
-#define XFS_ATTR_TRACE 1
-#define XFS_BLI_TRACE 1
-#define XFS_BMAP_TRACE 1
-#define XFS_BMBT_TRACE 1
-#define XFS_DIR2_TRACE 1
-#define XFS_DQUOT_TRACE 1
-#define XFS_ILOCK_TRACE 1
-#define XFS_LOG_TRACE 1
-#define XFS_RW_TRACE 1
-#define XFS_BUF_TRACE 1
-#define XFS_INODE_TRACE 1
-#define XFS_FILESTREAMS_TRACE 1
+#ifdef CONFIG_XFS_WARN
+#define XFS_WARN 1
#endif
-#include <linux-2.6/xfs_linux.h>
+
+#include "xfs_linux.h"
+
#endif /* __XFS_H__ */
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index ebee3a4f703..6888ad886ff 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001-2002,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2008, Christoph Hellwig
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -16,866 +16,291 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_bit.h"
-#include "xfs_inum.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
#include "xfs_inode.h"
-#include "xfs_btree.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
-#include "xfs_vnodeops.h"
-
-#include <linux/capability.h>
+#include "xfs_trace.h"
+#include <linux/slab.h>
+#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
-STATIC int xfs_acl_setmode(bhv_vnode_t *, xfs_acl_t *, int *);
-STATIC void xfs_acl_filter_mode(mode_t, xfs_acl_t *);
-STATIC void xfs_acl_get_endian(xfs_acl_t *);
-STATIC int xfs_acl_access(uid_t, gid_t, xfs_acl_t *, mode_t, cred_t *);
-STATIC int xfs_acl_invalid(xfs_acl_t *);
-STATIC void xfs_acl_sync_mode(mode_t, xfs_acl_t *);
-STATIC void xfs_acl_get_attr(bhv_vnode_t *, xfs_acl_t *, int, int, int *);
-STATIC void xfs_acl_set_attr(bhv_vnode_t *, xfs_acl_t *, int, int *);
-STATIC int xfs_acl_allow_set(bhv_vnode_t *, int);
-
-kmem_zone_t *xfs_acl_zone;
-
/*
- * Test for existence of access ACL attribute as efficiently as possible.
+ * Locking scheme:
+ * - all ACL updates are protected by inode->i_mutex, which is taken before
+ * calling into this file.
*/
-int
-xfs_acl_vhasacl_access(
- bhv_vnode_t *vp)
-{
- int error;
-
- xfs_acl_get_attr(vp, NULL, _ACL_TYPE_ACCESS, ATTR_KERNOVAL, &error);
- return (error == 0);
-}
-
-/*
- * Test for existence of default ACL attribute as efficiently as possible.
- */
-int
-xfs_acl_vhasacl_default(
- bhv_vnode_t *vp)
-{
- int error;
-
- if (!S_ISDIR(vp->i_mode))
- return 0;
- xfs_acl_get_attr(vp, NULL, _ACL_TYPE_DEFAULT, ATTR_KERNOVAL, &error);
- return (error == 0);
-}
-/*
- * Convert from extended attribute representation to in-memory for XFS.
- */
-STATIC int
-posix_acl_xattr_to_xfs(
- posix_acl_xattr_header *src,
- size_t size,
- xfs_acl_t *dest)
+STATIC struct posix_acl *
+xfs_acl_from_disk(
+ struct xfs_acl *aclp,
+ int max_entries)
{
- posix_acl_xattr_entry *src_entry;
- xfs_acl_entry_t *dest_entry;
- int n;
-
- if (!src || !dest)
- return EINVAL;
+ struct posix_acl_entry *acl_e;
+ struct posix_acl *acl;
+ struct xfs_acl_entry *ace;
+ unsigned int count, i;
- if (size < sizeof(posix_acl_xattr_header))
- return EINVAL;
+ count = be32_to_cpu(aclp->acl_cnt);
+ if (count > max_entries)
+ return ERR_PTR(-EFSCORRUPTED);
- if (src->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
- return EOPNOTSUPP;
+ acl = posix_acl_alloc(count, GFP_KERNEL);
+ if (!acl)
+ return ERR_PTR(-ENOMEM);
- memset(dest, 0, sizeof(xfs_acl_t));
- dest->acl_cnt = posix_acl_xattr_count(size);
- if (dest->acl_cnt < 0 || dest->acl_cnt > XFS_ACL_MAX_ENTRIES)
- return EINVAL;
+ for (i = 0; i < count; i++) {
+ acl_e = &acl->a_entries[i];
+ ace = &aclp->acl_entry[i];
- /*
- * acl_set_file(3) may request that we set default ACLs with
- * zero length -- defend (gracefully) against that here.
- */
- if (!dest->acl_cnt)
- return 0;
-
- src_entry = (posix_acl_xattr_entry *)((char *)src + sizeof(*src));
- dest_entry = &dest->acl_entry[0];
+ /*
+ * The tag is 32 bits on disk and 16 bits in core.
+ *
+ * Because every access to it goes through the core
+ * format first, this is not a problem.
+ */
+ acl_e->e_tag = be32_to_cpu(ace->ae_tag);
+ acl_e->e_perm = be16_to_cpu(ace->ae_perm);
- for (n = 0; n < dest->acl_cnt; n++, src_entry++, dest_entry++) {
- dest_entry->ae_perm = le16_to_cpu(src_entry->e_perm);
- if (_ACL_PERM_INVALID(dest_entry->ae_perm))
- return EINVAL;
- dest_entry->ae_tag = le16_to_cpu(src_entry->e_tag);
- switch(dest_entry->ae_tag) {
+ switch (acl_e->e_tag) {
case ACL_USER:
+ acl_e->e_uid = xfs_uid_to_kuid(be32_to_cpu(ace->ae_id));
+ break;
case ACL_GROUP:
- dest_entry->ae_id = le32_to_cpu(src_entry->e_id);
+ acl_e->e_gid = xfs_gid_to_kgid(be32_to_cpu(ace->ae_id));
break;
case ACL_USER_OBJ:
case ACL_GROUP_OBJ:
case ACL_MASK:
case ACL_OTHER:
- dest_entry->ae_id = ACL_UNDEFINED_ID;
break;
default:
- return EINVAL;
+ goto fail;
}
}
- if (xfs_acl_invalid(dest))
- return EINVAL;
+ return acl;
- return 0;
+fail:
+ posix_acl_release(acl);
+ return ERR_PTR(-EINVAL);
}
-/*
- * Comparison function called from xfs_sort().
- * Primary key is ae_tag, secondary key is ae_id.
- */
-STATIC int
-xfs_acl_entry_compare(
- const void *va,
- const void *vb)
+STATIC void
+xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
{
- xfs_acl_entry_t *a = (xfs_acl_entry_t *)va,
- *b = (xfs_acl_entry_t *)vb;
+ const struct posix_acl_entry *acl_e;
+ struct xfs_acl_entry *ace;
+ int i;
- if (a->ae_tag == b->ae_tag)
- return (a->ae_id - b->ae_id);
- return (a->ae_tag - b->ae_tag);
-}
+ aclp->acl_cnt = cpu_to_be32(acl->a_count);
+ for (i = 0; i < acl->a_count; i++) {
+ ace = &aclp->acl_entry[i];
+ acl_e = &acl->a_entries[i];
-/*
- * Convert from in-memory XFS to extended attribute representation.
- */
-STATIC int
-posix_acl_xfs_to_xattr(
- xfs_acl_t *src,
- posix_acl_xattr_header *dest,
- size_t size)
-{
- int n;
- size_t new_size = posix_acl_xattr_size(src->acl_cnt);
- posix_acl_xattr_entry *dest_entry;
- xfs_acl_entry_t *src_entry;
-
- if (size < new_size)
- return -ERANGE;
-
- /* Need to sort src XFS ACL by <ae_tag,ae_id> */
- xfs_sort(src->acl_entry, src->acl_cnt, sizeof(src->acl_entry[0]),
- xfs_acl_entry_compare);
-
- dest->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
- dest_entry = &dest->a_entries[0];
- src_entry = &src->acl_entry[0];
- for (n = 0; n < src->acl_cnt; n++, dest_entry++, src_entry++) {
- dest_entry->e_perm = cpu_to_le16(src_entry->ae_perm);
- if (_ACL_PERM_INVALID(src_entry->ae_perm))
- return -EINVAL;
- dest_entry->e_tag = cpu_to_le16(src_entry->ae_tag);
- switch (src_entry->ae_tag) {
+ ace->ae_tag = cpu_to_be32(acl_e->e_tag);
+ switch (acl_e->e_tag) {
case ACL_USER:
+ ace->ae_id = cpu_to_be32(xfs_kuid_to_uid(acl_e->e_uid));
+ break;
case ACL_GROUP:
- dest_entry->e_id = cpu_to_le32(src_entry->ae_id);
- break;
- case ACL_USER_OBJ:
- case ACL_GROUP_OBJ:
- case ACL_MASK:
- case ACL_OTHER:
- dest_entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID);
+ ace->ae_id = cpu_to_be32(xfs_kgid_to_gid(acl_e->e_gid));
break;
default:
- return -EINVAL;
- }
- }
- return new_size;
-}
-
-int
-xfs_acl_vget(
- bhv_vnode_t *vp,
- void *acl,
- size_t size,
- int kind)
-{
- int error;
- xfs_acl_t *xfs_acl = NULL;
- posix_acl_xattr_header *ext_acl = acl;
- int flags = 0;
-
- VN_HOLD(vp);
- if(size) {
- if (!(_ACL_ALLOC(xfs_acl))) {
- error = ENOMEM;
- goto out;
+ ace->ae_id = cpu_to_be32(ACL_UNDEFINED_ID);
+ break;
}
- memset(xfs_acl, 0, sizeof(xfs_acl_t));
- } else
- flags = ATTR_KERNOVAL;
-
- xfs_acl_get_attr(vp, xfs_acl, kind, flags, &error);
- if (error)
- goto out;
- if (!size) {
- error = -posix_acl_xattr_size(XFS_ACL_MAX_ENTRIES);
- } else {
- if (xfs_acl_invalid(xfs_acl)) {
- error = EINVAL;
- goto out;
- }
- if (kind == _ACL_TYPE_ACCESS)
- xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, xfs_acl);
- error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size);
+ ace->ae_perm = cpu_to_be16(acl_e->e_perm);
}
-out:
- VN_RELE(vp);
- if(xfs_acl)
- _ACL_FREE(xfs_acl);
- return -error;
}
-int
-xfs_acl_vremove(
- bhv_vnode_t *vp,
- int kind)
+struct posix_acl *
+xfs_get_acl(struct inode *inode, int type)
{
- int error;
-
- VN_HOLD(vp);
- error = xfs_acl_allow_set(vp, kind);
- if (!error) {
- error = xfs_attr_remove(xfs_vtoi(vp),
- kind == _ACL_TYPE_DEFAULT?
- SGI_ACL_DEFAULT: SGI_ACL_FILE,
- ATTR_ROOT);
- if (error == ENOATTR)
- error = 0; /* 'scool */
+ struct xfs_inode *ip = XFS_I(inode);
+ struct posix_acl *acl = NULL;
+ struct xfs_acl *xfs_acl;
+ unsigned char *ea_name;
+ int error;
+ int len;
+
+ trace_xfs_get_acl(ip);
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ ea_name = SGI_ACL_FILE;
+ break;
+ case ACL_TYPE_DEFAULT:
+ ea_name = SGI_ACL_DEFAULT;
+ break;
+ default:
+ BUG();
}
- VN_RELE(vp);
- return -error;
-}
-
-int
-xfs_acl_vset(
- bhv_vnode_t *vp,
- void *acl,
- size_t size,
- int kind)
-{
- posix_acl_xattr_header *ext_acl = acl;
- xfs_acl_t *xfs_acl;
- int error;
- int basicperms = 0; /* more than std unix perms? */
- if (!acl)
- return -EINVAL;
-
- if (!(_ACL_ALLOC(xfs_acl)))
- return -ENOMEM;
+ /*
+ * If we have a cached ACL value just return it; no need to
+ * go out to the disk.
+ */
+ len = XFS_ACL_MAX_SIZE(ip->i_mount);
+ xfs_acl = kmem_zalloc_large(len, KM_SLEEP);
+ if (!xfs_acl)
+ return ERR_PTR(-ENOMEM);
- error = posix_acl_xattr_to_xfs(ext_acl, size, xfs_acl);
+ error = -xfs_attr_get(ip, ea_name, (unsigned char *)xfs_acl,
+ &len, ATTR_ROOT);
if (error) {
- _ACL_FREE(xfs_acl);
- return -error;
- }
- if (!xfs_acl->acl_cnt) {
- _ACL_FREE(xfs_acl);
- return 0;
+ /*
+ * If the attribute doesn't exist make sure we have a negative
+ * cache entry; for any other error assume it is transient and
+ * leave the cache entry as ACL_NOT_CACHED.
+ */
+ if (error == -ENOATTR)
+ goto out_update_cache;
+ goto out;
}
- VN_HOLD(vp);
- error = xfs_acl_allow_set(vp, kind);
-
- /* Incoming ACL exists, set file mode based on its value */
- if (!error && kind == _ACL_TYPE_ACCESS)
- error = xfs_acl_setmode(vp, xfs_acl, &basicperms);
-
- if (error)
+ acl = xfs_acl_from_disk(xfs_acl, XFS_ACL_MAX_ENTRIES(ip->i_mount));
+ if (IS_ERR(acl))
goto out;
- /*
- * If we have more than std unix permissions, set up the actual attr.
- * Otherwise, delete any existing attr. This prevents us from
- * having actual attrs for permissions that can be stored in the
- * standard permission bits.
- */
- if (!basicperms) {
- xfs_acl_set_attr(vp, xfs_acl, kind, &error);
- } else {
- error = -xfs_acl_vremove(vp, _ACL_TYPE_ACCESS);
- }
-
+out_update_cache:
+ set_cached_acl(inode, type, acl);
out:
- VN_RELE(vp);
- _ACL_FREE(xfs_acl);
- return -error;
+ kmem_free(xfs_acl);
+ return acl;
}
-int
-xfs_acl_iaccess(
- xfs_inode_t *ip,
- mode_t mode,
- cred_t *cr)
+STATIC int
+__xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
{
- xfs_acl_t *acl;
- int rval;
- struct xfs_name acl_name = {SGI_ACL_FILE, SGI_ACL_FILE_SIZE};
-
- if (!(_ACL_ALLOC(acl)))
- return -1;
-
- /* If the file has no ACL return -1. */
- rval = sizeof(xfs_acl_t);
- if (xfs_attr_fetch(ip, &acl_name, (char *)acl, &rval,
- ATTR_ROOT | ATTR_KERNACCESS)) {
- _ACL_FREE(acl);
- return -1;
- }
- xfs_acl_get_endian(acl);
+ struct xfs_inode *ip = XFS_I(inode);
+ unsigned char *ea_name;
+ int error;
- /* If the file has an empty ACL return -1. */
- if (acl->acl_cnt == XFS_ACL_NOT_PRESENT) {
- _ACL_FREE(acl);
- return -1;
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ ea_name = SGI_ACL_FILE;
+ break;
+ case ACL_TYPE_DEFAULT:
+ if (!S_ISDIR(inode->i_mode))
+ return acl ? -EACCES : 0;
+ ea_name = SGI_ACL_DEFAULT;
+ break;
+ default:
+ return -EINVAL;
}
- /* Synchronize ACL with mode bits */
- xfs_acl_sync_mode(ip->i_d.di_mode, acl);
+ if (acl) {
+ struct xfs_acl *xfs_acl;
+ int len = XFS_ACL_MAX_SIZE(ip->i_mount);
- rval = xfs_acl_access(ip->i_d.di_uid, ip->i_d.di_gid, acl, mode, cr);
- _ACL_FREE(acl);
- return rval;
-}
+ xfs_acl = kmem_zalloc_large(len, KM_SLEEP);
+ if (!xfs_acl)
+ return -ENOMEM;
-STATIC int
-xfs_acl_allow_set(
- bhv_vnode_t *vp,
- int kind)
-{
- if (vp->i_flags & (S_IMMUTABLE|S_APPEND))
- return EPERM;
- if (kind == _ACL_TYPE_DEFAULT && !S_ISDIR(vp->i_mode))
- return ENOTDIR;
- if (vp->i_sb->s_flags & MS_RDONLY)
- return EROFS;
- if (xfs_vtoi(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER))
- return EPERM;
- return 0;
-}
+ xfs_acl_to_disk(xfs_acl, acl);
-/*
- * Note: cr is only used here for the capability check if the ACL test fails.
- * It is not used to find out the credentials uid or groups etc, as was
- * done in IRIX. It is assumed that the uid and groups for the current
- * thread are taken from "current" instead of the cr parameter.
- */
-STATIC int
-xfs_acl_access(
- uid_t fuid,
- gid_t fgid,
- xfs_acl_t *fap,
- mode_t md,
- cred_t *cr)
-{
- xfs_acl_entry_t matched;
- int i, allows;
- int maskallows = -1; /* true, but not 1, either */
- int seen_userobj = 0;
+ /* subtract away the unused acl entries */
+ len -= sizeof(struct xfs_acl_entry) *
+ (XFS_ACL_MAX_ENTRIES(ip->i_mount) - acl->a_count);
- matched.ae_tag = 0; /* Invalid type */
- matched.ae_perm = 0;
+ error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl,
+ len, ATTR_ROOT);
- for (i = 0; i < fap->acl_cnt; i++) {
+ kmem_free(xfs_acl);
+ } else {
/*
- * Break out if we've got a user_obj entry or
- * a user entry and the mask (and have processed USER_OBJ)
+ * A NULL ACL argument means we want to remove the ACL.
*/
- if (matched.ae_tag == ACL_USER_OBJ)
- break;
- if (matched.ae_tag == ACL_USER) {
- if (maskallows != -1 && seen_userobj)
- break;
- if (fap->acl_entry[i].ae_tag != ACL_MASK &&
- fap->acl_entry[i].ae_tag != ACL_USER_OBJ)
- continue;
- }
- /* True if this entry allows the requested access */
- allows = ((fap->acl_entry[i].ae_perm & md) == md);
+ error = -xfs_attr_remove(ip, ea_name, ATTR_ROOT);
- switch (fap->acl_entry[i].ae_tag) {
- case ACL_USER_OBJ:
- seen_userobj = 1;
- if (fuid != current->fsuid)
- continue;
- matched.ae_tag = ACL_USER_OBJ;
- matched.ae_perm = allows;
- break;
- case ACL_USER:
- if (fap->acl_entry[i].ae_id != current->fsuid)
- continue;
- matched.ae_tag = ACL_USER;
- matched.ae_perm = allows;
- break;
- case ACL_GROUP_OBJ:
- if ((matched.ae_tag == ACL_GROUP_OBJ ||
- matched.ae_tag == ACL_GROUP) && !allows)
- continue;
- if (!in_group_p(fgid))
- continue;
- matched.ae_tag = ACL_GROUP_OBJ;
- matched.ae_perm = allows;
- break;
- case ACL_GROUP:
- if ((matched.ae_tag == ACL_GROUP_OBJ ||
- matched.ae_tag == ACL_GROUP) && !allows)
- continue;
- if (!in_group_p(fap->acl_entry[i].ae_id))
- continue;
- matched.ae_tag = ACL_GROUP;
- matched.ae_perm = allows;
- break;
- case ACL_MASK:
- maskallows = allows;
- break;
- case ACL_OTHER:
- if (matched.ae_tag != 0)
- continue;
- matched.ae_tag = ACL_OTHER;
- matched.ae_perm = allows;
- break;
- }
- }
- /*
- * First possibility is that no matched entry allows access.
- * The capability to override DAC may exist, so check for it.
- */
- switch (matched.ae_tag) {
- case ACL_OTHER:
- case ACL_USER_OBJ:
- if (matched.ae_perm)
- return 0;
- break;
- case ACL_USER:
- case ACL_GROUP_OBJ:
- case ACL_GROUP:
- if (maskallows && matched.ae_perm)
- return 0;
- break;
- case 0:
- break;
+ /*
+ * If the attribute didn't exist to start with that's fine.
+ */
+ if (error == -ENOATTR)
+ error = 0;
}
- /* EACCES tells generic_permission to check for capability overrides */
- return EACCES;
+ if (!error)
+ set_cached_acl(inode, type, acl);
+ return error;
}
-/*
- * ACL validity checker.
- * This acl validation routine checks each ACL entry read in makes sense.
- */
-STATIC int
-xfs_acl_invalid(
- xfs_acl_t *aclp)
+static int
+xfs_set_mode(struct inode *inode, umode_t mode)
{
- xfs_acl_entry_t *entry, *e;
- int user = 0, group = 0, other = 0, mask = 0;
- int mask_required = 0;
- int i, j;
+ int error = 0;
- if (!aclp)
- goto acl_invalid;
+ if (mode != inode->i_mode) {
+ struct iattr iattr;
- if (aclp->acl_cnt > XFS_ACL_MAX_ENTRIES)
- goto acl_invalid;
+ iattr.ia_valid = ATTR_MODE | ATTR_CTIME;
+ iattr.ia_mode = mode;
+ iattr.ia_ctime = current_fs_time(inode->i_sb);
- for (i = 0; i < aclp->acl_cnt; i++) {
- entry = &aclp->acl_entry[i];
- switch (entry->ae_tag) {
- case ACL_USER_OBJ:
- if (user++)
- goto acl_invalid;
- break;
- case ACL_GROUP_OBJ:
- if (group++)
- goto acl_invalid;
- break;
- case ACL_OTHER:
- if (other++)
- goto acl_invalid;
- break;
- case ACL_USER:
- case ACL_GROUP:
- for (j = i + 1; j < aclp->acl_cnt; j++) {
- e = &aclp->acl_entry[j];
- if (e->ae_id == entry->ae_id &&
- e->ae_tag == entry->ae_tag)
- goto acl_invalid;
- }
- mask_required++;
- break;
- case ACL_MASK:
- if (mask++)
- goto acl_invalid;
- break;
- default:
- goto acl_invalid;
- }
- }
- if (!user || !group || !other || (mask_required && !mask))
- goto acl_invalid;
- else
- return 0;
-acl_invalid:
- return EINVAL;
-}
-
-/*
- * Do ACL endian conversion.
- */
-STATIC void
-xfs_acl_get_endian(
- xfs_acl_t *aclp)
-{
- xfs_acl_entry_t *ace, *end;
-
- INT_SET(aclp->acl_cnt, ARCH_CONVERT, aclp->acl_cnt);
- end = &aclp->acl_entry[0]+aclp->acl_cnt;
- for (ace = &aclp->acl_entry[0]; ace < end; ace++) {
- INT_SET(ace->ae_tag, ARCH_CONVERT, ace->ae_tag);
- INT_SET(ace->ae_id, ARCH_CONVERT, ace->ae_id);
- INT_SET(ace->ae_perm, ARCH_CONVERT, ace->ae_perm);
+ error = -xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
}
-}
-/*
- * Get the ACL from the EA and do endian conversion.
- */
-STATIC void
-xfs_acl_get_attr(
- bhv_vnode_t *vp,
- xfs_acl_t *aclp,
- int kind,
- int flags,
- int *error)
-{
- int len = sizeof(xfs_acl_t);
-
- ASSERT((flags & ATTR_KERNOVAL) ? (aclp == NULL) : 1);
- flags |= ATTR_ROOT;
- *error = xfs_attr_get(xfs_vtoi(vp),
- kind == _ACL_TYPE_ACCESS ?
- SGI_ACL_FILE : SGI_ACL_DEFAULT,
- (char *)aclp, &len, flags);
- if (*error || (flags & ATTR_KERNOVAL))
- return;
- xfs_acl_get_endian(aclp);
+ return error;
}
-/*
- * Set the EA with the ACL and do endian conversion.
- */
-STATIC void
-xfs_acl_set_attr(
- bhv_vnode_t *vp,
- xfs_acl_t *aclp,
- int kind,
- int *error)
+static int
+xfs_acl_exists(struct inode *inode, unsigned char *name)
{
- xfs_acl_entry_t *ace, *newace, *end;
- xfs_acl_t *newacl;
- int len;
+ int len = XFS_ACL_MAX_SIZE(XFS_M(inode->i_sb));
- if (!(_ACL_ALLOC(newacl))) {
- *error = ENOMEM;
- return;
- }
-
- len = sizeof(xfs_acl_t) -
- (sizeof(xfs_acl_entry_t) * (XFS_ACL_MAX_ENTRIES - aclp->acl_cnt));
- end = &aclp->acl_entry[0]+aclp->acl_cnt;
- for (ace = &aclp->acl_entry[0], newace = &newacl->acl_entry[0];
- ace < end;
- ace++, newace++) {
- INT_SET(newace->ae_tag, ARCH_CONVERT, ace->ae_tag);
- INT_SET(newace->ae_id, ARCH_CONVERT, ace->ae_id);
- INT_SET(newace->ae_perm, ARCH_CONVERT, ace->ae_perm);
- }
- INT_SET(newacl->acl_cnt, ARCH_CONVERT, aclp->acl_cnt);
- *error = xfs_attr_set(xfs_vtoi(vp),
- kind == _ACL_TYPE_ACCESS ?
- SGI_ACL_FILE: SGI_ACL_DEFAULT,
- (char *)newacl, len, ATTR_ROOT);
- _ACL_FREE(newacl);
+ return (xfs_attr_get(XFS_I(inode), name, NULL, &len,
+ ATTR_ROOT|ATTR_KERNOVAL) == 0);
}
int
-xfs_acl_vtoacl(
- bhv_vnode_t *vp,
- xfs_acl_t *access_acl,
- xfs_acl_t *default_acl)
+posix_acl_access_exists(struct inode *inode)
{
- int error = 0;
-
- if (access_acl) {
- /*
- * Get the Access ACL and the mode. If either cannot
- * be obtained for some reason, invalidate the access ACL.
- */
- xfs_acl_get_attr(vp, access_acl, _ACL_TYPE_ACCESS, 0, &error);
- if (error)
- access_acl->acl_cnt = XFS_ACL_NOT_PRESENT;
- else /* We have a good ACL and the file mode, synchronize. */
- xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, access_acl);
- }
-
- if (default_acl) {
- xfs_acl_get_attr(vp, default_acl, _ACL_TYPE_DEFAULT, 0, &error);
- if (error)
- default_acl->acl_cnt = XFS_ACL_NOT_PRESENT;
- }
- return error;
+ return xfs_acl_exists(inode, SGI_ACL_FILE);
}
-/*
- * This function retrieves the parent directory's acl, processes it
- * and lets the child inherit the acl(s) that it should.
- */
int
-xfs_acl_inherit(
- bhv_vnode_t *vp,
- mode_t mode,
- xfs_acl_t *pdaclp)
+posix_acl_default_exists(struct inode *inode)
{
- xfs_acl_t *cacl;
- int error = 0;
- int basicperms = 0;
-
- /*
- * If the parent does not have a default ACL, or it's an
- * invalid ACL, we're done.
- */
- if (!vp)
+ if (!S_ISDIR(inode->i_mode))
return 0;
- if (!pdaclp || xfs_acl_invalid(pdaclp))
- return 0;
-
- /*
- * Copy the default ACL of the containing directory to
- * the access ACL of the new file and use the mode that
- * was passed in to set up the correct initial values for
- * the u::,g::[m::], and o:: entries. This is what makes
- * umask() "work" with ACL's.
- */
-
- if (!(_ACL_ALLOC(cacl)))
- return ENOMEM;
-
- memcpy(cacl, pdaclp, sizeof(xfs_acl_t));
- xfs_acl_filter_mode(mode, cacl);
- error = xfs_acl_setmode(vp, cacl, &basicperms);
- if (error)
- goto out_error;
-
- /*
- * Set the Default and Access ACL on the file. The mode is already
- * set on the file, so we don't need to worry about that.
- *
- * If the new file is a directory, its default ACL is a copy of
- * the containing directory's default ACL.
- */
- if (S_ISDIR(vp->i_mode))
- xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error);
- if (!error && !basicperms)
- xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error);
-out_error:
- _ACL_FREE(cacl);
- return error;
+ return xfs_acl_exists(inode, SGI_ACL_DEFAULT);
}
-/*
- * Set up the correct mode on the file based on the supplied ACL. This
- * makes sure that the mode on the file reflects the state of the
- * u::,g::[m::], and o:: entries in the ACL. Since the mode is where
- * the ACL is going to get the permissions for these entries, we must
- * synchronize the mode whenever we set the ACL on a file.
- */
-STATIC int
-xfs_acl_setmode(
- bhv_vnode_t *vp,
- xfs_acl_t *acl,
- int *basicperms)
+int
+xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
- bhv_vattr_t va;
- xfs_acl_entry_t *ap;
- xfs_acl_entry_t *gap = NULL;
- int i, nomask = 1;
-
- *basicperms = 1;
-
- if (acl->acl_cnt == XFS_ACL_NOT_PRESENT)
- return 0;
+ int error = 0;
- /*
- * Copy the u::, g::, o::, and m:: bits from the ACL into the
- * mode. The m:: bits take precedence over the g:: bits.
- */
- va.va_mask = XFS_AT_MODE;
- va.va_mode = xfs_vtoi(vp)->i_d.di_mode;
- va.va_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO);
- ap = acl->acl_entry;
- for (i = 0; i < acl->acl_cnt; ++i) {
- switch (ap->ae_tag) {
- case ACL_USER_OBJ:
- va.va_mode |= ap->ae_perm << 6;
- break;
- case ACL_GROUP_OBJ:
- gap = ap;
- break;
- case ACL_MASK: /* more than just standard modes */
- nomask = 0;
- va.va_mode |= ap->ae_perm << 3;
- *basicperms = 0;
- break;
- case ACL_OTHER:
- va.va_mode |= ap->ae_perm;
- break;
- default: /* more than just standard modes */
- *basicperms = 0;
- break;
- }
- ap++;
- }
+ if (!acl)
+ goto set_acl;
- /* Set the group bits from ACL_GROUP_OBJ if there's no ACL_MASK */
- if (gap && nomask)
- va.va_mode |= gap->ae_perm << 3;
+ error = -E2BIG;
+ if (acl->a_count > XFS_ACL_MAX_ENTRIES(XFS_M(inode->i_sb)))
+ return error;
- return xfs_setattr(xfs_vtoi(vp), &va, 0, sys_cred);
-}
+ if (type == ACL_TYPE_ACCESS) {
+ umode_t mode = inode->i_mode;
+ error = posix_acl_equiv_mode(acl, &mode);
-/*
- * The permissions for the special ACL entries (u::, g::[m::], o::) are
- * actually stored in the file mode (if there is both a group and a mask,
- * the group is stored in the ACL entry and the mask is stored on the file).
- * This allows the mode to remain automatically in sync with the ACL without
- * the need for a call-back to the ACL system at every point where the mode
- * could change. This function takes the permissions from the specified mode
- * and places it in the supplied ACL.
- *
- * This implementation draws its validity from the fact that, when the ACL
- * was assigned, the mode was copied from the ACL.
- * If the mode did not change, therefore, the mode remains exactly what was
- * taken from the special ACL entries at assignment.
- * If a subsequent chmod() was done, the POSIX spec says that the change in
- * mode must cause an update to the ACL seen at user level and used for
- * access checks. Before and after a mode change, therefore, the file mode
- * most accurately reflects what the special ACL entries should permit/deny.
- *
- * CAVEAT: If someone sets the SGI_ACL_FILE attribute directly,
- * the existing mode bits will override whatever is in the
- * ACL. Similarly, if there is a pre-existing ACL that was
- * never in sync with its mode (owing to a bug in 6.5 and
- * before), it will now magically (or mystically) be
- * synchronized. This could cause slight astonishment, but
- * it is better than inconsistent permissions.
- *
- * The supplied ACL is a template that may contain any combination
- * of special entries. These are treated as place holders when we fill
- * out the ACL. This routine does not add or remove special entries, it
- * simply unites each special entry with its associated set of permissions.
- */
-STATIC void
-xfs_acl_sync_mode(
- mode_t mode,
- xfs_acl_t *acl)
-{
- int i, nomask = 1;
- xfs_acl_entry_t *ap;
- xfs_acl_entry_t *gap = NULL;
+ if (error <= 0) {
+ acl = NULL;
- /*
- * Set ACL entries. POSIX1003.1eD16 requires that the MASK
- * be set instead of the GROUP entry, if there is a MASK.
- */
- for (ap = acl->acl_entry, i = 0; i < acl->acl_cnt; ap++, i++) {
- switch (ap->ae_tag) {
- case ACL_USER_OBJ:
- ap->ae_perm = (mode >> 6) & 0x7;
- break;
- case ACL_GROUP_OBJ:
- gap = ap;
- break;
- case ACL_MASK:
- nomask = 0;
- ap->ae_perm = (mode >> 3) & 0x7;
- break;
- case ACL_OTHER:
- ap->ae_perm = mode & 0x7;
- break;
- default:
- break;
+ if (error < 0)
+ return error;
}
- }
- /* Set the ACL_GROUP_OBJ if there's no ACL_MASK */
- if (gap && nomask)
- gap->ae_perm = (mode >> 3) & 0x7;
-}
-/*
- * When inheriting an Access ACL from a directory Default ACL,
- * the ACL bits are set to the intersection of the ACL default
- * permission bits and the file permission bits in mode. If there
- * are no permission bits on the file then we must not give them
- * the ACL. This is what makes umask() work with ACLs.
- */
-STATIC void
-xfs_acl_filter_mode(
- mode_t mode,
- xfs_acl_t *acl)
-{
- int i, nomask = 1;
- xfs_acl_entry_t *ap;
- xfs_acl_entry_t *gap = NULL;
-
- /*
- * Set ACL entries. POSIX1003.1eD16 requires that the MASK
- * be merged with GROUP entry, if there is a MASK.
- */
- for (ap = acl->acl_entry, i = 0; i < acl->acl_cnt; ap++, i++) {
- switch (ap->ae_tag) {
- case ACL_USER_OBJ:
- ap->ae_perm &= (mode >> 6) & 0x7;
- break;
- case ACL_GROUP_OBJ:
- gap = ap;
- break;
- case ACL_MASK:
- nomask = 0;
- ap->ae_perm &= (mode >> 3) & 0x7;
- break;
- case ACL_OTHER:
- ap->ae_perm &= mode & 0x7;
- break;
- default:
- break;
- }
+ error = xfs_set_mode(inode, mode);
+ if (error)
+ return error;
}
- /* Set the ACL_GROUP_OBJ if there's no ACL_MASK */
- if (gap && nomask)
- gap->ae_perm &= (mode >> 3) & 0x7;
+
+ set_acl:
+ return __xfs_set_acl(inode, type, acl);
}
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 332a772461c..5dc16374451 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -18,82 +18,58 @@
#ifndef __XFS_ACL_H__
#define __XFS_ACL_H__
-/*
- * Access Control Lists
- */
-typedef __uint16_t xfs_acl_perm_t;
-typedef __int32_t xfs_acl_type_t;
-typedef __int32_t xfs_acl_tag_t;
-typedef __int32_t xfs_acl_id_t;
+struct inode;
+struct posix_acl;
+struct xfs_inode;
-#define XFS_ACL_MAX_ENTRIES 25
#define XFS_ACL_NOT_PRESENT (-1)
-typedef struct xfs_acl_entry {
- xfs_acl_tag_t ae_tag;
- xfs_acl_id_t ae_id;
- xfs_acl_perm_t ae_perm;
-} xfs_acl_entry_t;
+/* On-disk XFS access control list structure */
+struct xfs_acl_entry {
+ __be32 ae_tag;
+ __be32 ae_id;
+ __be16 ae_perm;
+ __be16 ae_pad; /* fill the implicit hole in the structure */
+};
-typedef struct xfs_acl {
- __int32_t acl_cnt;
- xfs_acl_entry_t acl_entry[XFS_ACL_MAX_ENTRIES];
-} xfs_acl_t;
+struct xfs_acl {
+ __be32 acl_cnt;
+ struct xfs_acl_entry acl_entry[0];
+};
+
+/*
+ * The number of ACL entries allowed is defined by the on-disk format.
+ * For v4 superblocks, that is limited to 25 entries. For v5 superblocks, it is
+ * limited only by the maximum size of the xattr that stores the information.
+ */
+#define XFS_ACL_MAX_ENTRIES(mp) \
+ (xfs_sb_version_hascrc(&mp->m_sb) \
+ ? (XATTR_SIZE_MAX - sizeof(struct xfs_acl)) / \
+ sizeof(struct xfs_acl_entry) \
+ : 25)
+
+#define XFS_ACL_MAX_SIZE(mp) \
+ (sizeof(struct xfs_acl) + \
+ sizeof(struct xfs_acl_entry) * XFS_ACL_MAX_ENTRIES((mp)))
/* On-disk XFS extended attribute names */
-#define SGI_ACL_FILE "SGI_ACL_FILE"
-#define SGI_ACL_DEFAULT "SGI_ACL_DEFAULT"
+#define SGI_ACL_FILE (unsigned char *)"SGI_ACL_FILE"
+#define SGI_ACL_DEFAULT (unsigned char *)"SGI_ACL_DEFAULT"
#define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1)
#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1)
-
#ifdef CONFIG_XFS_POSIX_ACL
-
-struct vattr;
-struct xfs_inode;
-
-extern struct kmem_zone *xfs_acl_zone;
-#define xfs_acl_zone_init(zone, name) \
- (zone) = kmem_zone_init(sizeof(xfs_acl_t), (name))
-#define xfs_acl_zone_destroy(zone) kmem_zone_destroy(zone)
-
-extern int xfs_acl_inherit(bhv_vnode_t *, mode_t mode, xfs_acl_t *);
-extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *);
-extern int xfs_acl_vtoacl(bhv_vnode_t *, xfs_acl_t *, xfs_acl_t *);
-extern int xfs_acl_vhasacl_access(bhv_vnode_t *);
-extern int xfs_acl_vhasacl_default(bhv_vnode_t *);
-extern int xfs_acl_vset(bhv_vnode_t *, void *, size_t, int);
-extern int xfs_acl_vget(bhv_vnode_t *, void *, size_t, int);
-extern int xfs_acl_vremove(bhv_vnode_t *, int);
-
-#define _ACL_TYPE_ACCESS 1
-#define _ACL_TYPE_DEFAULT 2
-#define _ACL_PERM_INVALID(perm) ((perm) & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE))
-
-#define _ACL_INHERIT(c,m,d) (xfs_acl_inherit(c,m,d))
-#define _ACL_GET_ACCESS(pv,pa) (xfs_acl_vtoacl(pv,pa,NULL) == 0)
-#define _ACL_GET_DEFAULT(pv,pd) (xfs_acl_vtoacl(pv,NULL,pd) == 0)
-#define _ACL_ACCESS_EXISTS xfs_acl_vhasacl_access
-#define _ACL_DEFAULT_EXISTS xfs_acl_vhasacl_default
-
-#define _ACL_ALLOC(a) ((a) = kmem_zone_alloc(xfs_acl_zone, KM_SLEEP))
-#define _ACL_FREE(a) ((a)? kmem_zone_free(xfs_acl_zone, (a)):(void)0)
-
+extern struct posix_acl *xfs_get_acl(struct inode *inode, int type);
+extern int xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+extern int posix_acl_access_exists(struct inode *inode);
+extern int posix_acl_default_exists(struct inode *inode);
#else
-#define xfs_acl_zone_init(zone,name)
-#define xfs_acl_zone_destroy(zone)
-#define xfs_acl_vset(v,p,sz,t) (-EOPNOTSUPP)
-#define xfs_acl_vget(v,p,sz,t) (-EOPNOTSUPP)
-#define xfs_acl_vremove(v,t) (-EOPNOTSUPP)
-#define xfs_acl_vhasacl_access(v) (0)
-#define xfs_acl_vhasacl_default(v) (0)
-#define _ACL_ALLOC(a) (1) /* successfully allocate nothing */
-#define _ACL_FREE(a) ((void)0)
-#define _ACL_INHERIT(c,m,d) (0)
-#define _ACL_GET_ACCESS(pv,pa) (0)
-#define _ACL_GET_DEFAULT(pv,pd) (0)
-#define _ACL_ACCESS_EXISTS (NULL)
-#define _ACL_DEFAULT_EXISTS (NULL)
-#endif
-
+static inline struct posix_acl *xfs_get_acl(struct inode *inode, int type)
+{
+ return NULL;
+}
+# define xfs_set_acl NULL
+# define posix_acl_access_exists(inode) 0
+# define posix_acl_default_exists(inode) 0
+#endif /* CONFIG_XFS_POSIX_ACL */
#endif /* __XFS_ACL_H__ */
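As a rough worked example of the new limits (illustrative only, assuming the usual Linux XATTR_SIZE_MAX of 65536 bytes and the 12-byte struct xfs_acl_entry defined above): XFS_ACL_MAX_ENTRIES() evaluates to (65536 - 4) / 12 = 5461 entries on a v5 (CRC-enabled) superblock, versus the historical 25 on v4, and XFS_ACL_MAX_SIZE() is then the 4-byte acl_cnt header plus 12 bytes per allowed entry.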
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 61b292a9fb4..6e247a99f5d 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -30,6 +30,7 @@ struct xfs_trans;
#define XFS_AGF_MAGIC 0x58414746 /* 'XAGF' */
#define XFS_AGI_MAGIC 0x58414749 /* 'XAGI' */
+#define XFS_AGFL_MAGIC 0x5841464c /* 'XAFL' */
#define XFS_AGF_VERSION 1
#define XFS_AGI_VERSION 1
@@ -63,14 +64,33 @@ typedef struct xfs_agf {
__be32 agf_spare0; /* spare field */
__be32 agf_levels[XFS_BTNUM_AGF]; /* btree levels */
__be32 agf_spare1; /* spare field */
+
__be32 agf_flfirst; /* first freelist block's index */
__be32 agf_fllast; /* last freelist block's index */
__be32 agf_flcount; /* count of blocks in freelist */
__be32 agf_freeblks; /* total free blocks */
+
__be32 agf_longest; /* longest free space */
__be32 agf_btreeblks; /* # of blocks held in AGF btrees */
+ uuid_t agf_uuid; /* uuid of filesystem */
+
+ /*
+ * reserve some contiguous space for future logged fields before we add
+ * the unlogged fields. This makes the range logging via flags and
+ * structure offsets much simpler.
+ */
+ __be64 agf_spare64[16];
+
+ /* unlogged fields, written during buffer writeback. */
+ __be64 agf_lsn; /* last write sequence */
+ __be32 agf_crc; /* crc of agf sector */
+ __be32 agf_spare2;
+
+ /* structure must be padded to 64 bit alignment */
} xfs_agf_t;
+#define XFS_AGF_CRC_OFF offsetof(struct xfs_agf, agf_crc)
+
#define XFS_AGF_MAGICNUM 0x00000001
#define XFS_AGF_VERSIONNUM 0x00000002
#define XFS_AGF_SEQNO 0x00000004
@@ -83,14 +103,32 @@ typedef struct xfs_agf {
#define XFS_AGF_FREEBLKS 0x00000200
#define XFS_AGF_LONGEST 0x00000400
#define XFS_AGF_BTREEBLKS 0x00000800
-#define XFS_AGF_NUM_BITS 12
+#define XFS_AGF_UUID 0x00001000
+#define XFS_AGF_NUM_BITS 13
#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1)
+#define XFS_AGF_FLAGS \
+ { XFS_AGF_MAGICNUM, "MAGICNUM" }, \
+ { XFS_AGF_VERSIONNUM, "VERSIONNUM" }, \
+ { XFS_AGF_SEQNO, "SEQNO" }, \
+ { XFS_AGF_LENGTH, "LENGTH" }, \
+ { XFS_AGF_ROOTS, "ROOTS" }, \
+ { XFS_AGF_LEVELS, "LEVELS" }, \
+ { XFS_AGF_FLFIRST, "FLFIRST" }, \
+ { XFS_AGF_FLLAST, "FLLAST" }, \
+ { XFS_AGF_FLCOUNT, "FLCOUNT" }, \
+ { XFS_AGF_FREEBLKS, "FREEBLKS" }, \
+ { XFS_AGF_LONGEST, "LONGEST" }, \
+ { XFS_AGF_BTREEBLKS, "BTREEBLKS" }, \
+ { XFS_AGF_UUID, "UUID" }
+
/* disk block (xfs_daddr_t) in the AG */
#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
#define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp))
-#define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)XFS_BUF_PTR(bp))
+#define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)((bp)->b_addr))
+extern int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_agnumber_t agno, int flags, struct xfs_buf **bpp);
/*
* Size of the unlinked inode hash table in the agi.
@@ -114,6 +152,7 @@ typedef struct xfs_agi {
__be32 agi_root; /* root of inode btree */
__be32 agi_level; /* levels in inode btree */
__be32 agi_freecount; /* number of free inodes */
+
__be32 agi_newino; /* new inode just allocated */
__be32 agi_dirino; /* last directory inode chunk */
/*
@@ -121,26 +160,46 @@ typedef struct xfs_agi {
* still being referenced.
*/
__be32 agi_unlinked[XFS_AGI_UNLINKED_BUCKETS];
+ /*
+ * This marks the end of logging region 1 and start of logging region 2.
+ */
+ uuid_t agi_uuid; /* uuid of filesystem */
+ __be32 agi_crc; /* crc of agi sector */
+ __be32 agi_pad32;
+ __be64 agi_lsn; /* last write sequence */
+
+ __be32 agi_free_root; /* root of the free inode btree */
+ __be32 agi_free_level;/* levels in free inode btree */
+
+ /* structure must be padded to 64 bit alignment */
} xfs_agi_t;
-#define XFS_AGI_MAGICNUM 0x00000001
-#define XFS_AGI_VERSIONNUM 0x00000002
-#define XFS_AGI_SEQNO 0x00000004
-#define XFS_AGI_LENGTH 0x00000008
-#define XFS_AGI_COUNT 0x00000010
-#define XFS_AGI_ROOT 0x00000020
-#define XFS_AGI_LEVEL 0x00000040
-#define XFS_AGI_FREECOUNT 0x00000080
-#define XFS_AGI_NEWINO 0x00000100
-#define XFS_AGI_DIRINO 0x00000200
-#define XFS_AGI_UNLINKED 0x00000400
-#define XFS_AGI_NUM_BITS 11
-#define XFS_AGI_ALL_BITS ((1 << XFS_AGI_NUM_BITS) - 1)
+#define XFS_AGI_CRC_OFF offsetof(struct xfs_agi, agi_crc)
+
+#define XFS_AGI_MAGICNUM (1 << 0)
+#define XFS_AGI_VERSIONNUM (1 << 1)
+#define XFS_AGI_SEQNO (1 << 2)
+#define XFS_AGI_LENGTH (1 << 3)
+#define XFS_AGI_COUNT (1 << 4)
+#define XFS_AGI_ROOT (1 << 5)
+#define XFS_AGI_LEVEL (1 << 6)
+#define XFS_AGI_FREECOUNT (1 << 7)
+#define XFS_AGI_NEWINO (1 << 8)
+#define XFS_AGI_DIRINO (1 << 9)
+#define XFS_AGI_UNLINKED (1 << 10)
+#define XFS_AGI_NUM_BITS_R1 11 /* end of the 1st agi logging region */
+#define XFS_AGI_ALL_BITS_R1 ((1 << XFS_AGI_NUM_BITS_R1) - 1)
+#define XFS_AGI_FREE_ROOT (1 << 11)
+#define XFS_AGI_FREE_LEVEL (1 << 12)
+#define XFS_AGI_NUM_BITS_R2 13
/* disk block (xfs_daddr_t) in the AG */
#define XFS_AGI_DADDR(mp) ((xfs_daddr_t)(2 << (mp)->m_sectbb_log))
#define XFS_AGI_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp))
-#define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)XFS_BUF_PTR(bp))
+#define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)((bp)->b_addr))
+
+extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_agnumber_t agno, struct xfs_buf **bpp);
/*
* The third a.g. block contains the a.g. freelist, an array
@@ -148,60 +207,42 @@ typedef struct xfs_agi {
*/
#define XFS_AGFL_DADDR(mp) ((xfs_daddr_t)(3 << (mp)->m_sectbb_log))
#define XFS_AGFL_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGFL_DADDR(mp))
-#define XFS_AGFL_SIZE(mp) ((mp)->m_sb.sb_sectsize / sizeof(xfs_agblock_t))
-#define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)XFS_BUF_PTR(bp))
+#define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)((bp)->b_addr))
-typedef struct xfs_agfl {
- __be32 agfl_bno[1]; /* actually XFS_AGFL_SIZE(mp) */
-} xfs_agfl_t;
+#define XFS_BUF_TO_AGFL_BNO(mp, bp) \
+ (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
+ &(XFS_BUF_TO_AGFL(bp)->agfl_bno[0]) : \
+ (__be32 *)(bp)->b_addr)
/*
- * Busy block/extent entry. Used in perag to mark blocks that have been freed
- * but whose transactions aren't committed to disk yet.
+ * Size of the AGFL. For CRC-enabled filesystems we steal a couple of
+ * slots in the beginning of the block for a proper header with the
+ * location information and CRC.
*/
-typedef struct xfs_perag_busy {
- xfs_agblock_t busy_start;
- xfs_extlen_t busy_length;
- struct xfs_trans *busy_tp; /* transaction that did the free */
-} xfs_perag_busy_t;
+#define XFS_AGFL_SIZE(mp) \
+ (((mp)->m_sb.sb_sectsize - \
+ (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
+ sizeof(struct xfs_agfl) : 0)) / \
+ sizeof(xfs_agblock_t))
+
+typedef struct xfs_agfl {
+ __be32 agfl_magicnum;
+ __be32 agfl_seqno;
+ uuid_t agfl_uuid;
+ __be64 agfl_lsn;
+ __be32 agfl_crc;
+ __be32 agfl_bno[]; /* actually XFS_AGFL_SIZE(mp) */
+} xfs_agfl_t;
+
+#define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc)
/*
- * Per-ag incore structure, copies of information in agf and agi,
- * to improve the performance of allocation group selection.
- *
- * pick sizes which fit in allocation buckets well
+ * tags for inode radix tree
*/
-#if (BITS_PER_LONG == 32)
-#define XFS_PAGB_NUM_SLOTS 84
-#elif (BITS_PER_LONG == 64)
-#define XFS_PAGB_NUM_SLOTS 128
-#endif
-
-typedef struct xfs_perag
-{
- char pagf_init; /* this agf's entry is initialized */
- char pagi_init; /* this agi's entry is initialized */
- char pagf_metadata; /* the agf is preferred to be metadata */
- char pagi_inodeok; /* The agi is ok for inodes */
- __uint8_t pagf_levels[XFS_BTNUM_AGF];
- /* # of levels in bno & cnt btree */
- __uint32_t pagf_flcount; /* count of blocks in freelist */
- xfs_extlen_t pagf_freeblks; /* total free blocks */
- xfs_extlen_t pagf_longest; /* longest free space */
- __uint32_t pagf_btreeblks; /* # of blocks held in AGF btrees */
- xfs_agino_t pagi_freecount; /* number of free inodes */
- xfs_agino_t pagi_count; /* number of allocated inodes */
- int pagb_count; /* pagb slots in use */
-#ifdef __KERNEL__
- spinlock_t pagb_lock; /* lock for pagb_list */
-#endif
- xfs_perag_busy_t *pagb_list; /* unstable blocks */
- atomic_t pagf_fstrms; /* # of filestreams active in this AG */
-
- int pag_ici_init; /* incore inode cache initialised */
- rwlock_t pag_ici_lock; /* incore inode lock */
- struct radix_tree_root pag_ici_root; /* incore inode cache root */
-} xfs_perag_t;
+#define XFS_ICI_NO_TAG (-1) /* special flag for an untagged lookup
+ in xfs_inode_ag_iterator */
+#define XFS_ICI_RECLAIM_TAG 0 /* inode is to be reclaimed */
+#define XFS_ICI_EOFBLOCKS_TAG 1 /* inode has blocks beyond EOF */
#define XFS_AG_MAXLEVELS(mp) ((mp)->m_ag_maxlevels)
#define XFS_MIN_FREELIST_RAW(bl,cl,mp) \
@@ -212,15 +253,15 @@ typedef struct xfs_perag
be32_to_cpu((a)->agf_levels[XFS_BTNUM_CNTi]), mp))
#define XFS_MIN_FREELIST_PAG(pag,mp) \
(XFS_MIN_FREELIST_RAW( \
- (uint_t)(pag)->pagf_levels[XFS_BTNUM_BNOi], \
- (uint_t)(pag)->pagf_levels[XFS_BTNUM_CNTi], mp))
+ (unsigned int)(pag)->pagf_levels[XFS_BTNUM_BNOi], \
+ (unsigned int)(pag)->pagf_levels[XFS_BTNUM_CNTi], mp))
#define XFS_AGB_TO_FSB(mp,agno,agbno) \
(((xfs_fsblock_t)(agno) << (mp)->m_sb.sb_agblklog) | (agbno))
#define XFS_FSB_TO_AGNO(mp,fsbno) \
((xfs_agnumber_t)((fsbno) >> (mp)->m_sb.sb_agblklog))
#define XFS_FSB_TO_AGBNO(mp,fsbno) \
- ((xfs_agblock_t)((fsbno) & XFS_MASK32LO((mp)->m_sb.sb_agblklog)))
+ ((xfs_agblock_t)((fsbno) & xfs_mask32lo((mp)->m_sb.sb_agblklog)))
#define XFS_AGB_TO_DADDR(mp,agno,agbno) \
((xfs_daddr_t)XFS_FSB_TO_BB(mp, \
(xfs_fsblock_t)(agno) * (mp)->m_sb.sb_agblocks + (agbno)))
@@ -233,8 +274,8 @@ typedef struct xfs_perag
#define XFS_AG_CHECK_DADDR(mp,d,len) \
((len) == 1 ? \
ASSERT((d) == XFS_SB_DADDR || \
- XFS_DADDR_TO_AGBNO(mp, d) != XFS_SB_DADDR) : \
- ASSERT(XFS_DADDR_TO_AGNO(mp, d) == \
- XFS_DADDR_TO_AGNO(mp, (d) + (len) - 1)))
+ xfs_daddr_to_agbno(mp, d) != XFS_SB_DADDR) : \
+ ASSERT(xfs_daddr_to_agno(mp, d) == \
+ xfs_daddr_to_agno(mp, (d) + (len) - 1)))
#endif /* __XFS_AG_H__ */
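As a rough worked example of the reworked XFS_AGFL_SIZE() (illustrative numbers only): with the common 512-byte sector size, a non-CRC filesystem still gets 512 / 4 = 128 free-list slots, while a CRC-enabled filesystem first subtracts the struct xfs_agfl header (magic, seqno, UUID, LSN and CRC: 36 bytes of fields plus whatever padding the compiler adds), leaving roughly 118 or 119 slots.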
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 1956f83489f..d43813267a8 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -17,77 +17,124 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
-#include "xfs_ialloc.h"
+#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
+#include "xfs_extent_busy.h"
#include "xfs_error.h"
+#include "xfs_cksum.h"
+#include "xfs_trace.h"
+#include "xfs_trans.h"
+#include "xfs_buf_item.h"
+#include "xfs_log.h"
+struct workqueue_struct *xfs_alloc_wq;
#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
#define XFSA_FIXUP_BNO_OK 1
#define XFSA_FIXUP_CNT_OK 2
-STATIC void
-xfs_alloc_search_busy(xfs_trans_t *tp,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
- xfs_extlen_t len);
-
-#if defined(XFS_ALLOC_TRACE)
-ktrace_t *xfs_alloc_trace_buf;
-
-#define TRACE_ALLOC(s,a) \
- xfs_alloc_trace_alloc(__func__, s, a, __LINE__)
-#define TRACE_FREE(s,a,b,x,f) \
- xfs_alloc_trace_free(__func__, s, mp, a, b, x, f, __LINE__)
-#define TRACE_MODAGF(s,a,f) \
- xfs_alloc_trace_modagf(__func__, s, mp, a, f, __LINE__)
-#define TRACE_BUSY(__func__,s,ag,agb,l,sl,tp) \
- xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__)
-#define TRACE_UNBUSY(__func__,s,ag,sl,tp) \
- xfs_alloc_trace_busy(__func__, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__)
-#define TRACE_BUSYSEARCH(__func__,s,ag,agb,l,tp) \
- xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, 0, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__)
-#else
-#define TRACE_ALLOC(s,a)
-#define TRACE_FREE(s,a,b,x,f)
-#define TRACE_MODAGF(s,a,f)
-#define TRACE_BUSY(s,a,ag,agb,l,sl,tp)
-#define TRACE_UNBUSY(fname,s,ag,sl,tp)
-#define TRACE_BUSYSEARCH(fname,s,ag,agb,l,tp)
-#endif /* XFS_ALLOC_TRACE */
-
-/*
- * Prototypes for per-ag allocation routines
- */
-
STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
- xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
+ xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
+
+/*
+ * Lookup the record equal to [bno, len] in the btree given by cur.
+ */
+STATIC int /* error */
+xfs_alloc_lookup_eq(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ xfs_agblock_t bno, /* starting block of extent */
+ xfs_extlen_t len, /* length of extent */
+ int *stat) /* success/failure */
+{
+ cur->bc_rec.a.ar_startblock = bno;
+ cur->bc_rec.a.ar_blockcount = len;
+ return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
+}
+
+/*
+ * Lookup the first record greater than or equal to [bno, len]
+ * in the btree given by cur.
+ */
+int /* error */
+xfs_alloc_lookup_ge(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ xfs_agblock_t bno, /* starting block of extent */
+ xfs_extlen_t len, /* length of extent */
+ int *stat) /* success/failure */
+{
+ cur->bc_rec.a.ar_startblock = bno;
+ cur->bc_rec.a.ar_blockcount = len;
+ return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
+}
+
+/*
+ * Lookup the first record less than or equal to [bno, len]
+ * in the btree given by cur.
+ */
+int /* error */
+xfs_alloc_lookup_le(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ xfs_agblock_t bno, /* starting block of extent */
+ xfs_extlen_t len, /* length of extent */
+ int *stat) /* success/failure */
+{
+ cur->bc_rec.a.ar_startblock = bno;
+ cur->bc_rec.a.ar_blockcount = len;
+ return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
+}
/*
- * Internal functions.
+ * Update the record referred to by cur to the value given
+ * by [bno, len].
+ * This either works (return 0) or gets an EFSCORRUPTED error.
*/
+STATIC int /* error */
+xfs_alloc_update(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ xfs_agblock_t bno, /* starting block of extent */
+ xfs_extlen_t len) /* length of extent */
+{
+ union xfs_btree_rec rec;
+
+ rec.alloc.ar_startblock = cpu_to_be32(bno);
+ rec.alloc.ar_blockcount = cpu_to_be32(len);
+ return xfs_btree_update(cur, &rec);
+}
+
+/*
+ * Get the data from the pointed-to record.
+ */
+int /* error */
+xfs_alloc_get_rec(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ xfs_agblock_t *bno, /* output: starting block of extent */
+ xfs_extlen_t *len, /* output: length of extent */
+ int *stat) /* output: success/failure */
+{
+ union xfs_btree_rec *rec;
+ int error;
+
+ error = xfs_btree_get_rec(cur, &rec, stat);
+ if (!error && *stat == 1) {
+ *bno = be32_to_cpu(rec->alloc.ar_startblock);
+ *len = be32_to_cpu(rec->alloc.ar_blockcount);
+ }
+ return error;
+}
/*
* Compute aligned version of the found extent.
@@ -95,27 +142,28 @@ STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
*/
STATIC void
xfs_alloc_compute_aligned(
+ xfs_alloc_arg_t *args, /* allocation argument structure */
xfs_agblock_t foundbno, /* starting block in found extent */
xfs_extlen_t foundlen, /* length in found extent */
- xfs_extlen_t alignment, /* alignment for allocation */
- xfs_extlen_t minlen, /* minimum length for allocation */
xfs_agblock_t *resbno, /* result block number */
xfs_extlen_t *reslen) /* result length */
{
xfs_agblock_t bno;
- xfs_extlen_t diff;
xfs_extlen_t len;
- if (alignment > 1 && foundlen >= minlen) {
- bno = roundup(foundbno, alignment);
- diff = bno - foundbno;
- len = diff >= foundlen ? 0 : foundlen - diff;
+ /* Trim busy sections out of found extent */
+ xfs_extent_busy_trim(args, foundbno, foundlen, &bno, &len);
+
+ if (args->alignment > 1 && len >= args->minlen) {
+ xfs_agblock_t aligned_bno = roundup(bno, args->alignment);
+ xfs_extlen_t diff = aligned_bno - bno;
+
+ *resbno = aligned_bno;
+ *reslen = diff >= len ? 0 : len - diff;
} else {
- bno = foundbno;
- len = foundlen;
+ *resbno = bno;
+ *reslen = len;
}
- *resbno = bno;
- *reslen = len;
}
/*
@@ -127,6 +175,7 @@ xfs_alloc_compute_diff(
xfs_agblock_t wantbno, /* target starting block */
xfs_extlen_t wantlen, /* target length */
xfs_extlen_t alignment, /* target alignment */
+ char userdata, /* are we allocating data? */
xfs_agblock_t freebno, /* freespace's starting block */
xfs_extlen_t freelen, /* freespace's length */
xfs_agblock_t *newbnop) /* result: best start block from free */
@@ -141,7 +190,14 @@ xfs_alloc_compute_diff(
ASSERT(freelen >= wantlen);
freeend = freebno + freelen;
wantend = wantbno + wantlen;
- if (freebno >= wantbno) {
+ /*
+ * We want to allocate from the start of a free extent if it is past
+ * the desired block or if we are allocating user data and the free
+ * extent is before the desired block. The second case is there to allow
+ * for contiguous allocation from the remaining free space if the file
+ * grows in the short term.
+ */
+ if (freebno >= wantbno || (userdata && freeend < wantend)) {
if ((newbno1 = roundup(freebno, alignment)) >= freeend)
newbno1 = NULLAGBLOCK;
} else if (freeend >= wantend && alignment > 1) {
@@ -201,16 +257,14 @@ xfs_alloc_fix_len(
k = rlen % args->prod;
if (k == args->mod)
return;
- if (k > args->mod) {
- if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
- return;
- } else {
- if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
- (int)args->minlen)
- return;
- }
- ASSERT(rlen >= args->minlen);
- ASSERT(rlen <= args->maxlen);
+ if (k > args->mod)
+ rlen = rlen - (k - args->mod);
+ else
+ rlen = rlen - args->prod + (args->mod - k);
+ if ((int)rlen < (int)args->minlen)
+ return;
+ ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
+ ASSERT(rlen % args->prod == args->mod);
args->len = rlen;
}
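A minimal stand-alone sketch of the new length adjustment (a hypothetical helper, not code from the patch): the length is shrunk until rlen % prod == mod, and the function then returns without updating args->len only if the result has dropped below minlen:

#include <assert.h>

static unsigned int fix_len(unsigned int rlen, unsigned int prod,
			    unsigned int mod)
{
	unsigned int k = rlen % prod;

	if (k == mod)
		return rlen;
	if (k > mod)
		rlen -= k - mod;		/* trim down within one prod */
	else
		rlen -= prod - (mod - k);	/* drop most of a full prod */
	return rlen;
}

int main(void)
{
	/* prod = 4, mod = 1: valid lengths satisfy len % 4 == 1. */
	assert(fix_len(11, 4, 1) == 9);	/* k = 3 > mod: 11 - 2 */
	assert(fix_len(12, 4, 1) == 9);	/* k = 0 < mod: 12 - 4 + 1 */
	assert(fix_len(9, 4, 1) == 9);	/* already correct */
	return 0;
}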
@@ -229,7 +283,6 @@ xfs_alloc_fix_minleft(
return 1;
agf = XFS_BUF_TO_AGF(args->agbp);
diff = be32_to_cpu(agf->agf_freeblks)
- + be32_to_cpu(agf->agf_flcount)
- args->len - args->minleft;
if (diff >= 0)
return 1;
@@ -294,21 +347,20 @@ xfs_alloc_fixup_trees(
return error;
XFS_WANT_CORRUPTED_RETURN(i == 1);
}
+
#ifdef DEBUG
- {
- xfs_alloc_block_t *bnoblock;
- xfs_alloc_block_t *cntblock;
-
- if (bno_cur->bc_nlevels == 1 &&
- cnt_cur->bc_nlevels == 1) {
- bnoblock = XFS_BUF_TO_ALLOC_BLOCK(bno_cur->bc_bufs[0]);
- cntblock = XFS_BUF_TO_ALLOC_BLOCK(cnt_cur->bc_bufs[0]);
- XFS_WANT_CORRUPTED_RETURN(
- be16_to_cpu(bnoblock->bb_numrecs) ==
- be16_to_cpu(cntblock->bb_numrecs));
- }
+ if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
+ struct xfs_btree_block *bnoblock;
+ struct xfs_btree_block *cntblock;
+
+ bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
+ cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
+
+ XFS_WANT_CORRUPTED_RETURN(
+ bnoblock->bb_numrecs == cntblock->bb_numrecs);
}
#endif
+
/*
* Deal with all four cases: the allocated record is contained
* within the freespace record, so we can have new freespace
@@ -333,7 +385,7 @@ xfs_alloc_fixup_trees(
/*
* Delete the entry from the by-size btree.
*/
- if ((error = xfs_alloc_delete(cnt_cur, &i)))
+ if ((error = xfs_btree_delete(cnt_cur, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 1);
/*
@@ -343,7 +395,7 @@ xfs_alloc_fixup_trees(
if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 0);
- if ((error = xfs_alloc_insert(cnt_cur, &i)))
+ if ((error = xfs_btree_insert(cnt_cur, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 1);
}
@@ -351,7 +403,7 @@ xfs_alloc_fixup_trees(
if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 0);
- if ((error = xfs_alloc_insert(cnt_cur, &i)))
+ if ((error = xfs_btree_insert(cnt_cur, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 1);
}
@@ -362,7 +414,7 @@ xfs_alloc_fixup_trees(
/*
* No remaining freespace, just delete the by-block tree entry.
*/
- if ((error = xfs_alloc_delete(bno_cur, &i)))
+ if ((error = xfs_btree_delete(bno_cur, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 1);
} else {
@@ -379,13 +431,94 @@ xfs_alloc_fixup_trees(
if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 0);
- if ((error = xfs_alloc_insert(bno_cur, &i)))
+ if ((error = xfs_btree_insert(bno_cur, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 1);
}
return 0;
}
+static bool
+xfs_agfl_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp);
+ int i;
+
+ if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_uuid))
+ return false;
+ if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
+ return false;
+ /*
+ * During growfs operations the perag is not fully initialised, so we
+ * can't use it for any useful checking. growfs works around this by
+ * using uncached buffers that don't have the perag attached, which
+ * lets us detect this case and avoid the problem.
+ */
+ if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
+ return false;
+
+ for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
+ if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
+ be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
+ return false;
+ }
+ return true;
+}
+
+static void
+xfs_agfl_read_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+
+ /*
+ * There is no verification of non-crc AGFLs because mkfs does not
+ * initialise the AGFL to zero or NULL. Hence the only valid part of the
+ * AGFL is what the AGF says is active. We can't get to the AGF, so we
+ * can't verify just those entries are valid.
+ */
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return;
+
+ if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
+ xfs_buf_ioerror(bp, EFSBADCRC);
+ else if (!xfs_agfl_verify(bp))
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
+
+ if (bp->b_error)
+ xfs_verifier_error(bp);
+}
+
+static void
+xfs_agfl_write_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_buf_log_item *bip = bp->b_fspriv;
+
+ /* no verification of non-crc AGFLs */
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return;
+
+ if (!xfs_agfl_verify(bp)) {
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
+ xfs_verifier_error(bp);
+ return;
+ }
+
+ if (bip)
+ XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+
+ xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
+}
+
+const struct xfs_buf_ops xfs_agfl_buf_ops = {
+ .verify_read = xfs_agfl_read_verify,
+ .verify_write = xfs_agfl_write_verify,
+};
+
/*
* Read in the allocation group free block array.
*/
@@ -403,133 +536,34 @@ xfs_alloc_read_agfl(
error = xfs_trans_read_buf(
mp, tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
- XFS_FSS_TO_BB(mp, 1), 0, &bp);
+ XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
if (error)
return error;
- ASSERT(bp);
- ASSERT(!XFS_BUF_GETERROR(bp));
- XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGFL, XFS_AGFL_REF);
+ xfs_buf_set_ref(bp, XFS_AGFL_REF);
*bpp = bp;
return 0;
}
-#if defined(XFS_ALLOC_TRACE)
-/*
- * Add an allocation trace entry for an alloc call.
- */
-STATIC void
-xfs_alloc_trace_alloc(
- const char *name, /* function tag string */
- char *str, /* additional string */
- xfs_alloc_arg_t *args, /* allocation argument structure */
- int line) /* source line number */
+STATIC int
+xfs_alloc_update_counters(
+ struct xfs_trans *tp,
+ struct xfs_perag *pag,
+ struct xfs_buf *agbp,
+ long len)
{
- ktrace_enter(xfs_alloc_trace_buf,
- (void *)(__psint_t)(XFS_ALLOC_KTRACE_ALLOC | (line << 16)),
- (void *)name,
- (void *)str,
- (void *)args->mp,
- (void *)(__psunsigned_t)args->agno,
- (void *)(__psunsigned_t)args->agbno,
- (void *)(__psunsigned_t)args->minlen,
- (void *)(__psunsigned_t)args->maxlen,
- (void *)(__psunsigned_t)args->mod,
- (void *)(__psunsigned_t)args->prod,
- (void *)(__psunsigned_t)args->minleft,
- (void *)(__psunsigned_t)args->total,
- (void *)(__psunsigned_t)args->alignment,
- (void *)(__psunsigned_t)args->len,
- (void *)((((__psint_t)args->type) << 16) |
- (__psint_t)args->otype),
- (void *)(__psint_t)((args->wasdel << 3) |
- (args->wasfromfl << 2) |
- (args->isfl << 1) |
- (args->userdata << 0)));
-}
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
-/*
- * Add an allocation trace entry for a free call.
- */
-STATIC void
-xfs_alloc_trace_free(
- const char *name, /* function tag string */
- char *str, /* additional string */
- xfs_mount_t *mp, /* file system mount point */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_agblock_t agbno, /* a.g. relative block number */
- xfs_extlen_t len, /* length of extent */
- int isfl, /* set if is freelist allocation/free */
- int line) /* source line number */
-{
- ktrace_enter(xfs_alloc_trace_buf,
- (void *)(__psint_t)(XFS_ALLOC_KTRACE_FREE | (line << 16)),
- (void *)name,
- (void *)str,
- (void *)mp,
- (void *)(__psunsigned_t)agno,
- (void *)(__psunsigned_t)agbno,
- (void *)(__psunsigned_t)len,
- (void *)(__psint_t)isfl,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-}
+ pag->pagf_freeblks += len;
+ be32_add_cpu(&agf->agf_freeblks, len);
-/*
- * Add an allocation trace entry for modifying an agf.
- */
-STATIC void
-xfs_alloc_trace_modagf(
- const char *name, /* function tag string */
- char *str, /* additional string */
- xfs_mount_t *mp, /* file system mount point */
- xfs_agf_t *agf, /* new agf value */
- int flags, /* logging flags for agf */
- int line) /* source line number */
-{
- ktrace_enter(xfs_alloc_trace_buf,
- (void *)(__psint_t)(XFS_ALLOC_KTRACE_MODAGF | (line << 16)),
- (void *)name,
- (void *)str,
- (void *)mp,
- (void *)(__psint_t)flags,
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_seqno),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_length),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flfirst),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_fllast),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flcount),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_freeblks),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_longest));
-}
+ xfs_trans_agblocks_delta(tp, len);
+ if (unlikely(be32_to_cpu(agf->agf_freeblks) >
+ be32_to_cpu(agf->agf_length)))
+ return EFSCORRUPTED;
-STATIC void
-xfs_alloc_trace_busy(
- const char *name, /* function tag string */
- char *str, /* additional string */
- xfs_mount_t *mp, /* file system mount point */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_agblock_t agbno, /* a.g. relative block number */
- xfs_extlen_t len, /* length of extent */
- int slot, /* perag Busy slot */
- xfs_trans_t *tp,
- int trtype, /* type: add, delete, search */
- int line) /* source line number */
-{
- ktrace_enter(xfs_alloc_trace_buf,
- (void *)(__psint_t)(trtype | (line << 16)),
- (void *)name,
- (void *)str,
- (void *)mp,
- (void *)(__psunsigned_t)agno,
- (void *)(__psunsigned_t)agbno,
- (void *)(__psunsigned_t)len,
- (void *)(__psint_t)slot,
- (void *)tp,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
+ return 0;
}
-#endif /* XFS_ALLOC_TRACE */
/*
* Allocation group level functions.
@@ -572,46 +606,36 @@ xfs_alloc_ag_vextent(
ASSERT(0);
/* NOTREACHED */
}
- if (error)
+
+ if (error || args->agbno == NULLAGBLOCK)
return error;
- /*
- * If the allocation worked, need to change the agf structure
- * (and log it), and the superblock.
- */
- if (args->agbno != NULLAGBLOCK) {
- xfs_agf_t *agf; /* allocation group freelist header */
-#ifdef XFS_ALLOC_TRACE
- xfs_mount_t *mp = args->mp;
-#endif
- long slen = (long)args->len;
- ASSERT(args->len >= args->minlen && args->len <= args->maxlen);
- ASSERT(!(args->wasfromfl) || !args->isfl);
- ASSERT(args->agbno % args->alignment == 0);
- if (!(args->wasfromfl)) {
-
- agf = XFS_BUF_TO_AGF(args->agbp);
- be32_add_cpu(&agf->agf_freeblks, -(args->len));
- xfs_trans_agblocks_delta(args->tp,
- -((long)(args->len)));
- args->pag->pagf_freeblks -= args->len;
- ASSERT(be32_to_cpu(agf->agf_freeblks) <=
- be32_to_cpu(agf->agf_length));
- TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
- xfs_alloc_log_agf(args->tp, args->agbp,
- XFS_AGF_FREEBLKS);
- /* search the busylist for these blocks */
- xfs_alloc_search_busy(args->tp, args->agno,
- args->agbno, args->len);
- }
- if (!args->isfl)
- xfs_trans_mod_sb(args->tp,
- args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS :
- XFS_TRANS_SB_FDBLOCKS, -slen);
- XFS_STATS_INC(xs_allocx);
- XFS_STATS_ADD(xs_allocb, args->len);
+ ASSERT(args->len >= args->minlen);
+ ASSERT(args->len <= args->maxlen);
+ ASSERT(!args->wasfromfl || !args->isfl);
+ ASSERT(args->agbno % args->alignment == 0);
+
+ if (!args->wasfromfl) {
+ error = xfs_alloc_update_counters(args->tp, args->pag,
+ args->agbp,
+ -((long)(args->len)));
+ if (error)
+ return error;
+
+ ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
+ args->agbno, args->len));
}
- return 0;
+
+ if (!args->isfl) {
+ xfs_trans_mod_sb(args->tp, args->wasdel ?
+ XFS_TRANS_SB_RES_FDBLOCKS :
+ XFS_TRANS_SB_FDBLOCKS,
+ -((long)(args->len)));
+ }
+
+ XFS_STATS_INC(xs_allocx);
+ XFS_STATS_ADD(xs_allocb, args->len);
+ return error;
}
/*
@@ -626,94 +650,194 @@ xfs_alloc_ag_vextent_exact(
{
xfs_btree_cur_t *bno_cur;/* by block-number btree cursor */
xfs_btree_cur_t *cnt_cur;/* by count btree cursor */
- xfs_agblock_t end; /* end of allocated extent */
int error;
xfs_agblock_t fbno; /* start block of found extent */
- xfs_agblock_t fend; /* end block of found extent */
xfs_extlen_t flen; /* length of found extent */
+ xfs_agblock_t tbno; /* start block of trimmed extent */
+ xfs_extlen_t tlen; /* length of trimmed extent */
+ xfs_agblock_t tend; /* end block of trimmed extent */
int i; /* success/failure of operation */
- xfs_agblock_t maxend; /* end of maximal extent */
- xfs_agblock_t minend; /* end of minimal extent */
- xfs_extlen_t rlen; /* length of returned extent */
ASSERT(args->alignment == 1);
+
/*
* Allocate/initialize a cursor for the by-number freespace btree.
*/
- bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_BNO, NULL, 0);
+ bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+ args->agno, XFS_BTNUM_BNO);
+
/*
* Lookup bno and minlen in the btree (minlen is irrelevant, really).
* Look for the closest free block <= bno, it must contain bno
* if any free block does.
*/
- if ((error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i)))
+ error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
+ if (error)
goto error0;
- if (!i) {
- /*
- * Didn't find it, return null.
- */
- xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
- args->agbno = NULLAGBLOCK;
- return 0;
- }
+ if (!i)
+ goto not_found;
+
/*
* Grab the freespace record.
*/
- if ((error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i)))
+ error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
+ if (error)
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
ASSERT(fbno <= args->agbno);
- minend = args->agbno + args->minlen;
- maxend = args->agbno + args->maxlen;
- fend = fbno + flen;
+
/*
- * Give up if the freespace isn't long enough for the minimum request.
+ * Check for overlapping busy extents.
*/
- if (fend < minend) {
- xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
- args->agbno = NULLAGBLOCK;
- return 0;
- }
+ xfs_extent_busy_trim(args, fbno, flen, &tbno, &tlen);
+
/*
- * End of extent will be smaller of the freespace end and the
- * maximal requested end.
+ * Give up if the start of the extent is busy, or the freespace isn't
+ * long enough for the minimum request.
*/
- end = XFS_AGBLOCK_MIN(fend, maxend);
+ if (tbno > args->agbno)
+ goto not_found;
+ if (tlen < args->minlen)
+ goto not_found;
+ tend = tbno + tlen;
+ if (tend < args->agbno + args->minlen)
+ goto not_found;
+
/*
+ * End of extent will be the smaller of the freespace end and the
+ * maximal requested end.
+ *
* Fix the length according to mod and prod if given.
*/
- args->len = end - args->agbno;
+ args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
+ - args->agbno;
xfs_alloc_fix_len(args);
- if (!xfs_alloc_fix_minleft(args)) {
- xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
- return 0;
- }
- rlen = args->len;
- ASSERT(args->agbno + rlen <= fend);
- end = args->agbno + rlen;
+ if (!xfs_alloc_fix_minleft(args))
+ goto not_found;
+
+ ASSERT(args->agbno + args->len <= tend);
+
/*
- * We are allocating agbno for rlen [agbno .. end]
+ * We are allocating agbno for args->len
* Allocate/initialize a cursor for the by-size btree.
*/
- cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_CNT, NULL, 0);
+ cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+ args->agno, XFS_BTNUM_CNT);
ASSERT(args->agbno + args->len <=
be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
- if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
- args->agbno, args->len, XFSA_FIXUP_BNO_OK))) {
+ error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
+ args->len, XFSA_FIXUP_BNO_OK);
+ if (error) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
goto error0;
}
+
xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- TRACE_ALLOC("normal", args);
+
args->wasfromfl = 0;
+ trace_xfs_alloc_exact_done(args);
+ return 0;
+
+not_found:
+ /* Didn't find it, return null. */
+ xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
+ args->agbno = NULLAGBLOCK;
+ trace_xfs_alloc_exact_notfound(args);
return 0;
error0:
xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
- TRACE_ALLOC("error", args);
+ trace_xfs_alloc_exact_error(args);
+ return error;
+}
+
+/*
+ * Search the btree in a given direction via the search cursor and compare
+ * the records found against the good extent we've already found.
+ */
+STATIC int
+xfs_alloc_find_best_extent(
+ struct xfs_alloc_arg *args, /* allocation argument structure */
+ struct xfs_btree_cur **gcur, /* good cursor */
+ struct xfs_btree_cur **scur, /* searching cursor */
+ xfs_agblock_t gdiff, /* difference for search comparison */
+ xfs_agblock_t *sbno, /* extent found by search */
+ xfs_extlen_t *slen, /* extent length */
+ xfs_agblock_t *sbnoa, /* aligned extent found by search */
+ xfs_extlen_t *slena, /* aligned extent length */
+ int dir) /* 0 = search right, 1 = search left */
+{
+ xfs_agblock_t new;
+ xfs_agblock_t sdiff;
+ int error;
+ int i;
+
+ /* The good extent is perfect, no need to search. */
+ if (!gdiff)
+ goto out_use_good;
+
+ /*
+ * Look until we find a better one, run out of space or run off the end.
+ */
+ do {
+ error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
+ if (error)
+ goto error0;
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena);
+
+ /*
+ * The good extent is closer than this one.
+ */
+ if (!dir) {
+ if (*sbnoa >= args->agbno + gdiff)
+ goto out_use_good;
+ } else {
+ if (*sbnoa <= args->agbno - gdiff)
+ goto out_use_good;
+ }
+
+ /*
+ * Same distance, compare length and pick the best.
+ */
+ if (*slena >= args->minlen) {
+ args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
+ xfs_alloc_fix_len(args);
+
+ sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
+ args->alignment,
+ args->userdata, *sbnoa,
+ *slena, &new);
+
+ /*
+ * Choose closer size and invalidate other cursor.
+ */
+ if (sdiff < gdiff)
+ goto out_use_search;
+ goto out_use_good;
+ }
+
+ if (!dir)
+ error = xfs_btree_increment(*scur, 0, &i);
+ else
+ error = xfs_btree_decrement(*scur, 0, &i);
+ if (error)
+ goto error0;
+ } while (i);
+
+out_use_good:
+ xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
+ *scur = NULL;
+ return 0;
+
+out_use_search:
+ xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
+ *gcur = NULL;
+ return 0;
+
+error0:
+ /* caller invalidates cursors */
return error;
}
@@ -742,27 +866,33 @@ xfs_alloc_ag_vextent_near(
xfs_agblock_t ltbno; /* start bno of left side entry */
xfs_agblock_t ltbnoa; /* aligned ... */
xfs_extlen_t ltdiff; /* difference to left side entry */
- /*REFERENCED*/
- xfs_agblock_t ltend; /* end bno of left side entry */
xfs_extlen_t ltlen; /* length of left side entry */
xfs_extlen_t ltlena; /* aligned ... */
xfs_agblock_t ltnew; /* useful start bno of left side */
xfs_extlen_t rlen; /* length of returned extent */
-#if defined(DEBUG) && defined(__KERNEL__)
+ int forced = 0;
+#ifdef DEBUG
/*
* Randomly don't execute the first algorithm.
*/
int dofirst; /* set to do first algorithm */
- dofirst = random32() & 1;
+ dofirst = prandom_u32() & 1;
#endif
+
+restart:
+ bno_cur_lt = NULL;
+ bno_cur_gt = NULL;
+ ltlen = 0;
+ gtlena = 0;
+ ltlena = 0;
+
/*
* Get a cursor for the by-size btree.
*/
- cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_CNT, NULL, 0);
- ltlen = 0;
- bno_cur_lt = bno_cur_gt = NULL;
+ cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+ args->agno, XFS_BTNUM_CNT);
+
/*
* See if there are any free extents as big as maxlen.
*/
@@ -778,11 +908,13 @@ xfs_alloc_ag_vextent_near(
goto error0;
if (i == 0 || ltlen == 0) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+ trace_xfs_alloc_near_noentry(args);
return 0;
}
ASSERT(i == 1);
}
args->wasfromfl = 0;
+
/*
* First algorithm.
* If the requested extent is large wrt the freespaces available
@@ -799,8 +931,8 @@ xfs_alloc_ag_vextent_near(
xfs_extlen_t blen=0;
xfs_agblock_t bnew=0;
-#if defined(DEBUG) && defined(__KERNEL__)
- if (!dofirst)
+#ifdef DEBUG
+ if (dofirst)
break;
#endif
/*
@@ -818,7 +950,7 @@ xfs_alloc_ag_vextent_near(
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
if (ltlen >= args->minlen)
break;
- if ((error = xfs_alloc_increment(cnt_cur, 0, &i)))
+ if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
goto error0;
} while (i);
ASSERT(ltlen >= args->minlen);
@@ -828,7 +960,7 @@ xfs_alloc_ag_vextent_near(
i = cnt_cur->bc_ptrs[0];
for (j = 1, blen = 0, bdiff = 0;
!error && j && (blen < args->maxlen || bdiff > 0);
- error = xfs_alloc_increment(cnt_cur, 0, &j)) {
+ error = xfs_btree_increment(cnt_cur, 0, &j)) {
/*
* For each entry, decide if it's better than
* the previous best entry.
@@ -836,8 +968,8 @@ xfs_alloc_ag_vextent_near(
if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- xfs_alloc_compute_aligned(ltbno, ltlen, args->alignment,
- args->minlen, &ltbnoa, &ltlena);
+ xfs_alloc_compute_aligned(args, ltbno, ltlen,
+ &ltbnoa, &ltlena);
if (ltlena < args->minlen)
continue;
args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
@@ -846,7 +978,8 @@ xfs_alloc_ag_vextent_near(
if (args->len < blen)
continue;
ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
- args->alignment, ltbno, ltlen, &ltnew);
+ args->alignment, args->userdata, ltbnoa,
+ ltlena, &ltnew);
if (ltnew != NULLAGBLOCK &&
(args->len > blen || ltdiff < bdiff)) {
bdiff = ltdiff;
@@ -868,12 +1001,11 @@ xfs_alloc_ag_vextent_near(
if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- ltend = ltbno + ltlen;
- ASSERT(ltend <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
+ ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
args->len = blen;
if (!xfs_alloc_fix_minleft(args)) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- TRACE_ALLOC("nominleft", args);
+ trace_xfs_alloc_near_nominleft(args);
return 0;
}
blen = args->len;
@@ -882,12 +1014,12 @@ xfs_alloc_ag_vextent_near(
*/
args->agbno = bnew;
ASSERT(bnew >= ltbno);
- ASSERT(bnew + blen <= ltend);
+ ASSERT(bnew + blen <= ltbno + ltlen);
/*
* Set up a cursor for the by-bno tree.
*/
- bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp,
- args->agbp, args->agno, XFS_BTNUM_BNO, NULL, 0);
+ bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
+ args->agbp, args->agno, XFS_BTNUM_BNO);
/*
* Fix up the btree entries.
*/
@@ -896,7 +1028,8 @@ xfs_alloc_ag_vextent_near(
goto error0;
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
- TRACE_ALLOC("first", args);
+
+ trace_xfs_alloc_near_first(args);
return 0;
}
/*
@@ -914,8 +1047,8 @@ xfs_alloc_ag_vextent_near(
/*
* Allocate and initialize the cursor for the leftward search.
*/
- bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_BNO, NULL, 0);
+ bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+ args->agno, XFS_BTNUM_BNO);
/*
* Lookup <= bno to find the leftward search's starting point.
*/
@@ -938,7 +1071,7 @@ xfs_alloc_ag_vextent_near(
* Increment the cursor, so we will point at the entry just right
* of the leftward entry if any, or to the leftmost entry.
*/
- if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i)))
+ if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
goto error0;
if (!i) {
/*
@@ -957,11 +1090,11 @@ xfs_alloc_ag_vextent_near(
if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- xfs_alloc_compute_aligned(ltbno, ltlen, args->alignment,
- args->minlen, &ltbnoa, &ltlena);
+ xfs_alloc_compute_aligned(args, ltbno, ltlen,
+ &ltbnoa, &ltlena);
if (ltlena >= args->minlen)
break;
- if ((error = xfs_alloc_decrement(bno_cur_lt, 0, &i)))
+ if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
goto error0;
if (!i) {
xfs_btree_del_cursor(bno_cur_lt,
@@ -973,11 +1106,11 @@ xfs_alloc_ag_vextent_near(
if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- xfs_alloc_compute_aligned(gtbno, gtlen, args->alignment,
- args->minlen, &gtbnoa, &gtlena);
+ xfs_alloc_compute_aligned(args, gtbno, gtlen,
+ &gtbnoa, &gtlena);
if (gtlena >= args->minlen)
break;
- if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i)))
+ if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
goto error0;
if (!i) {
xfs_btree_del_cursor(bno_cur_gt,
@@ -986,211 +1119,65 @@ xfs_alloc_ag_vextent_near(
}
}
} while (bno_cur_lt || bno_cur_gt);
+
/*
* Got both cursors still active, need to find better entry.
*/
if (bno_cur_lt && bno_cur_gt) {
- /*
- * Left side is long enough, look for a right side entry.
- */
if (ltlena >= args->minlen) {
/*
- * Fix up the length.
+ * Left side is good, look for a right side entry.
*/
args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
xfs_alloc_fix_len(args);
- rlen = args->len;
- ltdiff = xfs_alloc_compute_diff(args->agbno, rlen,
- args->alignment, ltbno, ltlen, &ltnew);
- /*
- * Not perfect.
- */
- if (ltdiff) {
- /*
- * Look until we find a better one, run out of
- * space, or run off the end.
- */
- while (bno_cur_lt && bno_cur_gt) {
- if ((error = xfs_alloc_get_rec(
- bno_cur_gt, &gtbno,
- &gtlen, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- xfs_alloc_compute_aligned(gtbno, gtlen,
- args->alignment, args->minlen,
- &gtbnoa, &gtlena);
- /*
- * The left one is clearly better.
- */
- if (gtbnoa >= args->agbno + ltdiff) {
- xfs_btree_del_cursor(
- bno_cur_gt,
- XFS_BTREE_NOERROR);
- bno_cur_gt = NULL;
- break;
- }
- /*
- * If we reach a big enough entry,
- * compare the two and pick the best.
- */
- if (gtlena >= args->minlen) {
- args->len =
- XFS_EXTLEN_MIN(gtlena,
- args->maxlen);
- xfs_alloc_fix_len(args);
- rlen = args->len;
- gtdiff = xfs_alloc_compute_diff(
- args->agbno, rlen,
- args->alignment,
- gtbno, gtlen, &gtnew);
- /*
- * Right side is better.
- */
- if (gtdiff < ltdiff) {
- xfs_btree_del_cursor(
- bno_cur_lt,
- XFS_BTREE_NOERROR);
- bno_cur_lt = NULL;
- }
- /*
- * Left side is better.
- */
- else {
- xfs_btree_del_cursor(
- bno_cur_gt,
- XFS_BTREE_NOERROR);
- bno_cur_gt = NULL;
- }
- break;
- }
- /*
- * Fell off the right end.
- */
- if ((error = xfs_alloc_increment(
- bno_cur_gt, 0, &i)))
- goto error0;
- if (!i) {
- xfs_btree_del_cursor(
- bno_cur_gt,
- XFS_BTREE_NOERROR);
- bno_cur_gt = NULL;
- break;
- }
- }
- }
- /*
- * The left side is perfect, trash the right side.
- */
- else {
- xfs_btree_del_cursor(bno_cur_gt,
- XFS_BTREE_NOERROR);
- bno_cur_gt = NULL;
- }
- }
- /*
- * It's the right side that was found first, look left.
- */
- else {
+ ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
+ args->alignment, args->userdata, ltbnoa,
+ ltlena, &ltnew);
+
+ error = xfs_alloc_find_best_extent(args,
+ &bno_cur_lt, &bno_cur_gt,
+ ltdiff, &gtbno, &gtlen,
+ &gtbnoa, &gtlena,
+ 0 /* search right */);
+ } else {
+ ASSERT(gtlena >= args->minlen);
+
/*
- * Fix up the length.
+ * Right side is good, look for a left side entry.
*/
args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
xfs_alloc_fix_len(args);
- rlen = args->len;
- gtdiff = xfs_alloc_compute_diff(args->agbno, rlen,
- args->alignment, gtbno, gtlen, &gtnew);
- /*
- * Right side entry isn't perfect.
- */
- if (gtdiff) {
- /*
- * Look until we find a better one, run out of
- * space, or run off the end.
- */
- while (bno_cur_lt && bno_cur_gt) {
- if ((error = xfs_alloc_get_rec(
- bno_cur_lt, &ltbno,
- &ltlen, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- xfs_alloc_compute_aligned(ltbno, ltlen,
- args->alignment, args->minlen,
- &ltbnoa, &ltlena);
- /*
- * The right one is clearly better.
- */
- if (ltbnoa <= args->agbno - gtdiff) {
- xfs_btree_del_cursor(
- bno_cur_lt,
- XFS_BTREE_NOERROR);
- bno_cur_lt = NULL;
- break;
- }
- /*
- * If we reach a big enough entry,
- * compare the two and pick the best.
- */
- if (ltlena >= args->minlen) {
- args->len = XFS_EXTLEN_MIN(
- ltlena, args->maxlen);
- xfs_alloc_fix_len(args);
- rlen = args->len;
- ltdiff = xfs_alloc_compute_diff(
- args->agbno, rlen,
- args->alignment,
- ltbno, ltlen, &ltnew);
- /*
- * Left side is better.
- */
- if (ltdiff < gtdiff) {
- xfs_btree_del_cursor(
- bno_cur_gt,
- XFS_BTREE_NOERROR);
- bno_cur_gt = NULL;
- }
- /*
- * Right side is better.
- */
- else {
- xfs_btree_del_cursor(
- bno_cur_lt,
- XFS_BTREE_NOERROR);
- bno_cur_lt = NULL;
- }
- break;
- }
- /*
- * Fell off the left end.
- */
- if ((error = xfs_alloc_decrement(
- bno_cur_lt, 0, &i)))
- goto error0;
- if (!i) {
- xfs_btree_del_cursor(bno_cur_lt,
- XFS_BTREE_NOERROR);
- bno_cur_lt = NULL;
- break;
- }
- }
- }
- /*
- * The right side is perfect, trash the left side.
- */
- else {
- xfs_btree_del_cursor(bno_cur_lt,
- XFS_BTREE_NOERROR);
- bno_cur_lt = NULL;
- }
+ gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
+ args->alignment, args->userdata, gtbnoa,
+ gtlena, &gtnew);
+
+ error = xfs_alloc_find_best_extent(args,
+ &bno_cur_gt, &bno_cur_lt,
+ gtdiff, &ltbno, &ltlen,
+ &ltbnoa, &ltlena,
+ 1 /* search left */);
}
+
+ if (error)
+ goto error0;
}
+
/*
* If we couldn't get anything, give up.
*/
if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
- TRACE_ALLOC("neither", args);
+ xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+
+ if (!forced++) {
+ trace_xfs_alloc_near_busy(args);
+ xfs_log_force(args->mp, XFS_LOG_SYNC);
+ goto restart;
+ }
+ trace_xfs_alloc_size_neither(args);
args->agbno = NULLAGBLOCK;
return 0;
}
+
/*
* At this point we have selected a freespace entry, either to the
* left or to the right. If it's on the right, copy all the
@@ -1207,35 +1194,41 @@ xfs_alloc_ag_vextent_near(
j = 1;
} else
j = 0;
+
/*
* Fix up the length and compute the useful address.
*/
- ltend = ltbno + ltlen;
args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
xfs_alloc_fix_len(args);
if (!xfs_alloc_fix_minleft(args)) {
- TRACE_ALLOC("nominleft", args);
+ trace_xfs_alloc_near_nominleft(args);
xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
return 0;
}
rlen = args->len;
- (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno,
- ltlen, &ltnew);
+ (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
+ args->userdata, ltbnoa, ltlena, &ltnew);
ASSERT(ltnew >= ltbno);
- ASSERT(ltnew + rlen <= ltend);
+ ASSERT(ltnew + rlen <= ltbnoa + ltlena);
ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
args->agbno = ltnew;
+
if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
ltnew, rlen, XFSA_FIXUP_BNO_OK)))
goto error0;
- TRACE_ALLOC(j ? "gt" : "lt", args);
+
+ if (j)
+ trace_xfs_alloc_near_greater(args);
+ else
+ trace_xfs_alloc_near_lesser(args);
+
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
return 0;
error0:
- TRACE_ALLOC("error", args);
+ trace_xfs_alloc_near_error(args);
if (cnt_cur != NULL)
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
if (bno_cur_lt != NULL)
@@ -1263,50 +1256,92 @@ xfs_alloc_ag_vextent_size(
int i; /* temp status variable */
xfs_agblock_t rbno; /* returned block number */
xfs_extlen_t rlen; /* length of returned extent */
+ int forced = 0;
+restart:
/*
* Allocate and initialize a cursor for the by-size btree.
*/
- cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_CNT, NULL, 0);
+ cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+ args->agno, XFS_BTNUM_CNT);
bno_cur = NULL;
+
/*
* Look for an entry >= maxlen+alignment-1 blocks.
*/
if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
args->maxlen + args->alignment - 1, &i)))
goto error0;
+
/*
- * If none, then pick up the last entry in the tree unless the
- * tree is empty.
+ * If there are none, or only busy extents that we cannot allocate from,
+ * then we have to settle for a smaller extent. In the case that there
+ * are no large extents, this will return the last entry in the tree unless
+ * the tree is empty. In the case that there are only busy large
+ * extents, this will return the largest small extent unless there
+ * are no smaller extents available.
*/
- if (!i) {
- if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &fbno,
- &flen, &i)))
+ if (!i || forced > 1) {
+ error = xfs_alloc_ag_vextent_small(args, cnt_cur,
+ &fbno, &flen, &i);
+ if (error)
goto error0;
if (i == 0 || flen == 0) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- TRACE_ALLOC("noentry", args);
+ trace_xfs_alloc_size_noentry(args);
return 0;
}
ASSERT(i == 1);
+ xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen);
+ } else {
+ /*
+ * Search for a non-busy extent that is large enough.
+ * If we are at low space, don't check, or if we fall off
+ * the end of the btree, turn off the busy check and
+ * restart.
+ */
+ for (;;) {
+ error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
+ if (error)
+ goto error0;
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+
+ xfs_alloc_compute_aligned(args, fbno, flen,
+ &rbno, &rlen);
+
+ if (rlen >= args->maxlen)
+ break;
+
+ error = xfs_btree_increment(cnt_cur, 0, &i);
+ if (error)
+ goto error0;
+ if (i == 0) {
+ /*
+ * Our only valid extents must have been busy.
+ * Make it unbusy by forcing the log out and
+ * retrying. If we've been here before, forcing
+ * the log isn't making the extents available,
+ * which means they have probably been freed in
+ * this transaction. In that case, we have to
+ * give up on them and we'll attempt a minlen
+ * allocation the next time around.
+ */
+ xfs_btree_del_cursor(cnt_cur,
+ XFS_BTREE_NOERROR);
+ trace_xfs_alloc_size_busy(args);
+ if (!forced++)
+ xfs_log_force(args->mp, XFS_LOG_SYNC);
+ goto restart;
+ }
+ }
}
- /*
- * There's a freespace as big as maxlen+alignment-1, get it.
- */
- else {
- if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- }
+
/*
* In the first case above, we got the last entry in the
* by-size btree. Now we check to see if the space hits maxlen
* once aligned; if not, we search left for something better.
* This can't happen in the second case above.
*/
- xfs_alloc_compute_aligned(fbno, flen, args->alignment, args->minlen,
- &rbno, &rlen);
rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
(rlen <= flen && rbno + rlen <= fbno + flen), error0);
@@ -1321,7 +1356,7 @@ xfs_alloc_ag_vextent_size(
bestflen = flen;
bestfbno = fbno;
for (;;) {
- if ((error = xfs_alloc_decrement(cnt_cur, 0, &i)))
+ if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
goto error0;
if (i == 0)
break;
@@ -1331,8 +1366,8 @@ xfs_alloc_ag_vextent_size(
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
if (flen < bestrlen)
break;
- xfs_alloc_compute_aligned(fbno, flen, args->alignment,
- args->minlen, &rbno, &rlen);
+ xfs_alloc_compute_aligned(args, fbno, flen,
+ &rbno, &rlen);
rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
(rlen <= flen && rbno + rlen <= fbno + flen),
@@ -1360,20 +1395,26 @@ xfs_alloc_ag_vextent_size(
* Fix up the length.
*/
args->len = rlen;
- xfs_alloc_fix_len(args);
- if (rlen < args->minlen || !xfs_alloc_fix_minleft(args)) {
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- TRACE_ALLOC("nominleft", args);
- args->agbno = NULLAGBLOCK;
- return 0;
+ if (rlen < args->minlen) {
+ if (!forced++) {
+ xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+ trace_xfs_alloc_size_busy(args);
+ xfs_log_force(args->mp, XFS_LOG_SYNC);
+ goto restart;
+ }
+ goto out_nominleft;
}
+ xfs_alloc_fix_len(args);
+
+ if (!xfs_alloc_fix_minleft(args))
+ goto out_nominleft;
rlen = args->len;
XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
/*
* Allocate and initialize a cursor for the by-block tree.
*/
- bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_BNO, NULL, 0);
+ bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+ args->agno, XFS_BTNUM_BNO);
if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
rbno, rlen, XFSA_FIXUP_CNT_OK)))
goto error0;
@@ -1386,16 +1427,22 @@ xfs_alloc_ag_vextent_size(
args->agbno + args->len <=
be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
error0);
- TRACE_ALLOC("normal", args);
+ trace_xfs_alloc_size_done(args);
return 0;
error0:
- TRACE_ALLOC("error", args);
+ trace_xfs_alloc_size_error(args);
if (cnt_cur)
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
if (bno_cur)
xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
return error;
+
+out_nominleft:
+ xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+ trace_xfs_alloc_size_nominleft(args);
+ args->agbno = NULLAGBLOCK;
+ return 0;
}
/*
@@ -1416,7 +1463,7 @@ xfs_alloc_ag_vextent_small(
xfs_extlen_t flen;
int i;
- if ((error = xfs_alloc_decrement(ccur, 0, &i)))
+ if ((error = xfs_btree_decrement(ccur, 0, &i)))
goto error0;
if (i) {
if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
@@ -1435,6 +1482,9 @@ xfs_alloc_ag_vextent_small(
if (error)
goto error0;
if (fbno != NULLAGBLOCK) {
+ xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
+ args->userdata);
+
if (args->userdata) {
xfs_buf_t *bp;
@@ -1449,7 +1499,7 @@ xfs_alloc_ag_vextent_small(
be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
error0);
args->wasfromfl = 1;
- TRACE_ALLOC("freelist", args);
+ trace_xfs_alloc_small_freelist(args);
*stat = 0;
return 0;
}
@@ -1471,17 +1521,17 @@ xfs_alloc_ag_vextent_small(
*/
if (flen < args->minlen) {
args->agbno = NULLAGBLOCK;
- TRACE_ALLOC("notenough", args);
+ trace_xfs_alloc_small_notenough(args);
flen = 0;
}
*fbnop = fbno;
*flenp = flen;
*stat = 1;
- TRACE_ALLOC("normal", args);
+ trace_xfs_alloc_small_done(args);
return 0;
error0:
- TRACE_ALLOC("error", args);
+ trace_xfs_alloc_small_error(args);
return error;
}
@@ -1510,13 +1560,13 @@ xfs_free_ag_extent(
xfs_mount_t *mp; /* mount point struct for filesystem */
xfs_agblock_t nbno; /* new starting block of freespace */
xfs_extlen_t nlen; /* new length of freespace */
+ xfs_perag_t *pag; /* per allocation group data */
mp = tp->t_mountp;
/*
* Allocate and initialize a cursor for the by-block btree.
*/
- bno_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO, NULL,
- 0);
+ bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
cnt_cur = NULL;
/*
* Look for a neighboring block on the left (lower block numbers)
@@ -1549,7 +1599,7 @@ xfs_free_ag_extent(
* Look for a neighboring block on the right (higher block numbers)
* that is contiguous with this space.
*/
- if ((error = xfs_alloc_increment(bno_cur, 0, &haveright)))
+ if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
goto error0;
if (haveright) {
/*
@@ -1575,8 +1625,7 @@ xfs_free_ag_extent(
/*
* Now allocate and initialize a cursor for the by-size tree.
*/
- cnt_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT, NULL,
- 0);
+ cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
/*
* Have both left and right contiguous neighbors.
* Merge all three into a single free block.
@@ -1588,7 +1637,7 @@ xfs_free_ag_extent(
if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- if ((error = xfs_alloc_delete(cnt_cur, &i)))
+ if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/*
@@ -1597,19 +1646,19 @@ xfs_free_ag_extent(
if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- if ((error = xfs_alloc_delete(cnt_cur, &i)))
+ if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/*
* Delete the old by-block entry for the right block.
*/
- if ((error = xfs_alloc_delete(bno_cur, &i)))
+ if ((error = xfs_btree_delete(bno_cur, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/*
* Move the by-block cursor back to the left neighbor.
*/
- if ((error = xfs_alloc_decrement(bno_cur, 0, &i)))
+ if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
#ifdef DEBUG
@@ -1648,14 +1697,14 @@ xfs_free_ag_extent(
if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- if ((error = xfs_alloc_delete(cnt_cur, &i)))
+ if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/*
* Back up the by-block cursor to the left neighbor, and
* update its length.
*/
- if ((error = xfs_alloc_decrement(bno_cur, 0, &i)))
+ if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
nbno = ltbno;
@@ -1674,7 +1723,7 @@ xfs_free_ag_extent(
if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- if ((error = xfs_alloc_delete(cnt_cur, &i)))
+ if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/*
@@ -1693,7 +1742,7 @@ xfs_free_ag_extent(
else {
nbno = bno;
nlen = len;
- if ((error = xfs_alloc_insert(bno_cur, &i)))
+ if ((error = xfs_btree_insert(bno_cur, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
}
@@ -1705,55 +1754,32 @@ xfs_free_ag_extent(
if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
- if ((error = xfs_alloc_insert(cnt_cur, &i)))
+ if ((error = xfs_btree_insert(cnt_cur, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
cnt_cur = NULL;
+
/*
* Update the freespace totals in the ag and superblock.
*/
- {
- xfs_agf_t *agf;
- xfs_perag_t *pag; /* per allocation group data */
-
- agf = XFS_BUF_TO_AGF(agbp);
- pag = &mp->m_perag[agno];
- be32_add_cpu(&agf->agf_freeblks, len);
- xfs_trans_agblocks_delta(tp, len);
- pag->pagf_freeblks += len;
- XFS_WANT_CORRUPTED_GOTO(
- be32_to_cpu(agf->agf_freeblks) <=
- be32_to_cpu(agf->agf_length),
- error0);
- TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
- xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
- if (!isfl)
- xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
- XFS_STATS_INC(xs_freex);
- XFS_STATS_ADD(xs_freeb, len);
- }
- TRACE_FREE(haveleft ?
- (haveright ? "both" : "left") :
- (haveright ? "right" : "none"),
- agno, bno, len, isfl);
-
- /*
- * Since blocks move to the free list without the coordination
- * used in xfs_bmap_finish, we can't allow block to be available
- * for reallocation and non-transaction writing (user data)
- * until we know that the transaction that moved it to the free
- * list is permanently on disk. We track the blocks by declaring
- * these blocks as "busy"; the busy list is maintained on a per-ag
- * basis and each transaction records which entries should be removed
- * when the iclog commits to disk. If a busy block is allocated,
- * the iclog is pushed up to the LSN that freed the block.
- */
- xfs_alloc_mark_busy(tp, agno, bno, len);
+ pag = xfs_perag_get(mp, agno);
+ error = xfs_alloc_update_counters(tp, pag, agbp, len);
+ xfs_perag_put(pag);
+ if (error)
+ goto error0;
+
+ if (!isfl)
+ xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
+ XFS_STATS_INC(xs_freex);
+ XFS_STATS_ADD(xs_freeb, len);
+
+ trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
+
return 0;
error0:
- TRACE_FREE("error", agno, bno, len, isfl);
+ trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
if (bno_cur)
xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
if (cnt_cur)
@@ -1789,6 +1815,25 @@ xfs_alloc_compute_maxlevels(
}
/*
+ * Find the length of the longest extent in an AG.
+ */
+xfs_extlen_t
+xfs_alloc_longest_free_extent(
+ struct xfs_mount *mp,
+ struct xfs_perag *pag)
+{
+ xfs_extlen_t need, delta = 0;
+
+ need = XFS_MIN_FREELIST_PAG(pag, mp);
+ if (need > pag->pagf_flcount)
+ delta = need - pag->pagf_flcount;
+
+ if (pag->pagf_longest > delta)
+ return pag->pagf_longest - delta;
+ return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
+}
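A stand-alone sketch of this calculation with hypothetical numbers, where the need parameter stands in for XFS_MIN_FREELIST_PAG(): the longest free extent is discounted by whatever the freelist still has to absorb, and when nothing survives that, the result degrades to 1 or 0 depending on whether any block is left at all:

#include <assert.h>

static unsigned int longest_free_extent(unsigned int need,
					unsigned int flcount,
					unsigned int longest)
{
	unsigned int delta = 0;

	if (need > flcount)
		delta = need - flcount;	/* blocks the freelist will consume */

	if (longest > delta)
		return longest - delta;
	/* At best a single stray block may still be found. */
	return flcount > 0 || longest > 0;
}

int main(void)
{
	assert(longest_free_extent(8, 3, 50) == 45);	/* freelist needs 5 more */
	assert(longest_free_extent(8, 8, 50) == 50);	/* freelist already full */
	assert(longest_free_extent(8, 3, 4) == 1);	/* only odd blocks remain */
	assert(longest_free_extent(8, 0, 0) == 0);	/* AG completely empty */
	return 0;
}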
+
+/*
* Decide whether to use this allocation group for this allocation.
* If so, fix up the btree freelist's size.
*/
@@ -1840,15 +1885,12 @@ xfs_alloc_fix_freelist(
}
if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
- need = XFS_MIN_FREELIST_PAG(pag, mp);
- delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0;
/*
* If it looks like there isn't a long enough extent, or enough
* total blocks, reject it.
*/
- longest = (pag->pagf_longest > delta) ?
- (pag->pagf_longest - delta) :
- (pag->pagf_flcount > 0 || pag->pagf_longest > 0);
+ need = XFS_MIN_FREELIST_PAG(pag, mp);
+ longest = xfs_alloc_longest_free_extent(mp, pag);
if ((args->minlen + args->alignment + args->minalignslop - 1) >
longest ||
((int)(pag->pagf_freeblks + pag->pagf_flcount -
@@ -1916,12 +1958,11 @@ xfs_alloc_fix_freelist(
/*
* Initialize the args structure.
*/
+ memset(&targs, 0, sizeof(targs));
targs.tp = tp;
targs.mp = mp;
targs.agbp = agbp;
targs.agno = args->agno;
- targs.mod = targs.minleft = targs.wasdel = targs.userdata =
- targs.minalignslop = 0;
targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
targs.type = XFS_ALLOCTYPE_THIS_AG;
targs.pag = pag;
@@ -1979,18 +2020,18 @@ xfs_alloc_get_freelist(
int btreeblk) /* destination is a AGF btree */
{
xfs_agf_t *agf; /* a.g. freespace structure */
- xfs_agfl_t *agfl; /* a.g. freelist structure */
xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
xfs_agblock_t bno; /* block number returned */
+ __be32 *agfl_bno;
int error;
int logflags;
- xfs_mount_t *mp; /* mount structure */
+ xfs_mount_t *mp = tp->t_mountp;
xfs_perag_t *pag; /* per allocation group data */
- agf = XFS_BUF_TO_AGF(agbp);
/*
* Freelist is empty, give up.
*/
+ agf = XFS_BUF_TO_AGF(agbp);
if (!agf->agf_flcount) {
*bnop = NULLAGBLOCK;
return 0;
@@ -1998,23 +2039,27 @@ xfs_alloc_get_freelist(
/*
* Read the array of free blocks.
*/
- mp = tp->t_mountp;
- if ((error = xfs_alloc_read_agfl(mp, tp,
- be32_to_cpu(agf->agf_seqno), &agflbp)))
+ error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
+ &agflbp);
+ if (error)
return error;
- agfl = XFS_BUF_TO_AGFL(agflbp);
+
/*
* Get the block number and update the data structures.
*/
- bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
+ agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
+ bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
be32_add_cpu(&agf->agf_flfirst, 1);
xfs_trans_brelse(tp, agflbp);
if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
agf->agf_flfirst = 0;
- pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];
+
+ pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
be32_add_cpu(&agf->agf_flcount, -1);
xfs_trans_agflist_delta(tp, -1);
pag->pagf_flcount--;
+ xfs_perag_put(pag);
logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
if (btreeblk) {
@@ -2023,19 +2068,9 @@ xfs_alloc_get_freelist(
logflags |= XFS_AGF_BTREEBLKS;
}
- TRACE_MODAGF(NULL, agf, logflags);
xfs_alloc_log_agf(tp, agbp, logflags);
*bnop = bno;
- /*
- * As blocks are freed, they are added to the per-ag busy list
- * and remain there until the freeing transaction is committed to
- * disk. Now that we have allocated blocks, this list must be
- * searched to see if a block is being reused. If one is, then
- * the freeing transaction must be pushed to disk NOW by forcing
- * to disk all iclogs up that transaction's LSN.
- */
- xfs_alloc_search_busy(tp, be32_to_cpu(agf->agf_seqno), bno, 1);
return 0;
}
@@ -2063,9 +2098,14 @@ xfs_alloc_log_agf(
offsetof(xfs_agf_t, agf_freeblks),
offsetof(xfs_agf_t, agf_longest),
offsetof(xfs_agf_t, agf_btreeblks),
+ offsetof(xfs_agf_t, agf_uuid),
sizeof(xfs_agf_t)
};
+ trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
+
+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
+
xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
}
@@ -2102,12 +2142,13 @@ xfs_alloc_put_freelist(
int btreeblk) /* block came from a AGF btree */
{
xfs_agf_t *agf; /* a.g. freespace structure */
- xfs_agfl_t *agfl; /* a.g. free block array */
__be32 *blockp;/* pointer to array entry */
int error;
int logflags;
xfs_mount_t *mp; /* mount structure */
xfs_perag_t *pag; /* per allocation group data */
+ __be32 *agfl_bno;
+ int startoff;
agf = XFS_BUF_TO_AGF(agbp);
mp = tp->t_mountp;
@@ -2115,11 +2156,11 @@ xfs_alloc_put_freelist(
if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
be32_to_cpu(agf->agf_seqno), &agflbp)))
return error;
- agfl = XFS_BUF_TO_AGFL(agflbp);
be32_add_cpu(&agf->agf_fllast, 1);
if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
agf->agf_fllast = 0;
- pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];
+
+ pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
be32_add_cpu(&agf->agf_flcount, 1);
xfs_trans_agflist_delta(tp, 1);
pag->pagf_flcount++;
@@ -2130,72 +2171,165 @@ xfs_alloc_put_freelist(
pag->pagf_btreeblks--;
logflags |= XFS_AGF_BTREEBLKS;
}
+ xfs_perag_put(pag);
- TRACE_MODAGF(NULL, agf, logflags);
xfs_alloc_log_agf(tp, agbp, logflags);
ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
- blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
+
+ agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
+ blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
*blockp = cpu_to_be32(bno);
- TRACE_MODAGF(NULL, agf, logflags);
+ startoff = (char *)blockp - (char *)agflbp->b_addr;
+
xfs_alloc_log_agf(tp, agbp, logflags);
- xfs_trans_log_buf(tp, agflbp,
- (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
- (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl +
- sizeof(xfs_agblock_t) - 1));
+
+ xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
+ xfs_trans_log_buf(tp, agflbp, startoff,
+ startoff + sizeof(xfs_agblock_t) - 1);
return 0;
}
+static bool
+xfs_agf_verify(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
+
+ if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ !uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_uuid))
+ return false;
+
+ if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
+ XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
+ be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
+ be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
+ be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
+ be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
+ return false;
+
+ /*
+ * During growfs operations the perag is not fully initialised, so we
+ * can't use it for any useful checking. growfs works around this by
+ * using uncached buffers that don't have the perag attached, which
+ * lets us detect this case and avoid the problem.
+ */
+ if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
+ return false;
+
+ if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
+ be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
+ return false;
+
+ return true;
+}
+
+static void
+xfs_agf_read_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+
+ if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
+ xfs_buf_ioerror(bp, EFSBADCRC);
+ else if (XFS_TEST_ERROR(!xfs_agf_verify(mp, bp), mp,
+ XFS_ERRTAG_ALLOC_READ_AGF,
+ XFS_RANDOM_ALLOC_READ_AGF))
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
+
+ if (bp->b_error)
+ xfs_verifier_error(bp);
+}
+
+static void
+xfs_agf_write_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_buf_log_item *bip = bp->b_fspriv;
+
+ if (!xfs_agf_verify(mp, bp)) {
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
+ xfs_verifier_error(bp);
+ return;
+ }
+
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return;
+
+ if (bip)
+ XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+
+ xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
+}
+
+const struct xfs_buf_ops xfs_agf_buf_ops = {
+ .verify_read = xfs_agf_read_verify,
+ .verify_write = xfs_agf_write_verify,
+};
+
/*
* Read in the allocation group header (free/alloc section).
*/
int /* error */
-xfs_alloc_read_agf(
- xfs_mount_t *mp, /* mount point structure */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- int flags, /* XFS_ALLOC_FLAG_... */
- xfs_buf_t **bpp) /* buffer for the ag freelist header */
+xfs_read_agf(
+ struct xfs_mount *mp, /* mount point structure */
+ struct xfs_trans *tp, /* transaction pointer */
+ xfs_agnumber_t agno, /* allocation group number */
+ int flags, /* XFS_BUF_ */
+ struct xfs_buf **bpp) /* buffer for the ag freelist header */
{
- xfs_agf_t *agf; /* ag freelist header */
- int agf_ok; /* set if agf is consistent */
- xfs_buf_t *bp; /* return value */
- xfs_perag_t *pag; /* per allocation group data */
int error;
+ trace_xfs_read_agf(mp, agno);
+
ASSERT(agno != NULLAGNUMBER);
error = xfs_trans_read_buf(
mp, tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
- XFS_FSS_TO_BB(mp, 1),
- (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XFS_BUF_TRYLOCK : 0U,
- &bp);
+ XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
if (error)
return error;
- ASSERT(!bp || !XFS_BUF_GETERROR(bp));
- if (!bp) {
- *bpp = NULL;
+ if (!*bpp)
return 0;
- }
- /*
- * Validate the magic number of the agf block.
- */
- agf = XFS_BUF_TO_AGF(bp);
- agf_ok =
- be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC &&
- XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
- be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
- be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
- be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
- be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp);
- if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
- XFS_RANDOM_ALLOC_READ_AGF))) {
- XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
- XFS_ERRLEVEL_LOW, mp, agf);
- xfs_trans_brelse(tp, bp);
- return XFS_ERROR(EFSCORRUPTED);
- }
- pag = &mp->m_perag[agno];
+
+ ASSERT(!(*bpp)->b_error);
+ xfs_buf_set_ref(*bpp, XFS_AGF_REF);
+ return 0;
+}
+
+/*
+ * Read in the allocation group header (free/alloc section).
+ */
+int /* error */
+xfs_alloc_read_agf(
+ struct xfs_mount *mp, /* mount point structure */
+ struct xfs_trans *tp, /* transaction pointer */
+ xfs_agnumber_t agno, /* allocation group number */
+ int flags, /* XFS_ALLOC_FLAG_... */
+ struct xfs_buf **bpp) /* buffer for the ag freelist header */
+{
+ struct xfs_agf *agf; /* ag freelist header */
+ struct xfs_perag *pag; /* per allocation group data */
+ int error;
+
+ trace_xfs_alloc_read_agf(mp, agno);
+
+ ASSERT(agno != NULLAGNUMBER);
+ error = xfs_read_agf(mp, tp, agno,
+ (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
+ bpp);
+ if (error)
+ return error;
+ if (!*bpp)
+ return 0;
+ ASSERT(!(*bpp)->b_error);
+
+ agf = XFS_BUF_TO_AGF(*bpp);
+ pag = xfs_perag_get(mp, agno);
if (!pag->pagf_init) {
pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
@@ -2206,13 +2340,14 @@ xfs_alloc_read_agf(
pag->pagf_levels[XFS_BTNUM_CNTi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
spin_lock_init(&pag->pagb_lock);
- pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS *
- sizeof(xfs_perag_busy_t), KM_SLEEP);
+ pag->pagb_count = 0;
+ pag->pagb_tree = RB_ROOT;
pag->pagf_init = 1;
}
#ifdef DEBUG
else if (!XFS_FORCED_SHUTDOWN(mp)) {
ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
+ ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
@@ -2221,8 +2356,7 @@ xfs_alloc_read_agf(
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
}
#endif
- XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGF, XFS_AGF_REF);
- *bpp = bp;
+ xfs_perag_put(pag);
return 0;
}
@@ -2269,7 +2403,7 @@ xfs_alloc_vextent(
args->minlen > args->maxlen || args->minlen > agsize ||
args->mod >= args->prod) {
args->fsbno = NULLFSBLOCK;
- TRACE_ALLOC("badargs", args);
+ trace_xfs_alloc_vextent_badargs(args);
return 0;
}
minleft = args->minleft;
@@ -2282,24 +2416,21 @@ xfs_alloc_vextent(
* These three force us into a single a.g.
*/
args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
- down_read(&mp->m_peraglock);
- args->pag = &mp->m_perag[args->agno];
+ args->pag = xfs_perag_get(mp, args->agno);
args->minleft = 0;
error = xfs_alloc_fix_freelist(args, 0);
args->minleft = minleft;
if (error) {
- TRACE_ALLOC("nofix", args);
+ trace_xfs_alloc_vextent_nofix(args);
goto error0;
}
if (!args->agbp) {
- up_read(&mp->m_peraglock);
- TRACE_ALLOC("noagbp", args);
+ trace_xfs_alloc_vextent_noagbp(args);
break;
}
args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
if ((error = xfs_alloc_ag_vextent(args)))
goto error0;
- up_read(&mp->m_peraglock);
break;
case XFS_ALLOCTYPE_START_BNO:
/*
@@ -2351,14 +2482,13 @@ xfs_alloc_vextent(
* Loop over allocation groups twice; first time with
* trylock set, second time without.
*/
- down_read(&mp->m_peraglock);
for (;;) {
- args->pag = &mp->m_perag[args->agno];
+ args->pag = xfs_perag_get(mp, args->agno);
if (no_min) args->minleft = 0;
error = xfs_alloc_fix_freelist(args, flags);
args->minleft = minleft;
if (error) {
- TRACE_ALLOC("nofix", args);
+ trace_xfs_alloc_vextent_nofix(args);
goto error0;
}
/*
@@ -2369,7 +2499,9 @@ xfs_alloc_vextent(
goto error0;
break;
}
- TRACE_ALLOC("loopfailed", args);
+
+ trace_xfs_alloc_vextent_loopfailed(args);
+
/*
* Didn't work, figure out the next iteration.
*/
@@ -2396,7 +2528,7 @@ xfs_alloc_vextent(
if (args->agno == sagno) {
if (no_min == 1) {
args->agbno = NULLAGBLOCK;
- TRACE_ALLOC("allfailed", args);
+ trace_xfs_alloc_vextent_allfailed(args);
break;
}
if (flags == 0) {
@@ -2410,8 +2542,8 @@ xfs_alloc_vextent(
}
}
}
+ xfs_perag_put(args->pag);
}
- up_read(&mp->m_peraglock);
if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
if (args->agno == sagno)
mp->m_agfrotor = (mp->m_agfrotor + 1) %
@@ -2437,9 +2569,10 @@ xfs_alloc_vextent(
args->len);
#endif
}
+ xfs_perag_put(args->pag);
return 0;
error0:
- up_read(&mp->m_peraglock);
+ xfs_perag_put(args->pag);
return error;
}
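As the comment in the hunk above notes, the START_BNO/START_AG cases scan the allocation groups twice - first with XFS_ALLOC_FLAG_TRYLOCK so contended AGs are skipped, then a second blocking pass. A minimal userspace analogy of that two-pass scan (the lock array and helper are invented for illustration, compile with -pthread):

/* Two-pass scan: trylock on the first pass, block on the second.
 * Nothing here is XFS code. */
#include <stdio.h>
#include <pthread.h>

#define NR_GROUPS 4

static pthread_mutex_t group_lock[NR_GROUPS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Return the group we locked, or -1 if no group could be locked. */
static int scan_groups(int start, int try_only)
{
	for (int n = 0; n < NR_GROUPS; n++) {
		int g = (start + n) % NR_GROUPS;

		if (try_only) {
			if (pthread_mutex_trylock(&group_lock[g]) != 0)
				continue;	/* contended, skip on pass one */
		} else {
			pthread_mutex_lock(&group_lock[g]);	/* pass two blocks */
		}
		return g;
	}
	return -1;
}

int main(void)
{
	int g = scan_groups(1, 1);	/* first pass: trylock */

	if (g < 0)
		g = scan_groups(1, 0);	/* second pass: block */
	printf("locked group %d\n", g);
	pthread_mutex_unlock(&group_lock[g]);
	return 0;
}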
@@ -2461,157 +2594,37 @@ xfs_free_extent(
memset(&args, 0, sizeof(xfs_alloc_arg_t));
args.tp = tp;
args.mp = tp->t_mountp;
- args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
- ASSERT(args.agno < args.mp->m_sb.sb_agcount);
- args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
- down_read(&args.mp->m_peraglock);
- args.pag = &args.mp->m_perag[args.agno];
- if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
- goto error0;
-#ifdef DEBUG
- ASSERT(args.agbp != NULL);
- ASSERT((args.agbno + len) <=
- be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length));
-#endif
- error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
-error0:
- up_read(&args.mp->m_peraglock);
- return error;
-}
+ /*
+	 * validate that the block number is legal - this enables us to detect
+ * and handle a silent filesystem corruption rather than crashing.
+ */
+ args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
+ if (args.agno >= args.mp->m_sb.sb_agcount)
+ return EFSCORRUPTED;
-/*
- * AG Busy list management
- * The busy list contains block ranges that have been freed but whose
- * transactions have not yet hit disk. If any block listed in a busy
- * list is reused, the transaction that freed it must be forced to disk
- * before continuing to use the block.
- *
- * xfs_alloc_mark_busy - add to the per-ag busy list
- * xfs_alloc_clear_busy - remove an item from the per-ag busy list
- */
-void
-xfs_alloc_mark_busy(xfs_trans_t *tp,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
- xfs_extlen_t len)
-{
- xfs_mount_t *mp;
- xfs_perag_busy_t *bsy;
- int n;
-
- mp = tp->t_mountp;
- spin_lock(&mp->m_perag[agno].pagb_lock);
-
- /* search pagb_list for an open slot */
- for (bsy = mp->m_perag[agno].pagb_list, n = 0;
- n < XFS_PAGB_NUM_SLOTS;
- bsy++, n++) {
- if (bsy->busy_tp == NULL) {
- break;
- }
- }
-
- if (n < XFS_PAGB_NUM_SLOTS) {
- bsy = &mp->m_perag[agno].pagb_list[n];
- mp->m_perag[agno].pagb_count++;
- TRACE_BUSY("xfs_alloc_mark_busy", "got", agno, bno, len, n, tp);
- bsy->busy_start = bno;
- bsy->busy_length = len;
- bsy->busy_tp = tp;
- xfs_trans_add_busy(tp, agno, n);
- } else {
- TRACE_BUSY("xfs_alloc_mark_busy", "FULL", agno, bno, len, -1, tp);
- /*
- * The busy list is full! Since it is now not possible to
- * track the free block, make this a synchronous transaction
- * to insure that the block is not reused before this
- * transaction commits.
- */
- xfs_trans_set_sync(tp);
- }
-
- spin_unlock(&mp->m_perag[agno].pagb_lock);
-}
-
-void
-xfs_alloc_clear_busy(xfs_trans_t *tp,
- xfs_agnumber_t agno,
- int idx)
-{
- xfs_mount_t *mp;
- xfs_perag_busy_t *list;
-
- mp = tp->t_mountp;
-
- spin_lock(&mp->m_perag[agno].pagb_lock);
- list = mp->m_perag[agno].pagb_list;
-
- ASSERT(idx < XFS_PAGB_NUM_SLOTS);
- if (list[idx].busy_tp == tp) {
- TRACE_UNBUSY("xfs_alloc_clear_busy", "found", agno, idx, tp);
- list[idx].busy_tp = NULL;
- mp->m_perag[agno].pagb_count--;
- } else {
- TRACE_UNBUSY("xfs_alloc_clear_busy", "missing", agno, idx, tp);
- }
-
- spin_unlock(&mp->m_perag[agno].pagb_lock);
-}
-
-
-/*
- * If we find the extent in the busy list, force the log out to get the
- * extent out of the busy list so the caller can use it straight away.
- */
-STATIC void
-xfs_alloc_search_busy(xfs_trans_t *tp,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
- xfs_extlen_t len)
-{
- xfs_mount_t *mp;
- xfs_perag_busy_t *bsy;
- xfs_agblock_t uend, bend;
- xfs_lsn_t lsn;
- int cnt;
-
- mp = tp->t_mountp;
-
- spin_lock(&mp->m_perag[agno].pagb_lock);
- cnt = mp->m_perag[agno].pagb_count;
+ args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
+ if (args.agbno >= args.mp->m_sb.sb_agblocks)
+ return EFSCORRUPTED;
- uend = bno + len - 1;
+ args.pag = xfs_perag_get(args.mp, args.agno);
+ ASSERT(args.pag);
- /* search pagb_list for this slot, skipping open slots */
- for (bsy = mp->m_perag[agno].pagb_list; cnt; bsy++) {
+ error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
+ if (error)
+ goto error0;
- /*
- * (start1,length1) within (start2, length2)
- */
- if (bsy->busy_tp != NULL) {
- bend = bsy->busy_start + bsy->busy_length - 1;
- if ((bno > bend) || (uend < bsy->busy_start)) {
- cnt--;
- } else {
- TRACE_BUSYSEARCH("xfs_alloc_search_busy",
- "found1", agno, bno, len, tp);
- break;
- }
- }
+	/* validate the extent size is legal now that we have the agf locked */
+ if (args.agbno + len >
+ be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
+ error = EFSCORRUPTED;
+ goto error0;
}
- /*
- * If a block was found, force the log through the LSN of the
- * transaction that freed the block
- */
- if (cnt) {
- TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, tp);
- lsn = bsy->busy_tp->t_commit_lsn;
- spin_unlock(&mp->m_perag[agno].pagb_lock);
- xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
- } else {
- TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, tp);
- spin_unlock(&mp->m_perag[agno].pagb_lock);
- }
+ error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
+ if (!error)
+ xfs_extent_busy_insert(tp, args.agno, args.agbno, len, 0);
+error0:
+ xfs_perag_put(args.pag);
+ return error;
}
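The rewritten xfs_free_extent() above now range-checks the AG number and AG block number derived from the caller's block number, and checks the extent length against the AGF, so silent on-disk corruption is reported instead of causing a crash. A hedged standalone sketch of that style of validation follows; the geometry values are assumptions and EINVAL merely stands in for EFSCORRUPTED.

/* Reject block numbers derived from possibly-corrupt metadata before they
 * are used as indices; all values here are illustrative. */
#include <stdio.h>
#include <errno.h>

struct geometry {
	unsigned int	agcount;	/* number of allocation groups */
	unsigned int	agblocks;	/* blocks per allocation group */
};

static int check_free_extent(const struct geometry *geo, unsigned int agno,
			     unsigned int agbno, unsigned int len)
{
	if (agno >= geo->agcount)
		return EINVAL;		/* AG number out of range */
	if (agbno >= geo->agblocks)
		return EINVAL;		/* block outside the AG */
	if (agbno + len > geo->agblocks)
		return EINVAL;		/* extent runs off the end of the AG */
	return 0;
}

int main(void)
{
	struct geometry geo = { .agcount = 4, .agblocks = 1048576 };

	printf("good extent: %d\n", check_free_extent(&geo, 1, 100, 8));
	printf("bad AG:      %d\n", check_free_extent(&geo, 9, 100, 8));
	return 0;
}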
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 5aec15d0651..feacb061bab 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -19,23 +19,35 @@
#define __XFS_ALLOC_H__
struct xfs_buf;
+struct xfs_btree_cur;
struct xfs_mount;
struct xfs_perag;
struct xfs_trans;
+extern struct workqueue_struct *xfs_alloc_wq;
+
/*
* Freespace allocation types. Argument to xfs_alloc_[v]extent.
*/
-typedef enum xfs_alloctype
-{
- XFS_ALLOCTYPE_ANY_AG, /* allocate anywhere, use rotor */
- XFS_ALLOCTYPE_FIRST_AG, /* ... start at ag 0 */
- XFS_ALLOCTYPE_START_AG, /* anywhere, start in this a.g. */
- XFS_ALLOCTYPE_THIS_AG, /* anywhere in this a.g. */
- XFS_ALLOCTYPE_START_BNO, /* near this block else anywhere */
- XFS_ALLOCTYPE_NEAR_BNO, /* in this a.g. and near this block */
- XFS_ALLOCTYPE_THIS_BNO /* at exactly this block */
-} xfs_alloctype_t;
+#define XFS_ALLOCTYPE_ANY_AG 0x01 /* allocate anywhere, use rotor */
+#define XFS_ALLOCTYPE_FIRST_AG 0x02 /* ... start at ag 0 */
+#define XFS_ALLOCTYPE_START_AG 0x04 /* anywhere, start in this a.g. */
+#define XFS_ALLOCTYPE_THIS_AG 0x08 /* anywhere in this a.g. */
+#define XFS_ALLOCTYPE_START_BNO 0x10 /* near this block else anywhere */
+#define XFS_ALLOCTYPE_NEAR_BNO 0x20 /* in this a.g. and near this block */
+#define XFS_ALLOCTYPE_THIS_BNO 0x40 /* at exactly this block */
+
+/* this should become an enum again when the tracing code is fixed */
+typedef unsigned int xfs_alloctype_t;
+
+#define XFS_ALLOC_TYPES \
+ { XFS_ALLOCTYPE_ANY_AG, "ANY_AG" }, \
+ { XFS_ALLOCTYPE_FIRST_AG, "FIRST_AG" }, \
+ { XFS_ALLOCTYPE_START_AG, "START_AG" }, \
+ { XFS_ALLOCTYPE_THIS_AG, "THIS_AG" }, \
+ { XFS_ALLOCTYPE_START_BNO, "START_BNO" }, \
+ { XFS_ALLOCTYPE_NEAR_BNO, "NEAR_BNO" }, \
+ { XFS_ALLOCTYPE_THIS_BNO, "THIS_BNO" }
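Turning the allocation types into bit values with a parallel name table lets the tracepoints print symbolic names instead of raw numbers. A userspace sketch of the same value-to-name mapping follows; the table and helper mirror the spirit of XFS_ALLOC_TYPES but are illustrative, not kernel code.

/* Map a numeric allocation type to a printable name; names and the lookup
 * helper are illustrative only. */
#include <stdio.h>
#include <stddef.h>

#define ALLOCTYPE_ANY_AG	0x01
#define ALLOCTYPE_THIS_AG	0x08
#define ALLOCTYPE_NEAR_BNO	0x20

static const struct {
	unsigned int	value;
	const char	*name;
} alloc_type_names[] = {
	{ ALLOCTYPE_ANY_AG,   "ANY_AG" },
	{ ALLOCTYPE_THIS_AG,  "THIS_AG" },
	{ ALLOCTYPE_NEAR_BNO, "NEAR_BNO" },
};

static const char *alloc_type_name(unsigned int type)
{
	for (size_t i = 0; i < sizeof(alloc_type_names) / sizeof(alloc_type_names[0]); i++)
		if (alloc_type_names[i].value == type)
			return alloc_type_names[i].name;
	return "UNKNOWN";
}

int main(void)
{
	printf("0x08 -> %s\n", alloc_type_name(ALLOCTYPE_THIS_AG));
	return 0;
}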
/*
* Flags for xfs_alloc_fix_freelist.
@@ -64,6 +76,22 @@ typedef enum xfs_alloctype
#define XFS_ALLOC_SET_ASIDE(mp) (4 + ((mp)->m_sb.sb_agcount * 4))
/*
+ * When deciding how much space to allocate out of an AG, we limit the
+ * maximum allocation size to the size of the AG. However, we cannot use all
+ * the blocks in the AG - some are permanently used by metadata. These
+ * blocks are generally:
+ * - the AG superblock, AGF, AGI and AGFL
+ * - the AGF (bno and cnt) and AGI btree root blocks
+ * - 4 blocks on the AGFL according to XFS_ALLOC_SET_ASIDE() limits
+ *
+ * The AG headers are sector sized, so the amount of space they take up is
+ * dependent on filesystem geometry. The others are all single blocks.
+ */
+#define XFS_ALLOC_AG_MAX_USABLE(mp) \
+ ((mp)->m_sb.sb_agblocks - XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)) - 7)
+
+
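To make the set-aside in XFS_ALLOC_AG_MAX_USABLE() concrete, here is a standalone calculation under an assumed geometry (512-byte sectors, 4k blocks, 1M blocks per AG); the numbers are illustrative, not taken from this patch.

/* Worked example of the XFS_ALLOC_AG_MAX_USABLE() arithmetic; the geometry
 * values are assumptions chosen for illustration. */
#include <stdio.h>

int main(void)
{
	unsigned int sectsize  = 512;		/* assumed sector size */
	unsigned int blocksize = 4096;		/* assumed fs block size */
	unsigned int agblocks  = 1048576;	/* assumed blocks per AG */

	/* four sector-sized AG headers, rounded up to whole fs blocks */
	unsigned int hdr_blocks = (4 * sectsize + blocksize - 1) / blocksize;

	/* three btree root blocks plus the four AGFL set-aside blocks */
	unsigned int max_usable = agblocks - hdr_blocks - 7;

	printf("usable blocks per AG: %u\n", max_usable);	/* prints 1048568 */
	return 0;
}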
+/*
* Argument structure for xfs_alloc routines.
* This is turned into a structure to avoid having 20 arguments passed
* down several levels of the stack.
@@ -100,26 +128,12 @@ typedef struct xfs_alloc_arg {
#define XFS_ALLOC_USERDATA 1 /* allocation is for user data*/
#define XFS_ALLOC_INITIAL_USER_DATA 2 /* special case start of file */
-
-#ifdef __KERNEL__
-
-#if defined(XFS_ALLOC_TRACE)
/*
- * Allocation tracing buffer size.
+ * Find the length of the longest extent in an AG.
*/
-#define XFS_ALLOC_TRACE_SIZE 4096
-extern ktrace_t *xfs_alloc_trace_buf;
-
-/*
- * Types for alloc tracing.
- */
-#define XFS_ALLOC_KTRACE_ALLOC 1
-#define XFS_ALLOC_KTRACE_FREE 2
-#define XFS_ALLOC_KTRACE_MODAGF 3
-#define XFS_ALLOC_KTRACE_BUSY 4
-#define XFS_ALLOC_KTRACE_UNBUSY 5
-#define XFS_ALLOC_KTRACE_BUSYSEARCH 6
-#endif
+xfs_extlen_t
+xfs_alloc_longest_free_extent(struct xfs_mount *mp,
+ struct xfs_perag *pag);
/*
* Compute and fill in value of m_ag_maxlevels.
@@ -196,18 +210,25 @@ xfs_free_extent(
xfs_fsblock_t bno, /* starting block number of extent */
xfs_extlen_t len); /* length of extent */
-void
-xfs_alloc_mark_busy(xfs_trans_t *tp,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
- xfs_extlen_t len);
+int /* error */
+xfs_alloc_lookup_le(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ xfs_agblock_t bno, /* starting block of extent */
+ xfs_extlen_t len, /* length of extent */
+ int *stat); /* success/failure */
-void
-xfs_alloc_clear_busy(xfs_trans_t *tp,
- xfs_agnumber_t ag,
- int idx);
-
-
-#endif /* __KERNEL__ */
+int /* error */
+xfs_alloc_lookup_ge(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ xfs_agblock_t bno, /* starting block of extent */
+ xfs_extlen_t len, /* length of extent */
+ int *stat); /* success/failure */
+
+int /* error */
+xfs_alloc_get_rec(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ xfs_agblock_t *bno, /* output: starting block of extent */
+ xfs_extlen_t *len, /* output: length of extent */
+ int *stat); /* output: success/failure */
#endif /* __XFS_ALLOC_H__ */
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 3ce2645508a..8358f1ded94 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -17,2195 +17,488 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
#include "xfs_btree.h"
-#include "xfs_ialloc.h"
+#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
+#include "xfs_extent_busy.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
+#include "xfs_cksum.h"
+#include "xfs_trans.h"
-/*
- * Prototypes for internal functions.
- */
-STATIC void xfs_alloc_log_block(xfs_trans_t *, xfs_buf_t *, int);
-STATIC void xfs_alloc_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
-STATIC void xfs_alloc_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
-STATIC void xfs_alloc_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
-STATIC int xfs_alloc_lshift(xfs_btree_cur_t *, int, int *);
-STATIC int xfs_alloc_newroot(xfs_btree_cur_t *, int *);
-STATIC int xfs_alloc_rshift(xfs_btree_cur_t *, int, int *);
-STATIC int xfs_alloc_split(xfs_btree_cur_t *, int, xfs_agblock_t *,
- xfs_alloc_key_t *, xfs_btree_cur_t **, int *);
-STATIC int xfs_alloc_updkey(xfs_btree_cur_t *, xfs_alloc_key_t *, int);
+STATIC struct xfs_btree_cur *
+xfs_allocbt_dup_cursor(
+ struct xfs_btree_cur *cur)
+{
+ return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
+ cur->bc_private.a.agbp, cur->bc_private.a.agno,
+ cur->bc_btnum);
+}
-/*
- * Internal functions.
- */
+STATIC void
+xfs_allocbt_set_root(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr,
+ int inc)
+{
+ struct xfs_buf *agbp = cur->bc_private.a.agbp;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
+ int btnum = cur->bc_btnum;
+ struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno);
-/*
- * Single level of the xfs_alloc_delete record deletion routine.
- * Delete record pointed to by cur/level.
- * Remove the record from its block then rebalance the tree.
- * Return 0 for error, 1 for done, 2 to go on to the next level.
- */
-STATIC int /* error */
-xfs_alloc_delrec(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level, /* level removing record from */
- int *stat) /* fail/done/go-on */
+ ASSERT(ptr->s != 0);
+
+ agf->agf_roots[btnum] = ptr->s;
+ be32_add_cpu(&agf->agf_levels[btnum], inc);
+ pag->pagf_levels[btnum] += inc;
+ xfs_perag_put(pag);
+
+ xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
+}
+
+STATIC int
+xfs_allocbt_alloc_block(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *start,
+ union xfs_btree_ptr *new,
+ int *stat)
{
- xfs_agf_t *agf; /* allocation group freelist header */
- xfs_alloc_block_t *block; /* btree block record/key lives in */
- xfs_agblock_t bno; /* btree block number */
- xfs_buf_t *bp; /* buffer for block */
- int error; /* error return value */
- int i; /* loop index */
- xfs_alloc_key_t key; /* kp points here if block is level 0 */
- xfs_agblock_t lbno; /* left block's block number */
- xfs_buf_t *lbp; /* left block's buffer pointer */
- xfs_alloc_block_t *left; /* left btree block */
- xfs_alloc_key_t *lkp=NULL; /* left block key pointer */
- xfs_alloc_ptr_t *lpp=NULL; /* left block address pointer */
- int lrecs=0; /* number of records in left block */
- xfs_alloc_rec_t *lrp; /* left block record pointer */
- xfs_mount_t *mp; /* mount structure */
- int ptr; /* index in btree block for this rec */
- xfs_agblock_t rbno; /* right block's block number */
- xfs_buf_t *rbp; /* right block's buffer pointer */
- xfs_alloc_block_t *right; /* right btree block */
- xfs_alloc_key_t *rkp; /* right block key pointer */
- xfs_alloc_ptr_t *rpp; /* right block address pointer */
- int rrecs=0; /* number of records in right block */
- int numrecs;
- xfs_alloc_rec_t *rrp; /* right block record pointer */
- xfs_btree_cur_t *tcur; /* temporary btree cursor */
+ int error;
+ xfs_agblock_t bno;
- /*
- * Get the index of the entry being deleted, check for nothing there.
- */
- ptr = cur->bc_ptrs[level];
- if (ptr == 0) {
- *stat = 0;
- return 0;
- }
- /*
- * Get the buffer & block containing the record or key/ptr.
- */
- bp = cur->bc_bufs[level];
- block = XFS_BUF_TO_ALLOC_BLOCK(bp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
- return error;
-#endif
- /*
- * Fail if we're off the end of the block.
- */
- numrecs = be16_to_cpu(block->bb_numrecs);
- if (ptr > numrecs) {
- *stat = 0;
- return 0;
- }
- XFS_STATS_INC(xs_abt_delrec);
- /*
- * It's a nonleaf. Excise the key and ptr being deleted, by
- * sliding the entries past them down one.
- * Log the changed areas of the block.
- */
- if (level > 0) {
- lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
- lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
-#ifdef DEBUG
- for (i = ptr; i < numrecs; i++) {
- if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
- return error;
- }
-#endif
- if (ptr < numrecs) {
- memmove(&lkp[ptr - 1], &lkp[ptr],
- (numrecs - ptr) * sizeof(*lkp));
- memmove(&lpp[ptr - 1], &lpp[ptr],
- (numrecs - ptr) * sizeof(*lpp));
- xfs_alloc_log_ptrs(cur, bp, ptr, numrecs - 1);
- xfs_alloc_log_keys(cur, bp, ptr, numrecs - 1);
- }
- }
- /*
- * It's a leaf. Excise the record being deleted, by sliding the
- * entries past it down one. Log the changed areas of the block.
- */
- else {
- lrp = XFS_ALLOC_REC_ADDR(block, 1, cur);
- if (ptr < numrecs) {
- memmove(&lrp[ptr - 1], &lrp[ptr],
- (numrecs - ptr) * sizeof(*lrp));
- xfs_alloc_log_recs(cur, bp, ptr, numrecs - 1);
- }
- /*
- * If it's the first record in the block, we'll need a key
- * structure to pass up to the next level (updkey).
- */
- if (ptr == 1) {
- key.ar_startblock = lrp->ar_startblock;
- key.ar_blockcount = lrp->ar_blockcount;
- lkp = &key;
- }
- }
- /*
- * Decrement and log the number of entries in the block.
- */
- numrecs--;
- block->bb_numrecs = cpu_to_be16(numrecs);
- xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
- /*
- * See if the longest free extent in the allocation group was
- * changed by this operation. True if it's the by-size btree, and
- * this is the leaf level, and there is no right sibling block,
- * and this was the last record.
- */
- agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
- mp = cur->bc_mp;
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
- if (level == 0 &&
- cur->bc_btnum == XFS_BTNUM_CNT &&
- be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
- ptr > numrecs) {
- ASSERT(ptr == numrecs + 1);
- /*
- * There are still records in the block. Grab the size
- * from the last one.
- */
- if (numrecs) {
- rrp = XFS_ALLOC_REC_ADDR(block, numrecs, cur);
- agf->agf_longest = rrp->ar_blockcount;
- }
- /*
- * No free extents left.
- */
- else
- agf->agf_longest = 0;
- mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest =
- be32_to_cpu(agf->agf_longest);
- xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
- XFS_AGF_LONGEST);
- }
- /*
- * Is this the root level? If so, we're almost done.
- */
- if (level == cur->bc_nlevels - 1) {
- /*
- * If this is the root level,
- * and there's only one entry left,
- * and it's NOT the leaf level,
- * then we can get rid of this level.
- */
- if (numrecs == 1 && level > 0) {
- /*
- * lpp is still set to the first pointer in the block.
- * Make it the new root of the btree.
- */
- bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]);
- agf->agf_roots[cur->bc_btnum] = *lpp;
- be32_add_cpu(&agf->agf_levels[cur->bc_btnum], -1);
- mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--;
- /*
- * Put this buffer/block on the ag's freelist.
- */
- error = xfs_alloc_put_freelist(cur->bc_tp,
- cur->bc_private.a.agbp, NULL, bno, 1);
- if (error)
- return error;
- /*
- * Since blocks move to the free list without the
- * coordination used in xfs_bmap_finish, we can't allow
- * block to be available for reallocation and
- * non-transaction writing (user data) until we know
- * that the transaction that moved it to the free list
- * is permanently on disk. We track the blocks by
- * declaring these blocks as "busy"; the busy list is
- * maintained on a per-ag basis and each transaction
- * records which entries should be removed when the
- * iclog commits to disk. If a busy block is
- * allocated, the iclog is pushed up to the LSN
- * that freed the block.
- */
- xfs_alloc_mark_busy(cur->bc_tp,
- be32_to_cpu(agf->agf_seqno), bno, 1);
-
- xfs_trans_agbtree_delta(cur->bc_tp, -1);
- xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
- XFS_AGF_ROOTS | XFS_AGF_LEVELS);
- /*
- * Update the cursor so there's one fewer level.
- */
- xfs_btree_setbuf(cur, level, NULL);
- cur->bc_nlevels--;
- } else if (level > 0 &&
- (error = xfs_alloc_decrement(cur, level, &i)))
- return error;
- *stat = 1;
- return 0;
- }
- /*
- * If we deleted the leftmost entry in the block, update the
- * key values above us in the tree.
- */
- if (ptr == 1 && (error = xfs_alloc_updkey(cur, lkp, level + 1)))
+ /* Allocate the new block from the freelist. If we can't, give up. */
+ error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
+ &bno, 1);
+ if (error) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
- /*
- * If the number of records remaining in the block is at least
- * the minimum, we're done.
- */
- if (numrecs >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
- if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i)))
- return error;
- *stat = 1;
- return 0;
}
- /*
- * Otherwise, we have to move some records around to keep the
- * tree balanced. Look at the left and right sibling blocks to
- * see if we can re-balance by moving only one record.
- */
- rbno = be32_to_cpu(block->bb_rightsib);
- lbno = be32_to_cpu(block->bb_leftsib);
- bno = NULLAGBLOCK;
- ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK);
- /*
- * Duplicate the cursor so our btree manipulations here won't
- * disrupt the next level up.
- */
- if ((error = xfs_btree_dup_cursor(cur, &tcur)))
- return error;
- /*
- * If there's a right sibling, see if it's ok to shift an entry
- * out of it.
- */
- if (rbno != NULLAGBLOCK) {
- /*
- * Move the temp cursor to the last entry in the next block.
- * Actually any entry but the first would suffice.
- */
- i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- if ((error = xfs_alloc_increment(tcur, level, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- /*
- * Grab a pointer to the block.
- */
- rbp = tcur->bc_bufs[level];
- right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
- goto error0;
-#endif
- /*
- * Grab the current block number, for future use.
- */
- bno = be32_to_cpu(right->bb_leftsib);
- /*
- * If right block is full enough so that removing one entry
- * won't make it too empty, and left-shifting an entry out
- * of right to us works, we're done.
- */
- if (be16_to_cpu(right->bb_numrecs) - 1 >=
- XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
- if ((error = xfs_alloc_lshift(tcur, level, &i)))
- goto error0;
- if (i) {
- ASSERT(be16_to_cpu(block->bb_numrecs) >=
- XFS_ALLOC_BLOCK_MINRECS(level, cur));
- xfs_btree_del_cursor(tcur,
- XFS_BTREE_NOERROR);
- if (level > 0 &&
- (error = xfs_alloc_decrement(cur, level,
- &i)))
- return error;
- *stat = 1;
- return 0;
- }
- }
- /*
- * Otherwise, grab the number of records in right for
- * future reference, and fix up the temp cursor to point
- * to our block again (last record).
- */
- rrecs = be16_to_cpu(right->bb_numrecs);
- if (lbno != NULLAGBLOCK) {
- i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- if ((error = xfs_alloc_decrement(tcur, level, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- }
- }
- /*
- * If there's a left sibling, see if it's ok to shift an entry
- * out of it.
- */
- if (lbno != NULLAGBLOCK) {
- /*
- * Move the temp cursor to the first entry in the
- * previous block.
- */
- i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- if ((error = xfs_alloc_decrement(tcur, level, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- xfs_btree_firstrec(tcur, level);
- /*
- * Grab a pointer to the block.
- */
- lbp = tcur->bc_bufs[level];
- left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
- goto error0;
-#endif
- /*
- * Grab the current block number, for future use.
- */
- bno = be32_to_cpu(left->bb_rightsib);
- /*
- * If left block is full enough so that removing one entry
- * won't make it too empty, and right-shifting an entry out
- * of left to us works, we're done.
- */
- if (be16_to_cpu(left->bb_numrecs) - 1 >=
- XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
- if ((error = xfs_alloc_rshift(tcur, level, &i)))
- goto error0;
- if (i) {
- ASSERT(be16_to_cpu(block->bb_numrecs) >=
- XFS_ALLOC_BLOCK_MINRECS(level, cur));
- xfs_btree_del_cursor(tcur,
- XFS_BTREE_NOERROR);
- if (level == 0)
- cur->bc_ptrs[0]++;
- *stat = 1;
- return 0;
- }
- }
- /*
- * Otherwise, grab the number of records in right for
- * future reference.
- */
- lrecs = be16_to_cpu(left->bb_numrecs);
- }
- /*
- * Delete the temp cursor, we're done with it.
- */
- xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
- /*
- * If here, we need to do a join to keep the tree balanced.
- */
- ASSERT(bno != NULLAGBLOCK);
- /*
- * See if we can join with the left neighbor block.
- */
- if (lbno != NULLAGBLOCK &&
- lrecs + numrecs <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
- /*
- * Set "right" to be the starting block,
- * "left" to be the left neighbor.
- */
- rbno = bno;
- right = block;
- rrecs = be16_to_cpu(right->bb_numrecs);
- rbp = bp;
- if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
- cur->bc_private.a.agno, lbno, 0, &lbp,
- XFS_ALLOC_BTREE_REF)))
- return error;
- left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
- lrecs = be16_to_cpu(left->bb_numrecs);
- if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
- return error;
- }
- /*
- * If that won't work, see if we can join with the right neighbor block.
- */
- else if (rbno != NULLAGBLOCK &&
- rrecs + numrecs <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
- /*
- * Set "left" to be the starting block,
- * "right" to be the right neighbor.
- */
- lbno = bno;
- left = block;
- lrecs = be16_to_cpu(left->bb_numrecs);
- lbp = bp;
- if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
- cur->bc_private.a.agno, rbno, 0, &rbp,
- XFS_ALLOC_BTREE_REF)))
- return error;
- right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
- rrecs = be16_to_cpu(right->bb_numrecs);
- if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
- return error;
- }
- /*
- * Otherwise, we can't fix the imbalance.
- * Just return. This is probably a logic error, but it's not fatal.
- */
- else {
- if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i)))
- return error;
- *stat = 1;
+
+ if (bno == NULLAGBLOCK) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 0;
return 0;
}
- /*
- * We're now going to join "left" and "right" by moving all the stuff
- * in "right" to "left" and deleting "right".
- */
- if (level > 0) {
- /*
- * It's a non-leaf. Move keys and pointers.
- */
- lkp = XFS_ALLOC_KEY_ADDR(left, lrecs + 1, cur);
- lpp = XFS_ALLOC_PTR_ADDR(left, lrecs + 1, cur);
- rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
- rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
-#ifdef DEBUG
- for (i = 0; i < rrecs; i++) {
- if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level)))
- return error;
- }
-#endif
- memcpy(lkp, rkp, rrecs * sizeof(*lkp));
- memcpy(lpp, rpp, rrecs * sizeof(*lpp));
- xfs_alloc_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
- xfs_alloc_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
- } else {
- /*
- * It's a leaf. Move records.
- */
- lrp = XFS_ALLOC_REC_ADDR(left, lrecs + 1, cur);
- rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
- memcpy(lrp, rrp, rrecs * sizeof(*lrp));
- xfs_alloc_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
- }
- /*
- * If we joined with the left neighbor, set the buffer in the
- * cursor to the left block, and fix up the index.
- */
- if (bp != lbp) {
- xfs_btree_setbuf(cur, level, lbp);
- cur->bc_ptrs[level] += lrecs;
- }
- /*
- * If we joined with the right neighbor and there's a level above
- * us, increment the cursor at that level.
- */
- else if (level + 1 < cur->bc_nlevels &&
- (error = xfs_alloc_increment(cur, level + 1, &i)))
- return error;
- /*
- * Fix up the number of records in the surviving block.
- */
- lrecs += rrecs;
- left->bb_numrecs = cpu_to_be16(lrecs);
- /*
- * Fix up the right block pointer in the surviving block, and log it.
- */
- left->bb_rightsib = right->bb_rightsib;
- xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
- /*
- * If there is a right sibling now, make it point to the
- * remaining block.
- */
- if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) {
- xfs_alloc_block_t *rrblock;
- xfs_buf_t *rrbp;
-
- if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
- cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0,
- &rrbp, XFS_ALLOC_BTREE_REF)))
- return error;
- rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp);
- if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
- return error;
- rrblock->bb_leftsib = cpu_to_be32(lbno);
- xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB);
- }
- /*
- * Free the deleting block by putting it on the freelist.
- */
- error = xfs_alloc_put_freelist(cur->bc_tp,
- cur->bc_private.a.agbp, NULL, rbno, 1);
+
+ xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);
+
+ xfs_trans_agbtree_delta(cur->bc_tp, 1);
+ new->s = cpu_to_be32(bno);
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 1;
+ return 0;
+}
+
+STATIC int
+xfs_allocbt_free_block(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp)
+{
+ struct xfs_buf *agbp = cur->bc_private.a.agbp;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ xfs_agblock_t bno;
+ int error;
+
+ bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
+ error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
if (error)
return error;
- /*
- * Since blocks move to the free list without the coordination
- * used in xfs_bmap_finish, we can't allow block to be available
- * for reallocation and non-transaction writing (user data)
- * until we know that the transaction that moved it to the free
- * list is permanently on disk. We track the blocks by declaring
- * these blocks as "busy"; the busy list is maintained on a
- * per-ag basis and each transaction records which entries
- * should be removed when the iclog commits to disk. If a
- * busy block is allocated, the iclog is pushed up to the
- * LSN that freed the block.
- */
- xfs_alloc_mark_busy(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1);
+
+ xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
+ XFS_EXTENT_BUSY_SKIP_DISCARD);
xfs_trans_agbtree_delta(cur->bc_tp, -1);
- /*
- * Adjust the current level's cursor so that we're left referring
- * to the right node, after we're done.
- * If this leaves the ptr value 0 our caller will fix it up.
- */
- if (level > 0)
- cur->bc_ptrs[level]--;
- /*
- * Return value means the next level up has something to do.
- */
- *stat = 2;
+ xfs_trans_binval(cur->bc_tp, bp);
return 0;
-
-error0:
- xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
- return error;
}
/*
- * Insert one record/level. Return information to the caller
- * allowing the next level up to proceed if necessary.
+ * Update the longest extent in the AGF
*/
-STATIC int /* error */
-xfs_alloc_insrec(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level, /* level to insert record at */
- xfs_agblock_t *bnop, /* i/o: block number inserted */
- xfs_alloc_rec_t *recp, /* i/o: record data inserted */
- xfs_btree_cur_t **curp, /* output: new cursor replacing cur */
- int *stat) /* output: success/failure */
+STATIC void
+xfs_allocbt_update_lastrec(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ union xfs_btree_rec *rec,
+ int ptr,
+ int reason)
{
- xfs_agf_t *agf; /* allocation group freelist header */
- xfs_alloc_block_t *block; /* btree block record/key lives in */
- xfs_buf_t *bp; /* buffer for block */
- int error; /* error return value */
- int i; /* loop index */
- xfs_alloc_key_t key; /* key value being inserted */
- xfs_alloc_key_t *kp; /* pointer to btree keys */
- xfs_agblock_t nbno; /* block number of allocated block */
- xfs_btree_cur_t *ncur; /* new cursor to be used at next lvl */
- xfs_alloc_key_t nkey; /* new key value, from split */
- xfs_alloc_rec_t nrec; /* new record value, for caller */
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+ xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
+ struct xfs_perag *pag;
+ __be32 len;
int numrecs;
- int optr; /* old ptr value */
- xfs_alloc_ptr_t *pp; /* pointer to btree addresses */
- int ptr; /* index in btree block for this rec */
- xfs_alloc_rec_t *rp; /* pointer to btree records */
- ASSERT(be32_to_cpu(recp->ar_blockcount) > 0);
+ ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);
+
+ switch (reason) {
+ case LASTREC_UPDATE:
+ /*
+ * If this is the last leaf block and it's the last record,
+ * then update the size of the longest extent in the AG.
+ */
+ if (ptr != xfs_btree_get_numrecs(block))
+ return;
+ len = rec->alloc.ar_blockcount;
+ break;
+ case LASTREC_INSREC:
+ if (be32_to_cpu(rec->alloc.ar_blockcount) <=
+ be32_to_cpu(agf->agf_longest))
+ return;
+ len = rec->alloc.ar_blockcount;
+ break;
+ case LASTREC_DELREC:
+ numrecs = xfs_btree_get_numrecs(block);
+ if (ptr <= numrecs)
+ return;
+ ASSERT(ptr == numrecs + 1);
- /*
- * GCC doesn't understand the (arguably complex) control flow in
- * this function and complains about uninitialized structure fields
- * without this.
- */
- memset(&nrec, 0, sizeof(nrec));
+ if (numrecs) {
+ xfs_alloc_rec_t *rrp;
- /*
- * If we made it to the root level, allocate a new root block
- * and we're done.
- */
- if (level >= cur->bc_nlevels) {
- XFS_STATS_INC(xs_abt_insrec);
- if ((error = xfs_alloc_newroot(cur, &i)))
- return error;
- *bnop = NULLAGBLOCK;
- *stat = i;
- return 0;
- }
- /*
- * Make a key out of the record data to be inserted, and save it.
- */
- key.ar_startblock = recp->ar_startblock;
- key.ar_blockcount = recp->ar_blockcount;
- optr = ptr = cur->bc_ptrs[level];
- /*
- * If we're off the left edge, return failure.
- */
- if (ptr == 0) {
- *stat = 0;
- return 0;
- }
- XFS_STATS_INC(xs_abt_insrec);
- /*
- * Get pointers to the btree buffer and block.
- */
- bp = cur->bc_bufs[level];
- block = XFS_BUF_TO_ALLOC_BLOCK(bp);
- numrecs = be16_to_cpu(block->bb_numrecs);
-#ifdef DEBUG
- if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
- return error;
- /*
- * Check that the new entry is being inserted in the right place.
- */
- if (ptr <= numrecs) {
- if (level == 0) {
- rp = XFS_ALLOC_REC_ADDR(block, ptr, cur);
- xfs_btree_check_rec(cur->bc_btnum, recp, rp);
+ rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
+ len = rrp->ar_blockcount;
} else {
- kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur);
- xfs_btree_check_key(cur->bc_btnum, &key, kp);
- }
- }
-#endif
- nbno = NULLAGBLOCK;
- ncur = NULL;
- /*
- * If the block is full, we can't insert the new entry until we
- * make the block un-full.
- */
- if (numrecs == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
- /*
- * First, try shifting an entry to the right neighbor.
- */
- if ((error = xfs_alloc_rshift(cur, level, &i)))
- return error;
- if (i) {
- /* nothing */
+ len = 0;
}
- /*
- * Next, try shifting an entry to the left neighbor.
- */
- else {
- if ((error = xfs_alloc_lshift(cur, level, &i)))
- return error;
- if (i)
- optr = ptr = cur->bc_ptrs[level];
- else {
- /*
- * Next, try splitting the current block in
- * half. If this works we have to re-set our
- * variables because we could be in a
- * different block now.
- */
- if ((error = xfs_alloc_split(cur, level, &nbno,
- &nkey, &ncur, &i)))
- return error;
- if (i) {
- bp = cur->bc_bufs[level];
- block = XFS_BUF_TO_ALLOC_BLOCK(bp);
-#ifdef DEBUG
- if ((error =
- xfs_btree_check_sblock(cur,
- block, level, bp)))
- return error;
-#endif
- ptr = cur->bc_ptrs[level];
- nrec.ar_startblock = nkey.ar_startblock;
- nrec.ar_blockcount = nkey.ar_blockcount;
- }
- /*
- * Otherwise the insert fails.
- */
- else {
- *stat = 0;
- return 0;
- }
- }
- }
- }
- /*
- * At this point we know there's room for our new entry in the block
- * we're pointing at.
- */
- numrecs = be16_to_cpu(block->bb_numrecs);
- if (level > 0) {
- /*
- * It's a non-leaf entry. Make a hole for the new data
- * in the key and ptr regions of the block.
- */
- kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
- pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
-#ifdef DEBUG
- for (i = numrecs; i >= ptr; i--) {
- if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level)))
- return error;
- }
-#endif
- memmove(&kp[ptr], &kp[ptr - 1],
- (numrecs - ptr + 1) * sizeof(*kp));
- memmove(&pp[ptr], &pp[ptr - 1],
- (numrecs - ptr + 1) * sizeof(*pp));
-#ifdef DEBUG
- if ((error = xfs_btree_check_sptr(cur, *bnop, level)))
- return error;
-#endif
- /*
- * Now stuff the new data in, bump numrecs and log the new data.
- */
- kp[ptr - 1] = key;
- pp[ptr - 1] = cpu_to_be32(*bnop);
- numrecs++;
- block->bb_numrecs = cpu_to_be16(numrecs);
- xfs_alloc_log_keys(cur, bp, ptr, numrecs);
- xfs_alloc_log_ptrs(cur, bp, ptr, numrecs);
-#ifdef DEBUG
- if (ptr < numrecs)
- xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1,
- kp + ptr);
-#endif
- } else {
- /*
- * It's a leaf entry. Make a hole for the new record.
- */
- rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
- memmove(&rp[ptr], &rp[ptr - 1],
- (numrecs - ptr + 1) * sizeof(*rp));
- /*
- * Now stuff the new record in, bump numrecs
- * and log the new data.
- */
- rp[ptr - 1] = *recp;
- numrecs++;
- block->bb_numrecs = cpu_to_be16(numrecs);
- xfs_alloc_log_recs(cur, bp, ptr, numrecs);
-#ifdef DEBUG
- if (ptr < numrecs)
- xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1,
- rp + ptr);
-#endif
- }
- /*
- * Log the new number of records in the btree header.
- */
- xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
- /*
- * If we inserted at the start of a block, update the parents' keys.
- */
- if (optr == 1 && (error = xfs_alloc_updkey(cur, &key, level + 1)))
- return error;
- /*
- * Look to see if the longest extent in the allocation group
- * needs to be updated.
- */
- agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
- if (level == 0 &&
- cur->bc_btnum == XFS_BTNUM_CNT &&
- be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
- be32_to_cpu(recp->ar_blockcount) > be32_to_cpu(agf->agf_longest)) {
- /*
- * If this is a leaf in the by-size btree and there
- * is no right sibling block and this block is bigger
- * than the previous longest block, update it.
- */
- agf->agf_longest = recp->ar_blockcount;
- cur->bc_mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest
- = be32_to_cpu(recp->ar_blockcount);
- xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
- XFS_AGF_LONGEST);
- }
- /*
- * Return the new block number, if any.
- * If there is one, give back a record value and a cursor too.
- */
- *bnop = nbno;
- if (nbno != NULLAGBLOCK) {
- *recp = nrec;
- *curp = ncur;
+ break;
+ default:
+ ASSERT(0);
+ return;
}
- *stat = 1;
- return 0;
+
+ agf->agf_longest = len;
+ pag = xfs_perag_get(cur->bc_mp, seqno);
+ pag->pagf_longest = be32_to_cpu(len);
+ xfs_perag_put(pag);
+ xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}
-/*
- * Log header fields from a btree block.
- */
-STATIC void
-xfs_alloc_log_block(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_buf_t *bp, /* buffer containing btree block */
- int fields) /* mask of fields: XFS_BB_... */
+STATIC int
+xfs_allocbt_get_minrecs(
+ struct xfs_btree_cur *cur,
+ int level)
{
- int first; /* first byte offset logged */
- int last; /* last byte offset logged */
- static const short offsets[] = { /* table of offsets */
- offsetof(xfs_alloc_block_t, bb_magic),
- offsetof(xfs_alloc_block_t, bb_level),
- offsetof(xfs_alloc_block_t, bb_numrecs),
- offsetof(xfs_alloc_block_t, bb_leftsib),
- offsetof(xfs_alloc_block_t, bb_rightsib),
- sizeof(xfs_alloc_block_t)
- };
-
- xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, &last);
- xfs_trans_log_buf(tp, bp, first, last);
+ return cur->bc_mp->m_alloc_mnr[level != 0];
}
-/*
- * Log keys from a btree block (nonleaf).
- */
-STATIC void
-xfs_alloc_log_keys(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_buf_t *bp, /* buffer containing btree block */
- int kfirst, /* index of first key to log */
- int klast) /* index of last key to log */
+STATIC int
+xfs_allocbt_get_maxrecs(
+ struct xfs_btree_cur *cur,
+ int level)
{
- xfs_alloc_block_t *block; /* btree block to log from */
- int first; /* first byte offset logged */
- xfs_alloc_key_t *kp; /* key pointer in btree block */
- int last; /* last byte offset logged */
-
- block = XFS_BUF_TO_ALLOC_BLOCK(bp);
- kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
- first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block);
- last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block);
- xfs_trans_log_buf(cur->bc_tp, bp, first, last);
+ return cur->bc_mp->m_alloc_mxr[level != 0];
}
-/*
- * Log block pointer fields from a btree block (nonleaf).
- */
STATIC void
-xfs_alloc_log_ptrs(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_buf_t *bp, /* buffer containing btree block */
- int pfirst, /* index of first pointer to log */
- int plast) /* index of last pointer to log */
+xfs_allocbt_init_key_from_rec(
+ union xfs_btree_key *key,
+ union xfs_btree_rec *rec)
{
- xfs_alloc_block_t *block; /* btree block to log from */
- int first; /* first byte offset logged */
- int last; /* last byte offset logged */
- xfs_alloc_ptr_t *pp; /* block-pointer pointer in btree blk */
-
- block = XFS_BUF_TO_ALLOC_BLOCK(bp);
- pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
- first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block);
- last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block);
- xfs_trans_log_buf(cur->bc_tp, bp, first, last);
+ ASSERT(rec->alloc.ar_startblock != 0);
+
+ key->alloc.ar_startblock = rec->alloc.ar_startblock;
+ key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}
-/*
- * Log records from a btree block (leaf).
- */
STATIC void
-xfs_alloc_log_recs(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_buf_t *bp, /* buffer containing btree block */
- int rfirst, /* index of first record to log */
- int rlast) /* index of last record to log */
+xfs_allocbt_init_rec_from_key(
+ union xfs_btree_key *key,
+ union xfs_btree_rec *rec)
{
- xfs_alloc_block_t *block; /* btree block to log from */
- int first; /* first byte offset logged */
- int last; /* last byte offset logged */
- xfs_alloc_rec_t *rp; /* record pointer for btree block */
-
-
- block = XFS_BUF_TO_ALLOC_BLOCK(bp);
- rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
-#ifdef DEBUG
- {
- xfs_agf_t *agf;
- xfs_alloc_rec_t *p;
-
- agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
- for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++)
- ASSERT(be32_to_cpu(p->ar_startblock) +
- be32_to_cpu(p->ar_blockcount) <=
- be32_to_cpu(agf->agf_length));
- }
-#endif
- first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block);
- last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block);
- xfs_trans_log_buf(cur->bc_tp, bp, first, last);
+ ASSERT(key->alloc.ar_startblock != 0);
+
+ rec->alloc.ar_startblock = key->alloc.ar_startblock;
+ rec->alloc.ar_blockcount = key->alloc.ar_blockcount;
}
-/*
- * Lookup the record. The cursor is made to point to it, based on dir.
- * Return 0 if can't find any such record, 1 for success.
- */
-STATIC int /* error */
-xfs_alloc_lookup(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_lookup_t dir, /* <=, ==, or >= */
- int *stat) /* success/failure */
+STATIC void
+xfs_allocbt_init_rec_from_cur(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec)
{
- xfs_agblock_t agbno; /* a.g. relative btree block number */
- xfs_agnumber_t agno; /* allocation group number */
- xfs_alloc_block_t *block=NULL; /* current btree block */
- int diff; /* difference for the current key */
- int error; /* error return value */
- int keyno=0; /* current key number */
- int level; /* level in the btree */
- xfs_mount_t *mp; /* file system mount point */
-
- XFS_STATS_INC(xs_abt_lookup);
- /*
- * Get the allocation group header, and the root block number.
- */
- mp = cur->bc_mp;
-
- {
- xfs_agf_t *agf; /* a.g. freespace header */
+ ASSERT(cur->bc_rec.a.ar_startblock != 0);
- agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
- agno = be32_to_cpu(agf->agf_seqno);
- agbno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]);
- }
- /*
- * Iterate over each level in the btree, starting at the root.
- * For each level above the leaves, find the key we need, based
- * on the lookup record, then follow the corresponding block
- * pointer down to the next level.
- */
- for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
- xfs_buf_t *bp; /* buffer pointer for btree block */
- xfs_daddr_t d; /* disk address of btree block */
-
- /*
- * Get the disk address we're looking for.
- */
- d = XFS_AGB_TO_DADDR(mp, agno, agbno);
- /*
- * If the old buffer at this level is for a different block,
- * throw it away, otherwise just use it.
- */
- bp = cur->bc_bufs[level];
- if (bp && XFS_BUF_ADDR(bp) != d)
- bp = NULL;
- if (!bp) {
- /*
- * Need to get a new buffer. Read it, then
- * set it in the cursor, releasing the old one.
- */
- if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, agno,
- agbno, 0, &bp, XFS_ALLOC_BTREE_REF)))
- return error;
- xfs_btree_setbuf(cur, level, bp);
- /*
- * Point to the btree block, now that we have the buffer
- */
- block = XFS_BUF_TO_ALLOC_BLOCK(bp);
- if ((error = xfs_btree_check_sblock(cur, block, level,
- bp)))
- return error;
- } else
- block = XFS_BUF_TO_ALLOC_BLOCK(bp);
- /*
- * If we already had a key match at a higher level, we know
- * we need to use the first entry in this block.
- */
- if (diff == 0)
- keyno = 1;
- /*
- * Otherwise we need to search this block. Do a binary search.
- */
- else {
- int high; /* high entry number */
- xfs_alloc_key_t *kkbase=NULL;/* base of keys in block */
- xfs_alloc_rec_t *krbase=NULL;/* base of records in block */
- int low; /* low entry number */
-
- /*
- * Get a pointer to keys or records.
- */
- if (level > 0)
- kkbase = XFS_ALLOC_KEY_ADDR(block, 1, cur);
- else
- krbase = XFS_ALLOC_REC_ADDR(block, 1, cur);
- /*
- * Set low and high entry numbers, 1-based.
- */
- low = 1;
- if (!(high = be16_to_cpu(block->bb_numrecs))) {
- /*
- * If the block is empty, the tree must
- * be an empty leaf.
- */
- ASSERT(level == 0 && cur->bc_nlevels == 1);
- cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
- *stat = 0;
- return 0;
- }
- /*
- * Binary search the block.
- */
- while (low <= high) {
- xfs_extlen_t blockcount; /* key value */
- xfs_agblock_t startblock; /* key value */
-
- XFS_STATS_INC(xs_abt_compare);
- /*
- * keyno is average of low and high.
- */
- keyno = (low + high) >> 1;
- /*
- * Get startblock & blockcount.
- */
- if (level > 0) {
- xfs_alloc_key_t *kkp;
-
- kkp = kkbase + keyno - 1;
- startblock = be32_to_cpu(kkp->ar_startblock);
- blockcount = be32_to_cpu(kkp->ar_blockcount);
- } else {
- xfs_alloc_rec_t *krp;
-
- krp = krbase + keyno - 1;
- startblock = be32_to_cpu(krp->ar_startblock);
- blockcount = be32_to_cpu(krp->ar_blockcount);
- }
- /*
- * Compute difference to get next direction.
- */
- if (cur->bc_btnum == XFS_BTNUM_BNO)
- diff = (int)startblock -
- (int)cur->bc_rec.a.ar_startblock;
- else if (!(diff = (int)blockcount -
- (int)cur->bc_rec.a.ar_blockcount))
- diff = (int)startblock -
- (int)cur->bc_rec.a.ar_startblock;
- /*
- * Less than, move right.
- */
- if (diff < 0)
- low = keyno + 1;
- /*
- * Greater than, move left.
- */
- else if (diff > 0)
- high = keyno - 1;
- /*
- * Equal, we're done.
- */
- else
- break;
- }
- }
- /*
- * If there are more levels, set up for the next level
- * by getting the block number and filling in the cursor.
- */
- if (level > 0) {
- /*
- * If we moved left, need the previous key number,
- * unless there isn't one.
- */
- if (diff > 0 && --keyno < 1)
- keyno = 1;
- agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, keyno, cur));
-#ifdef DEBUG
- if ((error = xfs_btree_check_sptr(cur, agbno, level)))
- return error;
-#endif
- cur->bc_ptrs[level] = keyno;
- }
- }
- /*
- * Done with the search.
- * See if we need to adjust the results.
- */
- if (dir != XFS_LOOKUP_LE && diff < 0) {
- keyno++;
- /*
- * If ge search and we went off the end of the block, but it's
- * not the last block, we're in the wrong block.
- */
- if (dir == XFS_LOOKUP_GE &&
- keyno > be16_to_cpu(block->bb_numrecs) &&
- be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) {
- int i;
-
- cur->bc_ptrs[0] = keyno;
- if ((error = xfs_alloc_increment(cur, 0, &i)))
- return error;
- XFS_WANT_CORRUPTED_RETURN(i == 1);
- *stat = 1;
- return 0;
- }
- }
- else if (dir == XFS_LOOKUP_LE && diff > 0)
- keyno--;
- cur->bc_ptrs[0] = keyno;
- /*
- * Return if we succeeded or not.
- */
- if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs))
- *stat = 0;
- else
- *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0));
- return 0;
+ rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
+ rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}
-/*
- * Move 1 record left from cur/level if possible.
- * Update cur to reflect the new path.
- */
-STATIC int /* error */
-xfs_alloc_lshift(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level, /* level to shift record on */
- int *stat) /* success/failure */
+STATIC void
+xfs_allocbt_init_ptr_from_cur(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr)
{
- int error; /* error return value */
-#ifdef DEBUG
- int i; /* loop index */
-#endif
- xfs_alloc_key_t key; /* key value for leaf level upward */
- xfs_buf_t *lbp; /* buffer for left neighbor block */
- xfs_alloc_block_t *left; /* left neighbor btree block */
- int nrec; /* new number of left block entries */
- xfs_buf_t *rbp; /* buffer for right (current) block */
- xfs_alloc_block_t *right; /* right (current) btree block */
- xfs_alloc_key_t *rkp=NULL; /* key pointer for right block */
- xfs_alloc_ptr_t *rpp=NULL; /* address pointer for right block */
- xfs_alloc_rec_t *rrp=NULL; /* record pointer for right block */
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
- /*
- * Set up variables for this block as "right".
- */
- rbp = cur->bc_bufs[level];
- right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
- return error;
-#endif
- /*
- * If we've got no left sibling then we can't shift an entry left.
- */
- if (be32_to_cpu(right->bb_leftsib) == NULLAGBLOCK) {
- *stat = 0;
- return 0;
- }
- /*
- * If the cursor entry is the one that would be moved, don't
- * do it... it's too complicated.
- */
- if (cur->bc_ptrs[level] <= 1) {
- *stat = 0;
- return 0;
- }
- /*
- * Set up the left neighbor as "left".
- */
- if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
- cur->bc_private.a.agno, be32_to_cpu(right->bb_leftsib),
- 0, &lbp, XFS_ALLOC_BTREE_REF)))
- return error;
- left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
- if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
- return error;
- /*
- * If it's full, it can't take another entry.
- */
- if (be16_to_cpu(left->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
- *stat = 0;
- return 0;
- }
- nrec = be16_to_cpu(left->bb_numrecs) + 1;
- /*
- * If non-leaf, copy a key and a ptr to the left block.
- */
- if (level > 0) {
- xfs_alloc_key_t *lkp; /* key pointer for left block */
- xfs_alloc_ptr_t *lpp; /* address pointer for left block */
-
- lkp = XFS_ALLOC_KEY_ADDR(left, nrec, cur);
- rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
- *lkp = *rkp;
- xfs_alloc_log_keys(cur, lbp, nrec, nrec);
- lpp = XFS_ALLOC_PTR_ADDR(left, nrec, cur);
- rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
-#ifdef DEBUG
- if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level)))
- return error;
-#endif
- *lpp = *rpp;
- xfs_alloc_log_ptrs(cur, lbp, nrec, nrec);
- xfs_btree_check_key(cur->bc_btnum, lkp - 1, lkp);
- }
- /*
- * If leaf, copy a record to the left block.
- */
- else {
- xfs_alloc_rec_t *lrp; /* record pointer for left block */
-
- lrp = XFS_ALLOC_REC_ADDR(left, nrec, cur);
- rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
- *lrp = *rrp;
- xfs_alloc_log_recs(cur, lbp, nrec, nrec);
- xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp);
- }
- /*
- * Bump and log left's numrecs, decrement and log right's numrecs.
- */
- be16_add_cpu(&left->bb_numrecs, 1);
- xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
- be16_add_cpu(&right->bb_numrecs, -1);
- xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
- /*
- * Slide the contents of right down one entry.
- */
- if (level > 0) {
-#ifdef DEBUG
- for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
- if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i + 1]),
- level)))
- return error;
- }
-#endif
- memmove(rkp, rkp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
- memmove(rpp, rpp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
- xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
- xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
- } else {
- memmove(rrp, rrp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
- xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
- key.ar_startblock = rrp->ar_startblock;
- key.ar_blockcount = rrp->ar_blockcount;
- rkp = &key;
- }
- /*
- * Update the parent key values of right.
- */
- if ((error = xfs_alloc_updkey(cur, rkp, level + 1)))
- return error;
- /*
- * Slide the cursor value left one.
- */
- cur->bc_ptrs[level]--;
- *stat = 1;
- return 0;
+ ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
+ ASSERT(agf->agf_roots[cur->bc_btnum] != 0);
+
+ ptr->s = agf->agf_roots[cur->bc_btnum];
}
-/*
- * Allocate a new root block, fill it in.
- */
-STATIC int /* error */
-xfs_alloc_newroot(
- xfs_btree_cur_t *cur, /* btree cursor */
- int *stat) /* success/failure */
+STATIC __int64_t
+xfs_allocbt_key_diff(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *key)
{
- int error; /* error return value */
- xfs_agblock_t lbno; /* left block number */
- xfs_buf_t *lbp; /* left btree buffer */
- xfs_alloc_block_t *left; /* left btree block */
- xfs_mount_t *mp; /* mount structure */
- xfs_agblock_t nbno; /* new block number */
- xfs_buf_t *nbp; /* new (root) buffer */
- xfs_alloc_block_t *new; /* new (root) btree block */
- int nptr; /* new value for key index, 1 or 2 */
- xfs_agblock_t rbno; /* right block number */
- xfs_buf_t *rbp; /* right btree buffer */
- xfs_alloc_block_t *right; /* right btree block */
-
- mp = cur->bc_mp;
-
- ASSERT(cur->bc_nlevels < XFS_AG_MAXLEVELS(mp));
- /*
- * Get a buffer from the freelist blocks, for the new root.
- */
- error = xfs_alloc_get_freelist(cur->bc_tp,
- cur->bc_private.a.agbp, &nbno, 1);
- if (error)
- return error;
- /*
- * None available, we fail.
- */
- if (nbno == NULLAGBLOCK) {
- *stat = 0;
- return 0;
- }
- xfs_trans_agbtree_delta(cur->bc_tp, 1);
- nbp = xfs_btree_get_bufs(mp, cur->bc_tp, cur->bc_private.a.agno, nbno,
- 0);
- new = XFS_BUF_TO_ALLOC_BLOCK(nbp);
- /*
- * Set the root data in the a.g. freespace structure.
- */
- {
- xfs_agf_t *agf; /* a.g. freespace header */
- xfs_agnumber_t seqno;
-
- agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
- agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno);
- be32_add_cpu(&agf->agf_levels[cur->bc_btnum], 1);
- seqno = be32_to_cpu(agf->agf_seqno);
- mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++;
- xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
- XFS_AGF_ROOTS | XFS_AGF_LEVELS);
- }
- /*
- * At the previous root level there are now two blocks: the old
- * root, and the new block generated when it was split.
- * We don't know which one the cursor is pointing at, so we
- * set up variables "left" and "right" for each case.
- */
- lbp = cur->bc_bufs[cur->bc_nlevels - 1];
- left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp)))
- return error;
-#endif
- if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) {
- /*
- * Our block is left, pick up the right block.
- */
- lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp));
- rbno = be32_to_cpu(left->bb_rightsib);
- if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
- cur->bc_private.a.agno, rbno, 0, &rbp,
- XFS_ALLOC_BTREE_REF)))
- return error;
- right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
- if ((error = xfs_btree_check_sblock(cur, right,
- cur->bc_nlevels - 1, rbp)))
- return error;
- nptr = 1;
- } else {
- /*
- * Our block is right, pick up the left block.
- */
- rbp = lbp;
- right = left;
- rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp));
- lbno = be32_to_cpu(right->bb_leftsib);
- if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
- cur->bc_private.a.agno, lbno, 0, &lbp,
- XFS_ALLOC_BTREE_REF)))
- return error;
- left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
- if ((error = xfs_btree_check_sblock(cur, left,
- cur->bc_nlevels - 1, lbp)))
- return error;
- nptr = 2;
- }
- /*
- * Fill in the new block's btree header and log it.
- */
- new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
- new->bb_level = cpu_to_be16(cur->bc_nlevels);
- new->bb_numrecs = cpu_to_be16(2);
- new->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
- new->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
- xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS);
- ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK);
- /*
- * Fill in the key data in the new root.
- */
- {
- xfs_alloc_key_t *kp; /* btree key pointer */
+ xfs_alloc_rec_incore_t *rec = &cur->bc_rec.a;
+ xfs_alloc_key_t *kp = &key->alloc;
+ __int64_t diff;
- kp = XFS_ALLOC_KEY_ADDR(new, 1, cur);
- if (be16_to_cpu(left->bb_level) > 0) {
- kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur);
- kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);
- } else {
- xfs_alloc_rec_t *rp; /* btree record pointer */
-
- rp = XFS_ALLOC_REC_ADDR(left, 1, cur);
- kp[0].ar_startblock = rp->ar_startblock;
- kp[0].ar_blockcount = rp->ar_blockcount;
- rp = XFS_ALLOC_REC_ADDR(right, 1, cur);
- kp[1].ar_startblock = rp->ar_startblock;
- kp[1].ar_blockcount = rp->ar_blockcount;
- }
+ if (cur->bc_btnum == XFS_BTNUM_BNO) {
+ return (__int64_t)be32_to_cpu(kp->ar_startblock) -
+ rec->ar_startblock;
}
- xfs_alloc_log_keys(cur, nbp, 1, 2);
- /*
- * Fill in the pointer data in the new root.
- */
- {
- xfs_alloc_ptr_t *pp; /* btree address pointer */
- pp = XFS_ALLOC_PTR_ADDR(new, 1, cur);
- pp[0] = cpu_to_be32(lbno);
- pp[1] = cpu_to_be32(rbno);
- }
- xfs_alloc_log_ptrs(cur, nbp, 1, 2);
- /*
- * Fix up the cursor.
- */
- xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
- cur->bc_ptrs[cur->bc_nlevels] = nptr;
- cur->bc_nlevels++;
- *stat = 1;
- return 0;
+ diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
+ if (diff)
+ return diff;
+
+ return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}
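
The replacement key_diff helper above feeds the generic btree lookup: for the by-block (bno) tree it compares only the start block, while the by-size (cnt) tree compares the block count first and breaks ties on the start block. Purely as an illustration of how a signed diff callback can drive a search (this is not the kernel's lookup code; the names below are hypothetical), a binary search over an in-memory key array could look like this:

#include <stddef.h>

/* Hypothetical sketch: binary search driven by a key_diff-style callback. */
typedef long long (*keydiff_fn)(const void *key, const void *search_rec);

static int sketch_lookup(const void *keys, size_t nkeys, size_t key_size,
			 const void *search_rec, keydiff_fn diff,
			 size_t *slot)
{
	size_t lo = 0, hi = nkeys;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		const char *key = (const char *)keys + mid * key_size;
		long long d = diff(key, search_rec);

		if (d == 0) {
			*slot = mid;
			return 1;		/* exact match */
		}
		if (d < 0)
			lo = mid + 1;		/* key sorts before the search record */
		else
			hi = mid;		/* key sorts after it */
	}
	*slot = lo;				/* insertion point for GE-style lookups */
	return 0;
}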
-/*
- * Move 1 record right from cur/level if possible.
- * Update cur to reflect the new path.
- */
-STATIC int /* error */
-xfs_alloc_rshift(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level, /* level to shift record on */
- int *stat) /* success/failure */
+static bool
+xfs_allocbt_verify(
+ struct xfs_buf *bp)
{
- int error; /* error return value */
- int i; /* loop index */
- xfs_alloc_key_t key; /* key value for leaf level upward */
- xfs_buf_t *lbp; /* buffer for left (current) block */
- xfs_alloc_block_t *left; /* left (current) btree block */
- xfs_buf_t *rbp; /* buffer for right neighbor block */
- xfs_alloc_block_t *right; /* right neighbor btree block */
- xfs_alloc_key_t *rkp; /* key pointer for right block */
- xfs_btree_cur_t *tcur; /* temporary cursor */
-
- /*
- * Set up variables for this block as "left".
- */
- lbp = cur->bc_bufs[level];
- left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
- return error;
-#endif
- /*
- * If we've got no right sibling then we can't shift an entry right.
- */
- if (be32_to_cpu(left->bb_rightsib) == NULLAGBLOCK) {
- *stat = 0;
- return 0;
- }
- /*
- * If the cursor entry is the one that would be moved, don't
- * do it... it's too complicated.
- */
- if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) {
- *stat = 0;
- return 0;
- }
- /*
- * Set up the right neighbor as "right".
- */
- if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
- cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib),
- 0, &rbp, XFS_ALLOC_BTREE_REF)))
- return error;
- right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
- if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
- return error;
- /*
- * If it's full, it can't take another entry.
- */
- if (be16_to_cpu(right->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
- *stat = 0;
- return 0;
- }
- /*
- * Make a hole at the start of the right neighbor block, then
- * copy the last left block entry to the hole.
- */
- if (level > 0) {
- xfs_alloc_key_t *lkp; /* key pointer for left block */
- xfs_alloc_ptr_t *lpp; /* address pointer for left block */
- xfs_alloc_ptr_t *rpp; /* address pointer for right block */
-
- lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
- lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
- rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
- rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
-#ifdef DEBUG
- for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) {
- if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level)))
- return error;
- }
-#endif
- memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
- memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
-#ifdef DEBUG
- if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level)))
- return error;
-#endif
- *rkp = *lkp;
- *rpp = *lpp;
- xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
- xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
- xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1);
- } else {
- xfs_alloc_rec_t *lrp; /* record pointer for left block */
- xfs_alloc_rec_t *rrp; /* record pointer for right block */
-
- lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs), cur);
- rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
- memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
- *rrp = *lrp;
- xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
- key.ar_startblock = rrp->ar_startblock;
- key.ar_blockcount = rrp->ar_blockcount;
- rkp = &key;
- xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1);
- }
- /*
- * Decrement and log left's numrecs, bump and log right's numrecs.
- */
- be16_add_cpu(&left->bb_numrecs, -1);
- xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
- be16_add_cpu(&right->bb_numrecs, 1);
- xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
- /*
- * Using a temporary cursor, update the parent key values of the
- * block on the right.
- */
- if ((error = xfs_btree_dup_cursor(cur, &tcur)))
- return error;
- i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- if ((error = xfs_alloc_increment(tcur, level, &i)) ||
- (error = xfs_alloc_updkey(tcur, rkp, level + 1)))
- goto error0;
- xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
- *stat = 1;
- return 0;
-error0:
- xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
- return error;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ struct xfs_perag *pag = bp->b_pag;
+ unsigned int level;
+
+ /*
+ * magic number and level verification
+ *
+ * During growfs operations, we can't verify the exact level or owner as
+ * the perag is not fully initialised and hence not attached to the
+ * buffer. In this case, check against the maximum tree depth.
+ *
+ * Similarly, during log recovery we will have a perag structure
+ * attached, but the agf information will not yet have been initialised
+ * from the on disk AGF. Again, we can only check against maximum limits
+ * in this case.
+ */
+ level = be16_to_cpu(block->bb_level);
+ switch (block->bb_magic) {
+ case cpu_to_be32(XFS_ABTB_CRC_MAGIC):
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return false;
+ if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid))
+ return false;
+ if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
+ return false;
+ if (pag &&
+ be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
+ return false;
+ /* fall through */
+ case cpu_to_be32(XFS_ABTB_MAGIC):
+ if (pag && pag->pagf_init) {
+ if (level >= pag->pagf_levels[XFS_BTNUM_BNOi])
+ return false;
+ } else if (level >= mp->m_ag_maxlevels)
+ return false;
+ break;
+ case cpu_to_be32(XFS_ABTC_CRC_MAGIC):
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return false;
+ if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid))
+ return false;
+ if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
+ return false;
+ if (pag &&
+ be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
+ return false;
+ /* fall through */
+ case cpu_to_be32(XFS_ABTC_MAGIC):
+ if (pag && pag->pagf_init) {
+ if (level >= pag->pagf_levels[XFS_BTNUM_CNTi])
+ return false;
+ } else if (level >= mp->m_ag_maxlevels)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ /* numrecs verification */
+ if (be16_to_cpu(block->bb_numrecs) > mp->m_alloc_mxr[level != 0])
+ return false;
+
+ /* sibling pointer verification */
+ if (!block->bb_u.s.bb_leftsib ||
+ (be32_to_cpu(block->bb_u.s.bb_leftsib) >= mp->m_sb.sb_agblocks &&
+ block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK)))
+ return false;
+ if (!block->bb_u.s.bb_rightsib ||
+ (be32_to_cpu(block->bb_u.s.bb_rightsib) >= mp->m_sb.sb_agblocks &&
+ block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK)))
+ return false;
+
+ return true;
}
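
The sibling checks at the end of the verifier accept either NULLAGBLOCK or a block number strictly inside the AG, and reject a zero pointer outright (block 0 of an AG holds the superblock, so it can never be a btree block). A minimal restatement of that rule as a standalone predicate, with names of our own rather than the kernel's:

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_NULLAGBLOCK	((uint32_t)-1)

/* Sketch: is @agbno acceptable as an allocbt sibling pointer? */
static bool sketch_sibling_ok(uint32_t agbno, uint32_t agblocks)
{
	if (agbno == 0)
		return false;			/* block 0 is the superblock */
	if (agbno == SKETCH_NULLAGBLOCK)
		return true;			/* no sibling at all */
	return agbno < agblocks;		/* must lie inside the AG */
}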
-/*
- * Split cur/level block in half.
- * Return new block number and its first record (to be inserted into parent).
- */
-STATIC int /* error */
-xfs_alloc_split(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level, /* level to split */
- xfs_agblock_t *bnop, /* output: block number allocated */
- xfs_alloc_key_t *keyp, /* output: first key of new block */
- xfs_btree_cur_t **curp, /* output: new cursor */
- int *stat) /* success/failure */
+static void
+xfs_allocbt_read_verify(
+ struct xfs_buf *bp)
{
- int error; /* error return value */
- int i; /* loop index/record number */
- xfs_agblock_t lbno; /* left (current) block number */
- xfs_buf_t *lbp; /* buffer for left block */
- xfs_alloc_block_t *left; /* left (current) btree block */
- xfs_agblock_t rbno; /* right (new) block number */
- xfs_buf_t *rbp; /* buffer for right block */
- xfs_alloc_block_t *right; /* right (new) btree block */
+ if (!xfs_btree_sblock_verify_crc(bp))
+ xfs_buf_ioerror(bp, EFSBADCRC);
+ else if (!xfs_allocbt_verify(bp))
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
- /*
- * Allocate the new block from the freelist.
- * If we can't do it, we're toast. Give up.
- */
- error = xfs_alloc_get_freelist(cur->bc_tp,
- cur->bc_private.a.agbp, &rbno, 1);
- if (error)
- return error;
- if (rbno == NULLAGBLOCK) {
- *stat = 0;
- return 0;
- }
- xfs_trans_agbtree_delta(cur->bc_tp, 1);
- rbp = xfs_btree_get_bufs(cur->bc_mp, cur->bc_tp, cur->bc_private.a.agno,
- rbno, 0);
- /*
- * Set up the new block as "right".
- */
- right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
- /*
- * "Left" is the current (according to the cursor) block.
- */
- lbp = cur->bc_bufs[level];
- left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
- return error;
-#endif
- /*
- * Fill in the btree header for the new block.
- */
- right->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
- right->bb_level = left->bb_level;
- right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
- /*
- * Make sure that if there's an odd number of entries now, that
- * each new block will have the same number of entries.
- */
- if ((be16_to_cpu(left->bb_numrecs) & 1) &&
- cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
- be16_add_cpu(&right->bb_numrecs, 1);
- i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
- /*
- * For non-leaf blocks, copy keys and addresses over to the new block.
- */
- if (level > 0) {
- xfs_alloc_key_t *lkp; /* left btree key pointer */
- xfs_alloc_ptr_t *lpp; /* left btree address pointer */
- xfs_alloc_key_t *rkp; /* right btree key pointer */
- xfs_alloc_ptr_t *rpp; /* right btree address pointer */
-
- lkp = XFS_ALLOC_KEY_ADDR(left, i, cur);
- lpp = XFS_ALLOC_PTR_ADDR(left, i, cur);
- rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
- rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
-#ifdef DEBUG
- for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
- if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
- return error;
- }
-#endif
- memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
- memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
- xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
- xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
- *keyp = *rkp;
- }
- /*
- * For leaf blocks, copy records over to the new block.
- */
- else {
- xfs_alloc_rec_t *lrp; /* left btree record pointer */
- xfs_alloc_rec_t *rrp; /* right btree record pointer */
-
- lrp = XFS_ALLOC_REC_ADDR(left, i, cur);
- rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
- memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
- xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
- keyp->ar_startblock = rrp->ar_startblock;
- keyp->ar_blockcount = rrp->ar_blockcount;
- }
- /*
- * Find the left block number by looking in the buffer.
- * Adjust numrecs, sibling pointers.
- */
- lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp));
- be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
- right->bb_rightsib = left->bb_rightsib;
- left->bb_rightsib = cpu_to_be32(rbno);
- right->bb_leftsib = cpu_to_be32(lbno);
- xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS);
- xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
- /*
- * If there's a block to the new block's right, make that block
- * point back to right instead of to left.
- */
- if (be32_to_cpu(right->bb_rightsib) != NULLAGBLOCK) {
- xfs_alloc_block_t *rrblock; /* rr btree block */
- xfs_buf_t *rrbp; /* buffer for rrblock */
-
- if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
- cur->bc_private.a.agno, be32_to_cpu(right->bb_rightsib), 0,
- &rrbp, XFS_ALLOC_BTREE_REF)))
- return error;
- rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp);
- if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
- return error;
- rrblock->bb_leftsib = cpu_to_be32(rbno);
- xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB);
- }
- /*
- * If the cursor is really in the right block, move it there.
- * If it's just pointing past the last entry in left, then we'll
- * insert there, so don't change anything in that case.
- */
- if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) {
- xfs_btree_setbuf(cur, level, rbp);
- cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs);
- }
- /*
- * If there are more levels, we'll need another cursor which refers to
- * the right block, no matter where this cursor was.
- */
- if (level + 1 < cur->bc_nlevels) {
- if ((error = xfs_btree_dup_cursor(cur, curp)))
- return error;
- (*curp)->bc_ptrs[level + 1]++;
+ if (bp->b_error) {
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
+ xfs_verifier_error(bp);
}
- *bnop = rbno;
- *stat = 1;
- return 0;
}
-/*
- * Update keys at all levels from here to the root along the cursor's path.
- */
-STATIC int /* error */
-xfs_alloc_updkey(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_alloc_key_t *keyp, /* new key value to update to */
- int level) /* starting level for update */
+static void
+xfs_allocbt_write_verify(
+ struct xfs_buf *bp)
{
- int ptr; /* index of key in block */
-
- /*
- * Go up the tree from this level toward the root.
- * At each level, update the key value to the value input.
- * Stop when we reach a level where the cursor isn't pointing
- * at the first entry in the block.
- */
- for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
- xfs_alloc_block_t *block; /* btree block */
- xfs_buf_t *bp; /* buffer for block */
-#ifdef DEBUG
- int error; /* error return value */
-#endif
- xfs_alloc_key_t *kp; /* ptr to btree block keys */
-
- bp = cur->bc_bufs[level];
- block = XFS_BUF_TO_ALLOC_BLOCK(bp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
- return error;
-#endif
- ptr = cur->bc_ptrs[level];
- kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur);
- *kp = *keyp;
- xfs_alloc_log_keys(cur, bp, ptr, ptr);
+ if (!xfs_allocbt_verify(bp)) {
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
+ xfs_verifier_error(bp);
+ return;
}
- return 0;
+ xfs_btree_sblock_calc_crc(bp);
}
-/*
- * Externally visible routines.
- */
+const struct xfs_buf_ops xfs_allocbt_buf_ops = {
+ .verify_read = xfs_allocbt_read_verify,
+ .verify_write = xfs_allocbt_write_verify,
+};
-/*
- * Decrement cursor by one record at the level.
- * For nonzero levels the leaf-ward information is untouched.
- */
-int /* error */
-xfs_alloc_decrement(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level, /* level in btree, 0 is leaf */
- int *stat) /* success/failure */
-{
- xfs_alloc_block_t *block; /* btree block */
- int error; /* error return value */
- int lev; /* btree level */
- ASSERT(level < cur->bc_nlevels);
- /*
- * Read-ahead to the left at this level.
- */
- xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);
- /*
- * Decrement the ptr at this level. If we're still in the block
- * then we're done.
- */
- if (--cur->bc_ptrs[level] > 0) {
- *stat = 1;
- return 0;
- }
- /*
- * Get a pointer to the btree block.
- */
- block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[level]);
-#ifdef DEBUG
- if ((error = xfs_btree_check_sblock(cur, block, level,
- cur->bc_bufs[level])))
- return error;
-#endif
- /*
- * If we just went off the left edge of the tree, return failure.
- */
- if (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK) {
- *stat = 0;
- return 0;
- }
- /*
- * March up the tree decrementing pointers.
- * Stop when we don't go off the left edge of a block.
- */
- for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
- if (--cur->bc_ptrs[lev] > 0)
- break;
- /*
- * Read-ahead the left block, we're going to read it
- * in the next loop.
- */
- xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
- }
- /*
- * If we went off the root then we are seriously confused.
- */
- ASSERT(lev < cur->bc_nlevels);
- /*
- * Now walk back down the tree, fixing up the cursor's buffer
- * pointers and key numbers.
- */
- for (block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]); lev > level; ) {
- xfs_agblock_t agbno; /* block number of btree block */
- xfs_buf_t *bp; /* buffer pointer for block */
-
- agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
- if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
- cur->bc_private.a.agno, agbno, 0, &bp,
- XFS_ALLOC_BTREE_REF)))
- return error;
- lev--;
- xfs_btree_setbuf(cur, lev, bp);
- block = XFS_BUF_TO_ALLOC_BLOCK(bp);
- if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
- return error;
- cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs);
+#if defined(DEBUG) || defined(XFS_WARN)
+STATIC int
+xfs_allocbt_keys_inorder(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *k1,
+ union xfs_btree_key *k2)
+{
+ if (cur->bc_btnum == XFS_BTNUM_BNO) {
+ return be32_to_cpu(k1->alloc.ar_startblock) <
+ be32_to_cpu(k2->alloc.ar_startblock);
+ } else {
+ return be32_to_cpu(k1->alloc.ar_blockcount) <
+ be32_to_cpu(k2->alloc.ar_blockcount) ||
+ (k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
+ be32_to_cpu(k1->alloc.ar_startblock) <
+ be32_to_cpu(k2->alloc.ar_startblock));
}
- *stat = 1;
- return 0;
}
-/*
- * Delete the record pointed to by cur.
- * The cursor refers to the place where the record was (could be inserted)
- * when the operation returns.
- */
-int /* error */
-xfs_alloc_delete(
- xfs_btree_cur_t *cur, /* btree cursor */
- int *stat) /* success/failure */
+STATIC int
+xfs_allocbt_recs_inorder(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *r1,
+ union xfs_btree_rec *r2)
{
- int error; /* error return value */
- int i; /* result code */
- int level; /* btree level */
-
- /*
- * Go up the tree, starting at leaf level.
- * If 2 is returned then a join was done; go to the next level.
- * Otherwise we are done.
- */
- for (level = 0, i = 2; i == 2; level++) {
- if ((error = xfs_alloc_delrec(cur, level, &i)))
- return error;
- }
- if (i == 0) {
- for (level = 1; level < cur->bc_nlevels; level++) {
- if (cur->bc_ptrs[level] == 0) {
- if ((error = xfs_alloc_decrement(cur, level, &i)))
- return error;
- break;
- }
- }
+ if (cur->bc_btnum == XFS_BTNUM_BNO) {
+ return be32_to_cpu(r1->alloc.ar_startblock) +
+ be32_to_cpu(r1->alloc.ar_blockcount) <=
+ be32_to_cpu(r2->alloc.ar_startblock);
+ } else {
+ return be32_to_cpu(r1->alloc.ar_blockcount) <
+ be32_to_cpu(r2->alloc.ar_blockcount) ||
+ (r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
+ be32_to_cpu(r1->alloc.ar_startblock) <
+ be32_to_cpu(r2->alloc.ar_startblock));
}
- *stat = i;
- return 0;
}
+#endif /* DEBUG || XFS_WARN */
+
+static const struct xfs_btree_ops xfs_allocbt_ops = {
+ .rec_len = sizeof(xfs_alloc_rec_t),
+ .key_len = sizeof(xfs_alloc_key_t),
+
+ .dup_cursor = xfs_allocbt_dup_cursor,
+ .set_root = xfs_allocbt_set_root,
+ .alloc_block = xfs_allocbt_alloc_block,
+ .free_block = xfs_allocbt_free_block,
+ .update_lastrec = xfs_allocbt_update_lastrec,
+ .get_minrecs = xfs_allocbt_get_minrecs,
+ .get_maxrecs = xfs_allocbt_get_maxrecs,
+ .init_key_from_rec = xfs_allocbt_init_key_from_rec,
+ .init_rec_from_key = xfs_allocbt_init_rec_from_key,
+ .init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
+ .init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
+ .key_diff = xfs_allocbt_key_diff,
+ .buf_ops = &xfs_allocbt_buf_ops,
+#if defined(DEBUG) || defined(XFS_WARN)
+ .keys_inorder = xfs_allocbt_keys_inorder,
+ .recs_inorder = xfs_allocbt_recs_inorder,
+#endif
+};
/*
- * Get the data from the pointed-to record.
+ * Allocate a new allocation btree cursor.
*/
-int /* error */
-xfs_alloc_get_rec(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_agblock_t *bno, /* output: starting block of extent */
- xfs_extlen_t *len, /* output: length of extent */
- int *stat) /* output: success/failure */
+struct xfs_btree_cur * /* new alloc btree cursor */
+xfs_allocbt_init_cursor(
+ struct xfs_mount *mp, /* file system mount point */
+ struct xfs_trans *tp, /* transaction pointer */
+ struct xfs_buf *agbp, /* buffer for agf structure */
+ xfs_agnumber_t agno, /* allocation group number */
+ xfs_btnum_t btnum) /* btree identifier */
{
- xfs_alloc_block_t *block; /* btree block */
-#ifdef DEBUG
- int error; /* error return value */
-#endif
- int ptr; /* record number */
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_btree_cur *cur;
- ptr = cur->bc_ptrs[0];
- block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]);
-#ifdef DEBUG
- if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0])))
- return error;
-#endif
- /*
- * Off the right end or left end, return failure.
- */
- if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) {
- *stat = 0;
- return 0;
- }
- /*
- * Point to the record and extract its data.
- */
- {
- xfs_alloc_rec_t *rec; /* record data */
+ ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
- rec = XFS_ALLOC_REC_ADDR(block, ptr, cur);
- *bno = be32_to_cpu(rec->ar_startblock);
- *len = be32_to_cpu(rec->ar_blockcount);
- }
- *stat = 1;
- return 0;
-}
+ cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
-/*
- * Increment cursor by one record at the level.
- * For nonzero levels the leaf-ward information is untouched.
- */
-int /* error */
-xfs_alloc_increment(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level, /* level in btree, 0 is leaf */
- int *stat) /* success/failure */
-{
- xfs_alloc_block_t *block; /* btree block */
- xfs_buf_t *bp; /* tree block buffer */
- int error; /* error return value */
- int lev; /* btree level */
+ cur->bc_tp = tp;
+ cur->bc_mp = mp;
+ cur->bc_btnum = btnum;
+ cur->bc_blocklog = mp->m_sb.sb_blocklog;
+ cur->bc_ops = &xfs_allocbt_ops;
- ASSERT(level < cur->bc_nlevels);
- /*
- * Read-ahead to the right at this level.
- */
- xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
- /*
- * Get a pointer to the btree block.
- */
- bp = cur->bc_bufs[level];
- block = XFS_BUF_TO_ALLOC_BLOCK(bp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
- return error;
-#endif
- /*
- * Increment the ptr at this level. If we're still in the block
- * then we're done.
- */
- if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) {
- *stat = 1;
- return 0;
- }
- /*
- * If we just went off the right edge of the tree, return failure.
- */
- if (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK) {
- *stat = 0;
- return 0;
- }
- /*
- * March up the tree incrementing pointers.
- * Stop when we don't go off the right edge of a block.
- */
- for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
- bp = cur->bc_bufs[lev];
- block = XFS_BUF_TO_ALLOC_BLOCK(bp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
- return error;
-#endif
- if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs))
- break;
- /*
- * Read-ahead the right block, we're going to read it
- * in the next loop.
- */
- xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
- }
- /*
- * If we went off the root then we are seriously confused.
- */
- ASSERT(lev < cur->bc_nlevels);
- /*
- * Now walk back down the tree, fixing up the cursor's buffer
- * pointers and key numbers.
- */
- for (bp = cur->bc_bufs[lev], block = XFS_BUF_TO_ALLOC_BLOCK(bp);
- lev > level; ) {
- xfs_agblock_t agbno; /* block number of btree block */
-
- agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur));
- if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
- cur->bc_private.a.agno, agbno, 0, &bp,
- XFS_ALLOC_BTREE_REF)))
- return error;
- lev--;
- xfs_btree_setbuf(cur, lev, bp);
- block = XFS_BUF_TO_ALLOC_BLOCK(bp);
- if ((error = xfs_btree_check_sblock(cur, block, lev, bp)))
- return error;
- cur->bc_ptrs[lev] = 1;
+ if (btnum == XFS_BTNUM_CNT) {
+ cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
+ cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
+ } else {
+ cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
}
- *stat = 1;
- return 0;
-}
-
-/*
- * Insert the current record at the point referenced by cur.
- * The cursor may be inconsistent on return if splits have been done.
- */
-int /* error */
-xfs_alloc_insert(
- xfs_btree_cur_t *cur, /* btree cursor */
- int *stat) /* success/failure */
-{
- int error; /* error return value */
- int i; /* result value, 0 for failure */
- int level; /* current level number in btree */
- xfs_agblock_t nbno; /* new block number (split result) */
- xfs_btree_cur_t *ncur; /* new cursor (split result) */
- xfs_alloc_rec_t nrec; /* record being inserted this level */
- xfs_btree_cur_t *pcur; /* previous level's cursor */
-
- level = 0;
- nbno = NULLAGBLOCK;
- nrec.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
- nrec.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
- ncur = NULL;
- pcur = cur;
- /*
- * Loop going up the tree, starting at the leaf level.
- * Stop when we don't get a split block, that must mean that
- * the insert is finished with this level.
- */
- do {
- /*
- * Insert nrec/nbno into this level of the tree.
- * Note if we fail, nbno will be null.
- */
- if ((error = xfs_alloc_insrec(pcur, level++, &nbno, &nrec, &ncur,
- &i))) {
- if (pcur != cur)
- xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
- return error;
- }
- /*
- * See if the cursor we just used is trash.
- * Can't trash the caller's cursor, but otherwise we should
- * if ncur is a new cursor or we're about to be done.
- */
- if (pcur != cur && (ncur || nbno == NULLAGBLOCK)) {
- cur->bc_nlevels = pcur->bc_nlevels;
- xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
- }
- /*
- * If we got a new cursor, switch to it.
- */
- if (ncur) {
- pcur = ncur;
- ncur = NULL;
- }
- } while (nbno != NULLAGBLOCK);
- *stat = i;
- return 0;
-}
-/*
- * Lookup the record equal to [bno, len] in the btree given by cur.
- */
-int /* error */
-xfs_alloc_lookup_eq(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_agblock_t bno, /* starting block of extent */
- xfs_extlen_t len, /* length of extent */
- int *stat) /* success/failure */
-{
- cur->bc_rec.a.ar_startblock = bno;
- cur->bc_rec.a.ar_blockcount = len;
- return xfs_alloc_lookup(cur, XFS_LOOKUP_EQ, stat);
-}
+ cur->bc_private.a.agbp = agbp;
+ cur->bc_private.a.agno = agno;
-/*
- * Lookup the first record greater than or equal to [bno, len]
- * in the btree given by cur.
- */
-int /* error */
-xfs_alloc_lookup_ge(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_agblock_t bno, /* starting block of extent */
- xfs_extlen_t len, /* length of extent */
- int *stat) /* success/failure */
-{
- cur->bc_rec.a.ar_startblock = bno;
- cur->bc_rec.a.ar_blockcount = len;
- return xfs_alloc_lookup(cur, XFS_LOOKUP_GE, stat);
-}
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
-/*
- * Lookup the first record less than or equal to [bno, len]
- * in the btree given by cur.
- */
-int /* error */
-xfs_alloc_lookup_le(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_agblock_t bno, /* starting block of extent */
- xfs_extlen_t len, /* length of extent */
- int *stat) /* success/failure */
-{
- cur->bc_rec.a.ar_startblock = bno;
- cur->bc_rec.a.ar_blockcount = len;
- return xfs_alloc_lookup(cur, XFS_LOOKUP_LE, stat);
+ return cur;
}
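
A hedged usage sketch: a caller that wants to scan the free extents of one AG would typically build a cursor with xfs_allocbt_init_cursor() and tear it down with xfs_btree_del_cursor(). The lookup/get_rec/increment helpers named below live in the allocator proper rather than in this file, so treat their exact signatures as assumptions of this sketch; error handling is elided.

/*
 * Sketch only: walk the by-block free space btree of one AG.
 * xfs_alloc_lookup_ge/xfs_alloc_get_rec/xfs_btree_increment are assumptions
 * of this sketch; real callers must also check the error returns.
 */
static void sketch_walk_bnobt(struct xfs_mount *mp, struct xfs_trans *tp,
			      struct xfs_buf *agbp, xfs_agnumber_t agno)
{
	struct xfs_btree_cur	*cur;
	xfs_agblock_t		bno;
	xfs_extlen_t		len;
	int			stat;

	cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);

	/* position at the first record with startblock >= 0 */
	xfs_alloc_lookup_ge(cur, 0, 1, &stat);
	while (stat) {
		xfs_alloc_get_rec(cur, &bno, &len, &stat);
		if (!stat)
			break;
		/* ... use the free extent [bno, len] ... */
		xfs_btree_increment(cur, 0, &stat);
	}

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
}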
/*
- * Update the record referred to by cur, to the value given by [bno, len].
- * This either works (return 0) or gets an EFSCORRUPTED error.
+ * Calculate number of records in an alloc btree block.
*/
-int /* error */
-xfs_alloc_update(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_agblock_t bno, /* starting block of extent */
- xfs_extlen_t len) /* length of extent */
+int
+xfs_allocbt_maxrecs(
+ struct xfs_mount *mp,
+ int blocklen,
+ int leaf)
{
- xfs_alloc_block_t *block; /* btree block to update */
- int error; /* error return value */
- int ptr; /* current record number (updating) */
+ blocklen -= XFS_ALLOC_BLOCK_LEN(mp);
- ASSERT(len > 0);
- /*
- * Pick up the a.g. freelist struct and the current block.
- */
- block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]);
-#ifdef DEBUG
- if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0])))
- return error;
-#endif
- /*
- * Get the address of the rec to be updated.
- */
- ptr = cur->bc_ptrs[0];
- {
- xfs_alloc_rec_t *rp; /* pointer to updated record */
-
- rp = XFS_ALLOC_REC_ADDR(block, ptr, cur);
- /*
- * Fill in the new contents and log them.
- */
- rp->ar_startblock = cpu_to_be32(bno);
- rp->ar_blockcount = cpu_to_be32(len);
- xfs_alloc_log_recs(cur, cur->bc_bufs[0], ptr, ptr);
- }
- /*
- * If it's the by-size btree and it's the last leaf block and
- * it's the last record... then update the size of the longest
- * extent in the a.g., which we cache in the a.g. freelist header.
- */
- if (cur->bc_btnum == XFS_BTNUM_CNT &&
- be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
- ptr == be16_to_cpu(block->bb_numrecs)) {
- xfs_agf_t *agf; /* a.g. freespace header */
- xfs_agnumber_t seqno;
-
- agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
- seqno = be32_to_cpu(agf->agf_seqno);
- cur->bc_mp->m_perag[seqno].pagf_longest = len;
- agf->agf_longest = cpu_to_be32(len);
- xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
- XFS_AGF_LONGEST);
- }
- /*
- * Updating first record in leaf. Pass new key value up to our parent.
- */
- if (ptr == 1) {
- xfs_alloc_key_t key; /* key containing [bno, len] */
-
- key.ar_startblock = cpu_to_be32(bno);
- key.ar_blockcount = cpu_to_be32(len);
- if ((error = xfs_alloc_updkey(cur, &key, 1)))
- return error;
- }
- return 0;
+ if (leaf)
+ return blocklen / sizeof(xfs_alloc_rec_t);
+ return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}
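
As a rough worked example of the calculation above (the numbers assume a 4096 byte filesystem block, 8 byte records and keys, 4 byte pointers, and short-form block headers of 16 bytes, or 56 bytes when CRCs are enabled; these sizes are assumptions of the sketch, not quoted from this patch):

#include <stdio.h>

/* Illustrative arithmetic for xfs_allocbt_maxrecs() under assumed sizes. */
int main(void)
{
	int blocklen = 4096;
	int hdr = 16;		/* short-form header, no CRC */
	int hdr_crc = 56;	/* short-form header with CRC fields */
	int rec = 8, key = 8, ptr = 4;

	printf("leaf maxrecs       = %d\n", (blocklen - hdr) / rec);		/* 510 */
	printf("node maxrecs       = %d\n", (blocklen - hdr) / (key + ptr));	/* 340 */
	printf("leaf maxrecs (CRC) = %d\n", (blocklen - hdr_crc) / rec);	/* 505 */
	printf("node maxrecs (CRC) = %d\n", (blocklen - hdr_crc) / (key + ptr)); /* 336 */
	return 0;
}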
diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h
index 5bd1a2c8bd0..45e189e7e81 100644
--- a/fs/xfs/xfs_alloc_btree.h
+++ b/fs/xfs/xfs_alloc_btree.h
@@ -24,132 +24,42 @@
struct xfs_buf;
struct xfs_btree_cur;
-struct xfs_btree_sblock;
struct xfs_mount;
/*
- * There are two on-disk btrees, one sorted by blockno and one sorted
- * by blockcount and blockno. All blocks look the same to make the code
- * simpler; if we have time later, we'll make the optimizations.
+ * Btree block header size depends on a superblock flag.
*/
-#define XFS_ABTB_MAGIC 0x41425442 /* 'ABTB' for bno tree */
-#define XFS_ABTC_MAGIC 0x41425443 /* 'ABTC' for cnt tree */
-
-/*
- * Data record/key structure
- */
-typedef struct xfs_alloc_rec {
- __be32 ar_startblock; /* starting block number */
- __be32 ar_blockcount; /* count of free blocks */
-} xfs_alloc_rec_t, xfs_alloc_key_t;
-
-typedef struct xfs_alloc_rec_incore {
- xfs_agblock_t ar_startblock; /* starting block number */
- xfs_extlen_t ar_blockcount; /* count of free blocks */
-} xfs_alloc_rec_incore_t;
-
-/* btree pointer type */
-typedef __be32 xfs_alloc_ptr_t;
-/* btree block header type */
-typedef struct xfs_btree_sblock xfs_alloc_block_t;
-
-#define XFS_BUF_TO_ALLOC_BLOCK(bp) ((xfs_alloc_block_t *)XFS_BUF_PTR(bp))
-
-/*
- * Real block structures have a size equal to the disk block size.
- */
-#define XFS_ALLOC_BLOCK_MAXRECS(lev,cur) ((cur)->bc_mp->m_alloc_mxr[lev != 0])
-#define XFS_ALLOC_BLOCK_MINRECS(lev,cur) ((cur)->bc_mp->m_alloc_mnr[lev != 0])
-
-/*
- * Minimum and maximum blocksize and sectorsize.
- * The blocksize upper limit is pretty much arbitrary.
- * The sectorsize upper limit is due to sizeof(sb_sectsize).
- */
-#define XFS_MIN_BLOCKSIZE_LOG 9 /* i.e. 512 bytes */
-#define XFS_MAX_BLOCKSIZE_LOG 16 /* i.e. 65536 bytes */
-#define XFS_MIN_BLOCKSIZE (1 << XFS_MIN_BLOCKSIZE_LOG)
-#define XFS_MAX_BLOCKSIZE (1 << XFS_MAX_BLOCKSIZE_LOG)
-#define XFS_MIN_SECTORSIZE_LOG 9 /* i.e. 512 bytes */
-#define XFS_MAX_SECTORSIZE_LOG 15 /* i.e. 32768 bytes */
-#define XFS_MIN_SECTORSIZE (1 << XFS_MIN_SECTORSIZE_LOG)
-#define XFS_MAX_SECTORSIZE (1 << XFS_MAX_SECTORSIZE_LOG)
-
-/*
- * Block numbers in the AG:
- * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3.
- */
-#define XFS_BNO_BLOCK(mp) ((xfs_agblock_t)(XFS_AGFL_BLOCK(mp) + 1))
-#define XFS_CNT_BLOCK(mp) ((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1))
+#define XFS_ALLOC_BLOCK_LEN(mp) \
+ (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
+ XFS_BTREE_SBLOCK_CRC_LEN : XFS_BTREE_SBLOCK_LEN)
/*
* Record, key, and pointer address macros for btree blocks.
- */
-#define XFS_ALLOC_REC_ADDR(bb,i,cur) \
- XFS_BTREE_REC_ADDR(xfs_alloc, bb, i)
-
-#define XFS_ALLOC_KEY_ADDR(bb,i,cur) \
- XFS_BTREE_KEY_ADDR(xfs_alloc, bb, i)
-
-#define XFS_ALLOC_PTR_ADDR(bb,i,cur) \
- XFS_BTREE_PTR_ADDR(xfs_alloc, bb, i, XFS_ALLOC_BLOCK_MAXRECS(1, cur))
-
-/*
- * Decrement cursor by one record at the level.
- * For nonzero levels the leaf-ward information is untouched.
- */
-extern int xfs_alloc_decrement(struct xfs_btree_cur *cur, int level, int *stat);
-
-/*
- * Delete the record pointed to by cur.
- * The cursor refers to the place where the record was (could be inserted)
- * when the operation returns.
- */
-extern int xfs_alloc_delete(struct xfs_btree_cur *cur, int *stat);
-
-/*
- * Get the data from the pointed-to record.
- */
-extern int xfs_alloc_get_rec(struct xfs_btree_cur *cur, xfs_agblock_t *bno,
- xfs_extlen_t *len, int *stat);
-
-/*
- * Increment cursor by one record at the level.
- * For nonzero levels the leaf-ward information is untouched.
- */
-extern int xfs_alloc_increment(struct xfs_btree_cur *cur, int level, int *stat);
-
-/*
- * Insert the current record at the point referenced by cur.
- * The cursor may be inconsistent on return if splits have been done.
- */
-extern int xfs_alloc_insert(struct xfs_btree_cur *cur, int *stat);
-
-/*
- * Lookup the record equal to [bno, len] in the btree given by cur.
- */
-extern int xfs_alloc_lookup_eq(struct xfs_btree_cur *cur, xfs_agblock_t bno,
- xfs_extlen_t len, int *stat);
-
-/*
- * Lookup the first record greater than or equal to [bno, len]
- * in the btree given by cur.
- */
-extern int xfs_alloc_lookup_ge(struct xfs_btree_cur *cur, xfs_agblock_t bno,
- xfs_extlen_t len, int *stat);
-
-/*
- * Lookup the first record less than or equal to [bno, len]
- * in the btree given by cur.
- */
-extern int xfs_alloc_lookup_le(struct xfs_btree_cur *cur, xfs_agblock_t bno,
- xfs_extlen_t len, int *stat);
-
-/*
- * Update the record referred to by cur, to the value given by [bno, len].
- * This either works (return 0) or gets an EFSCORRUPTED error.
- */
-extern int xfs_alloc_update(struct xfs_btree_cur *cur, xfs_agblock_t bno,
- xfs_extlen_t len);
+ *
+ * (note that some of these may appear unused, but they are used in userspace)
+ */
+#define XFS_ALLOC_REC_ADDR(mp, block, index) \
+ ((xfs_alloc_rec_t *) \
+ ((char *)(block) + \
+ XFS_ALLOC_BLOCK_LEN(mp) + \
+ (((index) - 1) * sizeof(xfs_alloc_rec_t))))
+
+#define XFS_ALLOC_KEY_ADDR(mp, block, index) \
+ ((xfs_alloc_key_t *) \
+ ((char *)(block) + \
+ XFS_ALLOC_BLOCK_LEN(mp) + \
+ ((index) - 1) * sizeof(xfs_alloc_key_t)))
+
+#define XFS_ALLOC_PTR_ADDR(mp, block, index, maxrecs) \
+ ((xfs_alloc_ptr_t *) \
+ ((char *)(block) + \
+ XFS_ALLOC_BLOCK_LEN(mp) + \
+ (maxrecs) * sizeof(xfs_alloc_key_t) + \
+ ((index) - 1) * sizeof(xfs_alloc_ptr_t)))
+
+extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *,
+ struct xfs_trans *, struct xfs_buf *,
+ xfs_agnumber_t, xfs_btnum_t);
+extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int);
#endif /* __XFS_ALLOC_BTREE_H__ */
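
The address macros above encode the in-block layout: records (leaf) or keys (node) are packed immediately after the variable-length header, and the pointer array in a node block starts only after space for maxrecs keys. A minimal sketch of the equivalent byte-offset arithmetic, reusing the assumed sizes from the maxrecs example:

#include <stddef.h>

/* Sketch only: byte offsets inside an interior (node) allocbt block. */
static size_t node_key_offset(size_t hdr_len, int index)
{
	/* 1-based index; 8 byte keys sit directly after the header */
	return hdr_len + (size_t)(index - 1) * 8;
}

static size_t node_ptr_offset(size_t hdr_len, int maxrecs, int index)
{
	/* pointers follow the full key area, i.e. room for maxrecs keys */
	return hdr_len + (size_t)maxrecs * 8 + (size_t)(index - 1) * 4;
}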
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
new file mode 100644
index 00000000000..faaf716e208
--- /dev/null
+++ b/fs/xfs/xfs_aops.c
@@ -0,0 +1,1770 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_inode_item.h"
+#include "xfs_alloc.h"
+#include "xfs_error.h"
+#include "xfs_iomap.h"
+#include "xfs_trace.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_dinode.h"
+#include <linux/aio.h>
+#include <linux/gfp.h>
+#include <linux/mpage.h>
+#include <linux/pagevec.h>
+#include <linux/writeback.h>
+
+void
+xfs_count_page_state(
+ struct page *page,
+ int *delalloc,
+ int *unwritten)
+{
+ struct buffer_head *bh, *head;
+
+ *delalloc = *unwritten = 0;
+
+ bh = head = page_buffers(page);
+ do {
+ if (buffer_unwritten(bh))
+ (*unwritten) = 1;
+ else if (buffer_delay(bh))
+ (*delalloc) = 1;
+ } while ((bh = bh->b_this_page) != head);
+}
+
+STATIC struct block_device *
+xfs_find_bdev_for_inode(
+ struct inode *inode)
+{
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+
+ if (XFS_IS_REALTIME_INODE(ip))
+ return mp->m_rtdev_targp->bt_bdev;
+ else
+ return mp->m_ddev_targp->bt_bdev;
+}
+
+/*
+ * We're now finished for good with this ioend structure.
+ * Update the page state via the associated buffer_heads,
+ * release holds on the inode and bio, and finally free
+ * up memory. Do not use the ioend after this.
+ */
+STATIC void
+xfs_destroy_ioend(
+ xfs_ioend_t *ioend)
+{
+ struct buffer_head *bh, *next;
+
+ for (bh = ioend->io_buffer_head; bh; bh = next) {
+ next = bh->b_private;
+ bh->b_end_io(bh, !ioend->io_error);
+ }
+
+ mempool_free(ioend, xfs_ioend_pool);
+}
+
+/*
+ * Fast and loose check if this write could update the on-disk inode size.
+ */
+static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
+{
+ return ioend->io_offset + ioend->io_size >
+ XFS_I(ioend->io_inode)->i_d.di_size;
+}
+
+STATIC int
+xfs_setfilesize_trans_alloc(
+ struct xfs_ioend *ioend)
+{
+ struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
+ struct xfs_trans *tp;
+ int error;
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return error;
+ }
+
+ ioend->io_append_trans = tp;
+
+ /*
+ * We may pass freeze protection with a transaction. So tell lockdep
+ * we released it.
+ */
+ rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
+ 1, _THIS_IP_);
+ /*
+ * We hand off the transaction to the completion thread now, so
+ * clear the flag here.
+ */
+ current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
+ return 0;
+}
+
+/*
+ * Update on-disk file size now that data has been written to disk.
+ */
+STATIC int
+xfs_setfilesize(
+ struct xfs_ioend *ioend)
+{
+ struct xfs_inode *ip = XFS_I(ioend->io_inode);
+ struct xfs_trans *tp = ioend->io_append_trans;
+ xfs_fsize_t isize;
+
+ /*
+ * The transaction may have been allocated in the I/O submission thread,
+	 * thus we need to mark ourselves as being in a transaction manually.
+ * Similarly for freeze protection.
+ */
+ current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
+ rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
+ 0, 1, _THIS_IP_);
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
+ if (!isize) {
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_cancel(tp, 0);
+ return 0;
+ }
+
+ trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
+
+ ip->i_d.di_size = isize;
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+ return xfs_trans_commit(tp, 0);
+}
+
+/*
+ * Schedule IO completion handling on the final put of an ioend.
+ *
+ * If there is no work to do we might as well call it a day and free the
+ * ioend right now.
+ */
+STATIC void
+xfs_finish_ioend(
+ struct xfs_ioend *ioend)
+{
+ if (atomic_dec_and_test(&ioend->io_remaining)) {
+ struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
+
+ if (ioend->io_type == XFS_IO_UNWRITTEN)
+ queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
+ else if (ioend->io_append_trans ||
+ (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
+ queue_work(mp->m_data_workqueue, &ioend->io_work);
+ else
+ xfs_destroy_ioend(ioend);
+ }
+}
+
+/*
+ * IO write completion.
+ */
+STATIC void
+xfs_end_io(
+ struct work_struct *work)
+{
+ xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work);
+ struct xfs_inode *ip = XFS_I(ioend->io_inode);
+ int error = 0;
+
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ ioend->io_error = -EIO;
+ goto done;
+ }
+ if (ioend->io_error)
+ goto done;
+
+ /*
+ * For unwritten extents we need to issue transactions to convert a
+	 * range to normal written extents after the data I/O has finished.
+ */
+ if (ioend->io_type == XFS_IO_UNWRITTEN) {
+ error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
+ ioend->io_size);
+ } else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
+ /*
+ * For direct I/O we do not know if we need to allocate blocks
+ * or not so we can't preallocate an append transaction as that
+ * results in nested reservations and log space deadlocks. Hence
+ * allocate the transaction here. While this is sub-optimal and
+ * can block IO completion for some time, we're stuck with doing
+ * it this way until we can pass the ioend to the direct IO
+ * allocation callbacks and avoid nesting that way.
+ */
+ error = xfs_setfilesize_trans_alloc(ioend);
+ if (error)
+ goto done;
+ error = xfs_setfilesize(ioend);
+ } else if (ioend->io_append_trans) {
+ error = xfs_setfilesize(ioend);
+ } else {
+ ASSERT(!xfs_ioend_is_append(ioend));
+ }
+
+done:
+ if (error)
+ ioend->io_error = -error;
+ xfs_destroy_ioend(ioend);
+}
+
+/*
+ * Call IO completion handling in caller context on the final put of an ioend.
+ */
+STATIC void
+xfs_finish_ioend_sync(
+ struct xfs_ioend *ioend)
+{
+ if (atomic_dec_and_test(&ioend->io_remaining))
+ xfs_end_io(&ioend->io_work);
+}
+
+/*
+ * Allocate and initialise an IO completion structure.
+ * We need to track unwritten extent write completion here initially.
+ * We'll need to extend this for updating the ondisk inode size later
+ * (vs. incore size).
+ */
+STATIC xfs_ioend_t *
+xfs_alloc_ioend(
+ struct inode *inode,
+ unsigned int type)
+{
+ xfs_ioend_t *ioend;
+
+ ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
+
+ /*
+	 * Set the count to 1 initially, so that an I/O completion that
+	 * arrives before we have submitted all the I/O cannot call the
+	 * completion routine too early.
+ */
+ atomic_set(&ioend->io_remaining, 1);
+ ioend->io_isdirect = 0;
+ ioend->io_error = 0;
+ ioend->io_list = NULL;
+ ioend->io_type = type;
+ ioend->io_inode = inode;
+ ioend->io_buffer_head = NULL;
+ ioend->io_buffer_tail = NULL;
+ ioend->io_offset = 0;
+ ioend->io_size = 0;
+ ioend->io_append_trans = NULL;
+
+ INIT_WORK(&ioend->io_work, xfs_end_io);
+ return ioend;
+}
+
+STATIC int
+xfs_map_blocks(
+ struct inode *inode,
+ loff_t offset,
+ struct xfs_bmbt_irec *imap,
+ int type,
+ int nonblocking)
+{
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ ssize_t count = 1 << inode->i_blkbits;
+ xfs_fileoff_t offset_fsb, end_fsb;
+ int error = 0;
+ int bmapi_flags = XFS_BMAPI_ENTIRE;
+ int nimaps = 1;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -XFS_ERROR(EIO);
+
+ if (type == XFS_IO_UNWRITTEN)
+ bmapi_flags |= XFS_BMAPI_IGSTATE;
+
+ if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
+ if (nonblocking)
+ return -XFS_ERROR(EAGAIN);
+ xfs_ilock(ip, XFS_ILOCK_SHARED);
+ }
+
+ ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
+ (ip->i_df.if_flags & XFS_IFEXTENTS));
+ ASSERT(offset <= mp->m_super->s_maxbytes);
+
+ if (offset + count > mp->m_super->s_maxbytes)
+ count = mp->m_super->s_maxbytes - offset;
+ end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
+ offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
+ imap, &nimaps, bmapi_flags);
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+ if (error)
+ return -XFS_ERROR(error);
+
+ if (type == XFS_IO_DELALLOC &&
+ (!nimaps || isnullstartblock(imap->br_startblock))) {
+ error = xfs_iomap_write_allocate(ip, offset, imap);
+ if (!error)
+ trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
+ return -XFS_ERROR(error);
+ }
+
+#ifdef DEBUG
+ if (type == XFS_IO_UNWRITTEN) {
+ ASSERT(nimaps);
+ ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+ ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
+ }
+#endif
+ if (nimaps)
+ trace_xfs_map_blocks_found(ip, offset, count, type, imap);
+ return 0;
+}
+
+STATIC int
+xfs_imap_valid(
+ struct inode *inode,
+ struct xfs_bmbt_irec *imap,
+ xfs_off_t offset)
+{
+ offset >>= inode->i_blkbits;
+
+ return offset >= imap->br_startoff &&
+ offset < imap->br_startoff + imap->br_blockcount;
+}
+
+/*
+ * BIO completion handler for buffered IO.
+ */
+STATIC void
+xfs_end_bio(
+ struct bio *bio,
+ int error)
+{
+ xfs_ioend_t *ioend = bio->bi_private;
+
+ ASSERT(atomic_read(&bio->bi_cnt) >= 1);
+ ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
+
+ /* Toss bio and pass work off to an xfsdatad thread */
+ bio->bi_private = NULL;
+ bio->bi_end_io = NULL;
+ bio_put(bio);
+
+ xfs_finish_ioend(ioend);
+}
+
+STATIC void
+xfs_submit_ioend_bio(
+ struct writeback_control *wbc,
+ xfs_ioend_t *ioend,
+ struct bio *bio)
+{
+ atomic_inc(&ioend->io_remaining);
+ bio->bi_private = ioend;
+ bio->bi_end_io = xfs_end_bio;
+ submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
+}
+
+STATIC struct bio *
+xfs_alloc_ioend_bio(
+ struct buffer_head *bh)
+{
+ int nvecs = bio_get_nr_vecs(bh->b_bdev);
+ struct bio *bio = bio_alloc(GFP_NOIO, nvecs);
+
+ ASSERT(bio->bi_private == NULL);
+ bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+ bio->bi_bdev = bh->b_bdev;
+ return bio;
+}
+
+STATIC void
+xfs_start_buffer_writeback(
+ struct buffer_head *bh)
+{
+ ASSERT(buffer_mapped(bh));
+ ASSERT(buffer_locked(bh));
+ ASSERT(!buffer_delay(bh));
+ ASSERT(!buffer_unwritten(bh));
+
+ mark_buffer_async_write(bh);
+ set_buffer_uptodate(bh);
+ clear_buffer_dirty(bh);
+}
+
+STATIC void
+xfs_start_page_writeback(
+ struct page *page,
+ int clear_dirty,
+ int buffers)
+{
+ ASSERT(PageLocked(page));
+ ASSERT(!PageWriteback(page));
+ if (clear_dirty)
+ clear_page_dirty_for_io(page);
+ set_page_writeback(page);
+ unlock_page(page);
+ /* If no buffers on the page are to be written, finish it here */
+ if (!buffers)
+ end_page_writeback(page);
+}
+
+static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
+{
+ return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+}
+
+/*
+ * Submit all of the bios for all of the ioends we have saved up, covering the
+ * initial writepage page and also any probed pages.
+ *
+ * Because we may have multiple ioends spanning a page, we need to start
+ * writeback on all the buffers before we submit them for I/O. If we mark the
+ * buffers as we go, then we can end up with a page that only has some buffers
+ * marked async write, and I/O completion can occur before we mark the other
+ * buffers async write.
+ *
+ * The end result of this is that we trip a bug in end_page_writeback() because
+ * we call it twice for the one page as the code in end_buffer_async_write()
+ * assumes that all buffers on the page are started at the same time.
+ *
+ * The fix is two passes across the ioend list - one to start writeback on the
+ * buffer_heads, and then submit them for I/O on the second pass.
+ *
+ * If @fail is non-zero, it means that we have a situation where some part of
+ * the submission process has failed after we have marked pages for writeback
+ * and unlocked them. In this situation, we need to fail the ioend chain rather
+ * than submit it to IO. This typically only happens on a filesystem shutdown.
+ */
+STATIC void
+xfs_submit_ioend(
+ struct writeback_control *wbc,
+ xfs_ioend_t *ioend,
+ int fail)
+{
+ xfs_ioend_t *head = ioend;
+ xfs_ioend_t *next;
+ struct buffer_head *bh;
+ struct bio *bio;
+ sector_t lastblock = 0;
+
+ /* Pass 1 - start writeback */
+ do {
+ next = ioend->io_list;
+ for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
+ xfs_start_buffer_writeback(bh);
+ } while ((ioend = next) != NULL);
+
+ /* Pass 2 - submit I/O */
+ ioend = head;
+ do {
+ next = ioend->io_list;
+ bio = NULL;
+
+ /*
+ * If we are failing the IO now, just mark the ioend with an
+ * error and finish it. This will run IO completion immediately
+ * as there is only one reference to the ioend at this point in
+ * time.
+ */
+ if (fail) {
+ ioend->io_error = -fail;
+ xfs_finish_ioend(ioend);
+ continue;
+ }
+
+ for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+
+ if (!bio) {
+ retry:
+ bio = xfs_alloc_ioend_bio(bh);
+ } else if (bh->b_blocknr != lastblock + 1) {
+ xfs_submit_ioend_bio(wbc, ioend, bio);
+ goto retry;
+ }
+
+ if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
+ xfs_submit_ioend_bio(wbc, ioend, bio);
+ goto retry;
+ }
+
+ lastblock = bh->b_blocknr;
+ }
+ if (bio)
+ xfs_submit_ioend_bio(wbc, ioend, bio);
+ xfs_finish_ioend(ioend);
+ } while ((ioend = next) != NULL);
+}
+
+/*
+ * Cancel submission of all buffer_heads so far in this ioend.
+ * Toss the ioend too. Only ever called for the initial page
+ * in a writepage request, so only ever one page.
+ */
+STATIC void
+xfs_cancel_ioend(
+ xfs_ioend_t *ioend)
+{
+ xfs_ioend_t *next;
+ struct buffer_head *bh, *next_bh;
+
+ do {
+ next = ioend->io_list;
+ bh = ioend->io_buffer_head;
+ do {
+ next_bh = bh->b_private;
+ clear_buffer_async_write(bh);
+ unlock_buffer(bh);
+ } while ((bh = next_bh) != NULL);
+
+ mempool_free(ioend, xfs_ioend_pool);
+ } while ((ioend = next) != NULL);
+}
+
+/*
+ * Test to see if we've been building up a completion structure for
+ * earlier buffers -- if so, we try to append to this ioend if we
+ * can, otherwise we finish off any current ioend and start another.
+ * The ioend the buffer was added to is returned via *result.
+ */
+STATIC void
+xfs_add_to_ioend(
+ struct inode *inode,
+ struct buffer_head *bh,
+ xfs_off_t offset,
+ unsigned int type,
+ xfs_ioend_t **result,
+ int need_ioend)
+{
+ xfs_ioend_t *ioend = *result;
+
+ if (!ioend || need_ioend || type != ioend->io_type) {
+ xfs_ioend_t *previous = *result;
+
+ ioend = xfs_alloc_ioend(inode, type);
+ ioend->io_offset = offset;
+ ioend->io_buffer_head = bh;
+ ioend->io_buffer_tail = bh;
+ if (previous)
+ previous->io_list = ioend;
+ *result = ioend;
+ } else {
+ ioend->io_buffer_tail->b_private = bh;
+ ioend->io_buffer_tail = bh;
+ }
+
+ bh->b_private = NULL;
+ ioend->io_size += bh->b_size;
+}
+
+STATIC void
+xfs_map_buffer(
+ struct inode *inode,
+ struct buffer_head *bh,
+ struct xfs_bmbt_irec *imap,
+ xfs_off_t offset)
+{
+ sector_t bn;
+ struct xfs_mount *m = XFS_I(inode)->i_mount;
+ xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
+ xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
+
+ ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+ ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
+
+ bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
+ ((offset - iomap_offset) >> inode->i_blkbits);
+
+ ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
+
+ bh->b_blocknr = bn;
+ set_buffer_mapped(bh);
+}
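+
The b_blocknr computation above converts the extent's disk address (in 512 byte basic blocks) into filesystem-block units and then adds the offset of this buffer within the extent. A worked example with assumed geometry (4096 byte blocks, so i_blkbits = 12 and BBSHIFT = 9; the concrete numbers are assumptions, not from this patch):

/* Worked example only; the geometry below is assumed. */
static long long sketch_map_blocknr(void)
{
	long long iomap_bn = 8000;		/* extent start in 512 byte sectors */
	long long iomap_offset = 1048576;	/* byte offset of br_startoff */
	long long offset = 1064960;		/* byte offset of this buffer */
	int blkbits = 12, bbshift = 9;		/* 4096 byte blocks, 512 byte sectors */

	return (iomap_bn >> (blkbits - bbshift)) +	/* 8000 >> 3  = 1000 */
	       ((offset - iomap_offset) >> blkbits);	/* 16384 >> 12 =    4 */
	/* result: the buffer maps to filesystem block 1004 */
}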
+
+STATIC void
+xfs_map_at_offset(
+ struct inode *inode,
+ struct buffer_head *bh,
+ struct xfs_bmbt_irec *imap,
+ xfs_off_t offset)
+{
+ ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+ ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
+
+ xfs_map_buffer(inode, bh, imap, offset);
+ set_buffer_mapped(bh);
+ clear_buffer_delay(bh);
+ clear_buffer_unwritten(bh);
+}
+
+/*
+ * Test if a given page contains at least one buffer of a given @type.
+ * If @check_all_buffers is true, then we walk all the buffers in the page to
+ * try to find one of the type passed in. If it is not set, then the caller only
+ * needs to check the first buffer on the page for a match.
+ */
+STATIC bool
+xfs_check_page_type(
+ struct page *page,
+ unsigned int type,
+ bool check_all_buffers)
+{
+ struct buffer_head *bh;
+ struct buffer_head *head;
+
+ if (PageWriteback(page))
+ return false;
+ if (!page->mapping)
+ return false;
+ if (!page_has_buffers(page))
+ return false;
+
+ bh = head = page_buffers(page);
+ do {
+ if (buffer_unwritten(bh)) {
+ if (type == XFS_IO_UNWRITTEN)
+ return true;
+ } else if (buffer_delay(bh)) {
+ if (type == XFS_IO_DELALLOC)
+ return true;
+ } else if (buffer_dirty(bh) && buffer_mapped(bh)) {
+ if (type == XFS_IO_OVERWRITE)
+ return true;
+ }
+
+ /* If we are only checking the first buffer, we are done now. */
+ if (!check_all_buffers)
+ break;
+ } while ((bh = bh->b_this_page) != head);
+
+ return false;
+}
+
+/*
+ * Allocate & map buffers for a page given the extent map. Write it out.
+ * Except for the original page of a writepage, this is called on
+ * delalloc/unwritten pages only; for the original page it is possible
+ * that the page has no mapping at all.
+ */
+STATIC int
+xfs_convert_page(
+ struct inode *inode,
+ struct page *page,
+ loff_t tindex,
+ struct xfs_bmbt_irec *imap,
+ xfs_ioend_t **ioendp,
+ struct writeback_control *wbc)
+{
+ struct buffer_head *bh, *head;
+ xfs_off_t end_offset;
+ unsigned long p_offset;
+ unsigned int type;
+ int len, page_dirty;
+ int count = 0, done = 0, uptodate = 1;
+ xfs_off_t offset = page_offset(page);
+
+ if (page->index != tindex)
+ goto fail;
+ if (!trylock_page(page))
+ goto fail;
+ if (PageWriteback(page))
+ goto fail_unlock_page;
+ if (page->mapping != inode->i_mapping)
+ goto fail_unlock_page;
+ if (!xfs_check_page_type(page, (*ioendp)->io_type, false))
+ goto fail_unlock_page;
+
+ /*
+ * page_dirty is initially a count of buffers on the page before
+ * EOF and is decremented as we move each into a cleanable state.
+ *
+ * Derivation:
+ *
+ * End offset is the highest offset that this page should represent.
+ * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
+ * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
+ * hence give us the correct page_dirty count. On any other page,
+ * it will be zero and in that case we need page_dirty to be the
+ * count of buffers on the page.
+ */
+ end_offset = min_t(unsigned long long,
+ (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
+ i_size_read(inode));
+
+ /*
+ * If the current map does not span the entire page we are about to try
+ * to write, then give up. The only way we can write a page that spans
+ * multiple mappings in a single writeback iteration is via the
+ * xfs_vm_writepage() function. Data integrity writeback requires the
+ * entire page to be written in a single attempt, otherwise the part of
+ * the page we don't write here doesn't get written as part of the data
+ * integrity sync.
+ *
+ * For normal writeback, we also don't attempt to write partial pages
+ * here as it simply means that write_cache_pages() will see it under
+ * writeback and ignore the page until some point in the future, at
+ * which time this will be the only page in the file that needs
+ * writeback. Hence for more optimal IO patterns, we should always
+ * avoid partial page writeback due to multiple mappings on a page here.
+ */
+ if (!xfs_imap_valid(inode, imap, end_offset))
+ goto fail_unlock_page;
+
+ len = 1 << inode->i_blkbits;
+ p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
+ PAGE_CACHE_SIZE);
+ p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
+ page_dirty = p_offset / len;
+
+ /*
+ * The moment we find a buffer that doesn't match our current type
+ * specification or can't be written, abort the loop and start
+ * writeback. As per the above xfs_imap_valid() check, only
+ * xfs_vm_writepage() can handle partial page writeback fully - we are
+ * limited here to the buffers that are contiguous with the current
+ * ioend, and hence a buffer we can't write breaks that contiguity and
+ * we have to defer the rest of the IO to xfs_vm_writepage().
+ */
+ bh = head = page_buffers(page);
+ do {
+ if (offset >= end_offset)
+ break;
+ if (!buffer_uptodate(bh))
+ uptodate = 0;
+ if (!(PageUptodate(page) || buffer_uptodate(bh))) {
+ done = 1;
+ break;
+ }
+
+ if (buffer_unwritten(bh) || buffer_delay(bh) ||
+ buffer_mapped(bh)) {
+ if (buffer_unwritten(bh))
+ type = XFS_IO_UNWRITTEN;
+ else if (buffer_delay(bh))
+ type = XFS_IO_DELALLOC;
+ else
+ type = XFS_IO_OVERWRITE;
+
+ /*
+ * imap should always be valid because of the above
+ * partial page end_offset check on the imap.
+ */
+ ASSERT(xfs_imap_valid(inode, imap, offset));
+
+ lock_buffer(bh);
+ if (type != XFS_IO_OVERWRITE)
+ xfs_map_at_offset(inode, bh, imap, offset);
+ xfs_add_to_ioend(inode, bh, offset, type,
+ ioendp, done);
+
+ page_dirty--;
+ count++;
+ } else {
+ done = 1;
+ break;
+ }
+ } while (offset += len, (bh = bh->b_this_page) != head);
+
+ if (uptodate && bh == head)
+ SetPageUptodate(page);
+
+ if (count) {
+ if (--wbc->nr_to_write <= 0 &&
+ wbc->sync_mode == WB_SYNC_NONE)
+ done = 1;
+ }
+ xfs_start_page_writeback(page, !page_dirty, count);
+
+ return done;
+ fail_unlock_page:
+ unlock_page(page);
+ fail:
+ return 1;
+}
+
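As a rough illustration of the page_dirty arithmetic above: the snippet below is not part of the patch, it simply replays the end_offset/p_offset calculation with made-up numbers, assuming 4096-byte pages (PAGE_CACHE_SIZE) and 512-byte filesystem blocks.

#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SIZE 4096ULL		/* assumed page size */

/* Round x up to the next multiple of y. */
static uint64_t roundup_to(uint64_t x, uint64_t y)
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	uint64_t blocksize = 512;			/* 1 << inode->i_blkbits */
	uint64_t isize = 3 * PAGE_CACHE_SIZE + 1300;	/* example file size */
	uint64_t page_index = 3;			/* last (partial) page */

	/* end_offset: min of the end of this page and i_size */
	uint64_t page_end = (page_index + 1) * PAGE_CACHE_SIZE;
	uint64_t end_offset = page_end < isize ? page_end : isize;

	/* bytes of this page below EOF, rounded up to a whole block */
	uint64_t p_offset = end_offset & (PAGE_CACHE_SIZE - 1);
	p_offset = p_offset ? roundup_to(p_offset, blocksize) : PAGE_CACHE_SIZE;

	/* page_dirty: number of buffers on the page that lie below EOF */
	printf("page_dirty = %llu\n", (unsigned long long)(p_offset / blocksize));
	/* 1300 bytes -> rounded up to 1536 -> 3 of the 8 512-byte buffers */
	return 0;
}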
+/*
+ * Convert & write out a cluster of pages in the same extent as defined
+ * by mp and following the start page.
+ */
+STATIC void
+xfs_cluster_write(
+ struct inode *inode,
+ pgoff_t tindex,
+ struct xfs_bmbt_irec *imap,
+ xfs_ioend_t **ioendp,
+ struct writeback_control *wbc,
+ pgoff_t tlast)
+{
+ struct pagevec pvec;
+ int done = 0, i;
+
+ pagevec_init(&pvec, 0);
+ while (!done && tindex <= tlast) {
+ unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+ if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
+ break;
+
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ done = xfs_convert_page(inode, pvec.pages[i], tindex++,
+ imap, ioendp, wbc);
+ if (done)
+ break;
+ }
+
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+}
+
+STATIC void
+xfs_vm_invalidatepage(
+ struct page *page,
+ unsigned int offset,
+ unsigned int length)
+{
+ trace_xfs_invalidatepage(page->mapping->host, page, offset,
+ length);
+ block_invalidatepage(page, offset, length);
+}
+
+/*
+ * If the page has delalloc buffers on it, we need to punch them out before we
+ * invalidate the page. If we don't, we leave a stale delalloc mapping on the
+ * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
+ * is done on that same region - the delalloc extent is returned when none is
+ * supposed to be there.
+ *
+ * We prevent this by truncating away the delalloc regions on the page before
+ * invalidating it. Because they are delalloc, we can do this without needing a
+ * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
+ * truncation without a transaction as there is no space left for block
+ * reservation (typically why we see an ENOSPC in writeback).
+ *
+ * This is not a performance critical path, so for now just do the punching a
+ * buffer head at a time.
+ */
+STATIC void
+xfs_aops_discard_page(
+ struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ struct xfs_inode *ip = XFS_I(inode);
+ struct buffer_head *bh, *head;
+ loff_t offset = page_offset(page);
+
+ if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
+ goto out_invalidate;
+
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ goto out_invalidate;
+
+ xfs_alert(ip->i_mount,
+ "page discard on page %p, inode 0x%llx, offset %llu.",
+ page, ip->i_ino, offset);
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ bh = head = page_buffers(page);
+ do {
+ int error;
+ xfs_fileoff_t start_fsb;
+
+ if (!buffer_delay(bh))
+ goto next_buffer;
+
+ start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
+ error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
+ if (error) {
+ /* something screwed, just bail */
+ if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ xfs_alert(ip->i_mount,
+ "page discard unable to remove delalloc mapping.");
+ }
+ break;
+ }
+next_buffer:
+ offset += 1 << inode->i_blkbits;
+
+ } while ((bh = bh->b_this_page) != head);
+
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+out_invalidate:
+ xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ return;
+}
+
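A rough sketch of the byte-offset to filesystem-block conversion the punch-out loop above relies on (XFS_B_TO_FSBT truncates to a block number); the block size and offsets below are assumptions for illustration, not values from the patch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int blocklog = 9;		/* assumed 512-byte fs blocks */
	uint64_t page_off = 8ULL * 4096;	/* page_offset(page) of the dirty page */

	/* Walk the page a buffer at a time, punching one fs block per buffer. */
	for (int bh = 0; bh < 2; bh++) {
		uint64_t offset = page_off + (uint64_t)bh * 512;
		uint64_t start_fsb = offset >> blocklog;	/* XFS_B_TO_FSBT */
		printf("buffer %d: punch delalloc at fs block %llu\n",
		       bh, (unsigned long long)start_fsb);
	}
	return 0;
}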
+/*
+ * Write out a dirty page.
+ *
+ * For delalloc space on the page we need to allocate space and flush it.
+ * For unwritten space on the page we need to start the conversion to
+ * regular allocated space.
+ * For any other dirty buffer heads on the page we should flush them.
+ */
+STATIC int
+xfs_vm_writepage(
+ struct page *page,
+ struct writeback_control *wbc)
+{
+ struct inode *inode = page->mapping->host;
+ struct buffer_head *bh, *head;
+ struct xfs_bmbt_irec imap;
+ xfs_ioend_t *ioend = NULL, *iohead = NULL;
+ loff_t offset;
+ unsigned int type;
+ __uint64_t end_offset;
+ pgoff_t end_index, last_index;
+ ssize_t len;
+ int err, imap_valid = 0, uptodate = 1;
+ int count = 0;
+ int nonblocking = 0;
+
+ trace_xfs_writepage(inode, page, 0, 0);
+
+ ASSERT(page_has_buffers(page));
+
+ /*
+ * Refuse to write the page out if we are called from reclaim context.
+ *
+ * This avoids stack overflows when called from deeply used stacks in
+ * random callers for direct reclaim or memcg reclaim. We explicitly
+ * allow reclaim from kswapd as the stack usage there is relatively low.
+ *
+ * This should never happen except in the case of a VM regression so
+ * warn about it.
+ */
+ if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
+ PF_MEMALLOC))
+ goto redirty;
+
+ /*
+ * Given that we do not allow direct reclaim to call us, we should
+ * never be called while in a filesystem transaction.
+ */
+ if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
+ goto redirty;
+
+ /* Is this page beyond the end of the file? */
+ offset = i_size_read(inode);
+ end_index = offset >> PAGE_CACHE_SHIFT;
+ last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
+
+ /*
+ * The page index is less than the end_index, adjust the end_offset
+ * to the highest offset that this page should represent.
+ * -----------------------------------------------------
+ * | file mapping | <EOF> |
+ * -----------------------------------------------------
+ * | Page ... | Page N-2 | Page N-1 | Page N | |
+ * ^--------------------------------^----------|--------
+ * | desired writeback range | see else |
+ * ---------------------------------^------------------|
+ */
+ if (page->index < end_index)
+ end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
+ else {
+ /*
+ * Check whether the page to write out is beyond or straddles
+ * i_size or not.
+ * -------------------------------------------------------
+ * | file mapping | <EOF> |
+ * -------------------------------------------------------
+ * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
+ * ^--------------------------------^-----------|---------
+ * | | Straddles |
+ * ---------------------------------^-----------|--------|
+ */
+ unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
+
+ /*
+ * Skip the page if it is fully outside i_size, e.g. due to a
+ * truncate operation that is in progress. We must redirty the
+ * page so that reclaim stops reclaiming it. Otherwise
+ * xfs_vm_releasepage() is called on it and gets confused.
+ *
+		 * Note that end_index is an unsigned long. If the given offset
+		 * is greater than 16TB on a 32-bit system, it would overflow if
+		 * we checked whether the page is fully outside i_size via
+		 * "if (page->index >= end_index + 1)", as "end_index + 1" would
+		 * evaluate to 0. The page would then be redirtied and written
+		 * out repeatedly, which would result in an infinite loop and the
+		 * user program performing this operation would hang. Instead,
+		 * we can verify this situation by checking whether the page to
+		 * write is totally beyond i_size or whether its offset is just
+		 * equal to the EOF.
+ */
+ if (page->index > end_index ||
+ (page->index == end_index && offset_into_page == 0))
+ goto redirty;
+
+ /*
+ * The page straddles i_size. It must be zeroed out on each
+ * and every writepage invocation because it may be mmapped.
+ * "A file is mapped in multiples of the page size. For a file
+ * that is not a multiple of the page size, the remaining
+ * memory is zeroed when mapped, and writes to that region are
+ * not written out to the file."
+ */
+ zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
+
+ /* Adjust the end_offset to the end of file */
+ end_offset = offset;
+ }
+
+ len = 1 << inode->i_blkbits;
+
+ bh = head = page_buffers(page);
+ offset = page_offset(page);
+ type = XFS_IO_OVERWRITE;
+
+ if (wbc->sync_mode == WB_SYNC_NONE)
+ nonblocking = 1;
+
+ do {
+ int new_ioend = 0;
+
+ if (offset >= end_offset)
+ break;
+ if (!buffer_uptodate(bh))
+ uptodate = 0;
+
+ /*
+ * set_page_dirty dirties all buffers in a page, independent
+ * of their state. The dirty state however is entirely
+ * meaningless for holes (!mapped && uptodate), so skip
+ * buffers covering holes here.
+ */
+ if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
+ imap_valid = 0;
+ continue;
+ }
+
+ if (buffer_unwritten(bh)) {
+ if (type != XFS_IO_UNWRITTEN) {
+ type = XFS_IO_UNWRITTEN;
+ imap_valid = 0;
+ }
+ } else if (buffer_delay(bh)) {
+ if (type != XFS_IO_DELALLOC) {
+ type = XFS_IO_DELALLOC;
+ imap_valid = 0;
+ }
+ } else if (buffer_uptodate(bh)) {
+ if (type != XFS_IO_OVERWRITE) {
+ type = XFS_IO_OVERWRITE;
+ imap_valid = 0;
+ }
+ } else {
+ if (PageUptodate(page))
+ ASSERT(buffer_mapped(bh));
+ /*
+ * This buffer is not uptodate and will not be
+ * written to disk. Ensure that we will put any
+ * subsequent writeable buffers into a new
+ * ioend.
+ */
+ imap_valid = 0;
+ continue;
+ }
+
+ if (imap_valid)
+ imap_valid = xfs_imap_valid(inode, &imap, offset);
+ if (!imap_valid) {
+ /*
+ * If we didn't have a valid mapping then we need to
+ * put the new mapping into a separate ioend structure.
+ * This ensures non-contiguous extents always have
+ * separate ioends, which is particularly important
+ * for unwritten extent conversion at I/O completion
+ * time.
+ */
+ new_ioend = 1;
+ err = xfs_map_blocks(inode, offset, &imap, type,
+ nonblocking);
+ if (err)
+ goto error;
+ imap_valid = xfs_imap_valid(inode, &imap, offset);
+ }
+ if (imap_valid) {
+ lock_buffer(bh);
+ if (type != XFS_IO_OVERWRITE)
+ xfs_map_at_offset(inode, bh, &imap, offset);
+ xfs_add_to_ioend(inode, bh, offset, type, &ioend,
+ new_ioend);
+ count++;
+ }
+
+ if (!iohead)
+ iohead = ioend;
+
+ } while (offset += len, ((bh = bh->b_this_page) != head));
+
+ if (uptodate && bh == head)
+ SetPageUptodate(page);
+
+ xfs_start_page_writeback(page, 1, count);
+
+ /* if there is no IO to be submitted for this page, we are done */
+ if (!ioend)
+ return 0;
+
+ ASSERT(iohead);
+
+ /*
+	 * Any errors from this point onwards need to be reported through the IO
+ * completion path as we have marked the initial page as under writeback
+ * and unlocked it.
+ */
+ if (imap_valid) {
+ xfs_off_t end_index;
+
+ end_index = imap.br_startoff + imap.br_blockcount;
+
+ /* to bytes */
+ end_index <<= inode->i_blkbits;
+
+ /* to pages */
+ end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;
+
+ /* check against file size */
+ if (end_index > last_index)
+ end_index = last_index;
+
+ xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
+ wbc, end_index);
+ }
+
+ /*
+ * Reserve log space if we might write beyond the on-disk inode size.
+ */
+ err = 0;
+ if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
+ err = xfs_setfilesize_trans_alloc(ioend);
+
+ xfs_submit_ioend(wbc, iohead, err);
+
+ return 0;
+
+error:
+ if (iohead)
+ xfs_cancel_ioend(iohead);
+
+ if (err == -EAGAIN)
+ goto redirty;
+
+ xfs_aops_discard_page(page);
+ ClearPageUptodate(page);
+ unlock_page(page);
+ return err;
+
+redirty:
+ redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
+ return 0;
+}
+
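The 32-bit overflow that the end_index comment in xfs_vm_writepage() warns about is easier to see with numbers. The sketch below is illustrative only: it models pgoff_t as a 32-bit unsigned integer and assumes 4k pages; none of it is part of the patch.

#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SHIFT 12		/* assumed 4k pages */

int main(void)
{
	/* Model a 32-bit kernel where pgoff_t (unsigned long) is 32 bits. */
	uint64_t isize = (1ULL << 44) - 1;		/* file size just under 16TB */
	uint32_t end_index = isize >> PAGE_CACHE_SHIFT;	/* 0xffffffff */
	uint32_t page_index = 42;			/* a page well inside the file */

	/*
	 * The naive test "page->index >= end_index + 1" wraps to
	 * "page_index >= 0", so every page looks like it is beyond EOF
	 * and keeps getting redirtied.
	 */
	printf("naive skip check: %d\n", page_index >= (uint32_t)(end_index + 1));

	/*
	 * The check used in xfs_vm_writepage() avoids the "+ 1": skip only
	 * if the page is past end_index, or if it is the end_index page
	 * and i_size lands exactly on the page boundary.
	 */
	uint32_t offset_into_page = isize & ((1u << PAGE_CACHE_SHIFT) - 1);
	int skip = page_index > end_index ||
		   (page_index == end_index && offset_into_page == 0);
	printf("overflow-safe skip check: %d\n", skip);
	return 0;
}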
+STATIC int
+xfs_vm_writepages(
+ struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
+ return generic_writepages(mapping, wbc);
+}
+
+/*
+ * Called to move a page into cleanable state - and from there
+ * to be released. The page should already be clean. We always
+ * have buffer heads in this call.
+ *
+ * Returns 1 if the page is ok to release, 0 otherwise.
+ */
+STATIC int
+xfs_vm_releasepage(
+ struct page *page,
+ gfp_t gfp_mask)
+{
+ int delalloc, unwritten;
+
+ trace_xfs_releasepage(page->mapping->host, page, 0, 0);
+
+ xfs_count_page_state(page, &delalloc, &unwritten);
+
+ if (WARN_ON_ONCE(delalloc))
+ return 0;
+ if (WARN_ON_ONCE(unwritten))
+ return 0;
+
+ return try_to_free_buffers(page);
+}
+
+STATIC int
+__xfs_get_blocks(
+ struct inode *inode,
+ sector_t iblock,
+ struct buffer_head *bh_result,
+ int create,
+ int direct)
+{
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t offset_fsb, end_fsb;
+ int error = 0;
+ int lockmode = 0;
+ struct xfs_bmbt_irec imap;
+ int nimaps = 1;
+ xfs_off_t offset;
+ ssize_t size;
+ int new = 0;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -XFS_ERROR(EIO);
+
+ offset = (xfs_off_t)iblock << inode->i_blkbits;
+ ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
+ size = bh_result->b_size;
+
+ if (!create && direct && offset >= i_size_read(inode))
+ return 0;
+
+ /*
+ * Direct I/O is usually done on preallocated files, so try getting
+ * a block mapping without an exclusive lock first. For buffered
+ * writes we already have the exclusive iolock anyway, so avoiding
+ * a lock roundtrip here by taking the ilock exclusive from the
+ * beginning is a useful micro optimization.
+ */
+ if (create && !direct) {
+ lockmode = XFS_ILOCK_EXCL;
+ xfs_ilock(ip, lockmode);
+ } else {
+ lockmode = xfs_ilock_data_map_shared(ip);
+ }
+
+ ASSERT(offset <= mp->m_super->s_maxbytes);
+ if (offset + size > mp->m_super->s_maxbytes)
+ size = mp->m_super->s_maxbytes - offset;
+ end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
+ offset_fsb = XFS_B_TO_FSBT(mp, offset);
+
+ error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
+ &imap, &nimaps, XFS_BMAPI_ENTIRE);
+ if (error)
+ goto out_unlock;
+
+ if (create &&
+ (!nimaps ||
+ (imap.br_startblock == HOLESTARTBLOCK ||
+ imap.br_startblock == DELAYSTARTBLOCK))) {
+ if (direct || xfs_get_extsz_hint(ip)) {
+ /*
+ * Drop the ilock in preparation for starting the block
+ * allocation transaction. It will be retaken
+ * exclusively inside xfs_iomap_write_direct for the
+ * actual allocation.
+ */
+ xfs_iunlock(ip, lockmode);
+ error = xfs_iomap_write_direct(ip, offset, size,
+ &imap, nimaps);
+ if (error)
+ return -error;
+ new = 1;
+ } else {
+ /*
+			 * Delalloc reservations do not require a transaction,
+			 * so we can go on without dropping the lock here. If
+			 * we are allocating a new delalloc block, make sure
+			 * that we set the new flag so the buffer is marked
+			 * new; that way we know it is newly allocated if the
+			 * write fails.
+ */
+ if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
+ new = 1;
+ error = xfs_iomap_write_delay(ip, offset, size, &imap);
+ if (error)
+ goto out_unlock;
+
+ xfs_iunlock(ip, lockmode);
+ }
+
+ trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
+ } else if (nimaps) {
+ trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
+ xfs_iunlock(ip, lockmode);
+ } else {
+ trace_xfs_get_blocks_notfound(ip, offset, size);
+ goto out_unlock;
+ }
+
+ if (imap.br_startblock != HOLESTARTBLOCK &&
+ imap.br_startblock != DELAYSTARTBLOCK) {
+ /*
+		 * For unwritten extents, do not report a disk address in
+		 * the read case (treat it as if we're reading into a hole).
+ */
+ if (create || !ISUNWRITTEN(&imap))
+ xfs_map_buffer(inode, bh_result, &imap, offset);
+ if (create && ISUNWRITTEN(&imap)) {
+ if (direct) {
+ bh_result->b_private = inode;
+ set_buffer_defer_completion(bh_result);
+ }
+ set_buffer_unwritten(bh_result);
+ }
+ }
+
+ /*
+	 * If this is a realtime file, data may be on a different device
+	 * to the one currently pointed to by the buffer_head's b_bdev.
+ */
+ bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
+
+ /*
+	 * If we previously allocated a block out beyond EOF and we are now
+	 * coming back to use it, then we will need to flag it as new even if
+	 * it has a disk address.
+	 *
+	 * With sub-block writes into unwritten extents we also need to mark
+	 * the buffer as new so that the unwritten parts of the buffer get
+	 * correctly zeroed.
+ */
+ if (create &&
+ ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
+ (offset >= i_size_read(inode)) ||
+ (new || ISUNWRITTEN(&imap))))
+ set_buffer_new(bh_result);
+
+ if (imap.br_startblock == DELAYSTARTBLOCK) {
+ BUG_ON(direct);
+ if (create) {
+ set_buffer_uptodate(bh_result);
+ set_buffer_mapped(bh_result);
+ set_buffer_delay(bh_result);
+ }
+ }
+
+ /*
+	 * If this is O_DIRECT or the mpage code calling us, tell them how large
+	 * the mapping is, so that we can avoid repeated get_blocks calls.
+	 *
+	 * If the mapping spans EOF, then we have to break the mapping up as the
+	 * mapping for blocks beyond EOF must be marked new so that sub-block
+ * regions can be correctly zeroed. We can't do this for mappings within
+ * EOF unless the mapping was just allocated or is unwritten, otherwise
+ * the callers would overwrite existing data with zeros. Hence we have
+ * to split the mapping into a range up to and including EOF, and a
+ * second mapping for beyond EOF.
+ */
+ if (direct || size > (1 << inode->i_blkbits)) {
+ xfs_off_t mapping_size;
+
+ mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
+ mapping_size <<= inode->i_blkbits;
+
+ ASSERT(mapping_size > 0);
+ if (mapping_size > size)
+ mapping_size = size;
+ if (offset < i_size_read(inode) &&
+ offset + mapping_size >= i_size_read(inode)) {
+ /* limit mapping to block that spans EOF */
+ mapping_size = roundup_64(i_size_read(inode) - offset,
+ 1 << inode->i_blkbits);
+ }
+ if (mapping_size > LONG_MAX)
+ mapping_size = LONG_MAX;
+
+ bh_result->b_size = mapping_size;
+ }
+
+ return 0;
+
+out_unlock:
+ xfs_iunlock(ip, lockmode);
+ return -error;
+}
+
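The mapping-size clamp at the end of __xfs_get_blocks() is essentially "blocks left in the extent, capped at the caller's b_size, then trimmed to the block containing EOF". A minimal sketch of that arithmetic, with assumed geometry (4096-byte blocks) and made-up extent values, follows; it is not part of the patch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int blkbits = 12;	/* assumed 4096-byte blocks */
	uint64_t isize = 10000;		/* example in-core i_size, mid-block */

	/* A found extent: starts at file block 0, 8 blocks long. */
	uint64_t br_startoff = 0, br_blockcount = 8;
	uint64_t iblock = 1;			/* caller asked for file block 1 */
	uint64_t offset = iblock << blkbits;	/* 4096 */
	uint64_t size = 6ULL << blkbits;	/* caller's b_size: 6 blocks */

	/* Blocks left in the extent from iblock onwards, in bytes. */
	uint64_t mapping_size = (br_startoff + br_blockcount - iblock) << blkbits;
	if (mapping_size > size)
		mapping_size = size;

	/* If the mapping would span EOF, trim it to the block containing EOF. */
	if (offset < isize && offset + mapping_size >= isize) {
		uint64_t blocksize = 1ULL << blkbits;
		mapping_size = ((isize - offset + blocksize - 1) / blocksize)
				* blocksize;	/* round up to a whole block */
	}
	printf("reported mapping size: %llu bytes\n",
	       (unsigned long long)mapping_size);	/* 8192: blocks 1-2 only */
	return 0;
}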
+int
+xfs_get_blocks(
+ struct inode *inode,
+ sector_t iblock,
+ struct buffer_head *bh_result,
+ int create)
+{
+ return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
+}
+
+STATIC int
+xfs_get_blocks_direct(
+ struct inode *inode,
+ sector_t iblock,
+ struct buffer_head *bh_result,
+ int create)
+{
+ return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
+}
+
+/*
+ * Complete a direct I/O write request.
+ *
+ * If the private argument is non-NULL, __xfs_get_blocks signals us that we
+ * need to issue a transaction to convert the range from unwritten to written
+ * extents. In case this is regular synchronous I/O we just call xfs_end_io
+ * to do this and we are done. But in case this was a successful AIO
+ * request this handler is called from interrupt context, from which we
+ * can't start transactions. In that case offload the I/O completion to
+ * the workqueues we also use for buffered I/O completion.
+ */
+STATIC void
+xfs_end_io_direct_write(
+ struct kiocb *iocb,
+ loff_t offset,
+ ssize_t size,
+ void *private)
+{
+ struct xfs_ioend *ioend = iocb->private;
+
+ /*
+ * While the generic direct I/O code updates the inode size, it does
+ * so only after the end_io handler is called, which means our
+ * end_io handler thinks the on-disk size is outside the in-core
+ * size. To prevent this just update it a little bit earlier here.
+ */
+ if (offset + size > i_size_read(ioend->io_inode))
+ i_size_write(ioend->io_inode, offset + size);
+
+ /*
+ * blockdev_direct_IO can return an error even after the I/O
+ * completion handler was called. Thus we need to protect
+ * against double-freeing.
+ */
+ iocb->private = NULL;
+
+ ioend->io_offset = offset;
+ ioend->io_size = size;
+ if (private && size > 0)
+ ioend->io_type = XFS_IO_UNWRITTEN;
+
+ xfs_finish_ioend_sync(ioend);
+}
+
+STATIC ssize_t
+xfs_vm_direct_IO(
+ int rw,
+ struct kiocb *iocb,
+ struct iov_iter *iter,
+ loff_t offset)
+{
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
+ struct block_device *bdev = xfs_find_bdev_for_inode(inode);
+ struct xfs_ioend *ioend = NULL;
+ ssize_t ret;
+
+ if (rw & WRITE) {
+ size_t size = iov_iter_count(iter);
+
+ /*
+ * We cannot preallocate a size update transaction here as we
+ * don't know whether allocation is necessary or not. Hence we
+ * can only tell IO completion that one is necessary if we are
+ * not doing unwritten extent conversion.
+ */
+ iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
+ if (offset + size > XFS_I(inode)->i_d.di_size)
+ ioend->io_isdirect = 1;
+
+ ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
+ offset, xfs_get_blocks_direct,
+ xfs_end_io_direct_write, NULL,
+ DIO_ASYNC_EXTEND);
+ if (ret != -EIOCBQUEUED && iocb->private)
+ goto out_destroy_ioend;
+ } else {
+ ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
+ offset, xfs_get_blocks_direct,
+ NULL, NULL, 0);
+ }
+
+ return ret;
+
+out_destroy_ioend:
+ xfs_destroy_ioend(ioend);
+ return ret;
+}
+
+/*
+ * Punch out the delalloc blocks we have already allocated.
+ *
+ * Don't bother with xfs_setattr given that nothing can have made it to disk yet
+ * as the page is still locked at this point.
+ */
+STATIC void
+xfs_vm_kill_delalloc_range(
+ struct inode *inode,
+ loff_t start,
+ loff_t end)
+{
+ struct xfs_inode *ip = XFS_I(inode);
+ xfs_fileoff_t start_fsb;
+ xfs_fileoff_t end_fsb;
+ int error;
+
+ start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
+ end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
+ if (end_fsb <= start_fsb)
+ return;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
+ end_fsb - start_fsb);
+ if (error) {
+ /* something screwed, just bail */
+ if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ xfs_alert(ip->i_mount,
+ "xfs_vm_write_failed: unable to clean up ino %lld",
+ ip->i_ino);
+ }
+ }
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+}
+
+STATIC void
+xfs_vm_write_failed(
+ struct inode *inode,
+ struct page *page,
+ loff_t pos,
+ unsigned len)
+{
+ loff_t block_offset;
+ loff_t block_start;
+ loff_t block_end;
+ loff_t from = pos & (PAGE_CACHE_SIZE - 1);
+ loff_t to = from + len;
+ struct buffer_head *bh, *head;
+
+ /*
+	 * The request pos offset might be 32 or 64 bit; either is fine on
+	 * a 64-bit platform. However, for a 64-bit pos request on a 32-bit
+	 * platform, the high 32 bits will be masked off if we evaluate the
+	 * block_offset via (pos & PAGE_MASK), because PAGE_MASK is
+	 * 0xfffff000 as an unsigned long. Hence the result would be
+	 * incorrect, which in most cases would cause the following ASSERT
+	 * to fail. To avoid this mismatch problem, we evaluate the
+	 * block_offset of the start of the page by using shifts rather
+	 * than masks.
+ */
+ block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;
+
+ ASSERT(block_offset + from == pos);
+
+ head = page_buffers(page);
+ block_start = 0;
+ for (bh = head; bh != head || !block_start;
+ bh = bh->b_this_page, block_start = block_end,
+ block_offset += bh->b_size) {
+ block_end = block_start + bh->b_size;
+
+ /* skip buffers before the write */
+ if (block_end <= from)
+ continue;
+
+ /* if the buffer is after the write, we're done */
+ if (block_start >= to)
+ break;
+
+ if (!buffer_delay(bh))
+ continue;
+
+ if (!buffer_new(bh) && block_offset < i_size_read(inode))
+ continue;
+
+ xfs_vm_kill_delalloc_range(inode, block_offset,
+ block_offset + bh->b_size);
+
+ /*
+		 * This buffer does not contain data anymore. Make sure anyone
+ * who finds it knows that for certain.
+ */
+ clear_buffer_delay(bh);
+ clear_buffer_uptodate(bh);
+ clear_buffer_mapped(bh);
+ clear_buffer_new(bh);
+ clear_buffer_dirty(bh);
+ }
+
+}
+
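To make the PAGE_MASK pitfall described in xfs_vm_write_failed() concrete, here is a small standalone sketch. It assumes 4k pages and models PAGE_MASK as a 32-bit unsigned value, the way a 32-bit kernel would see it; the position used is an arbitrary example and nothing here comes from the patch.

#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SHIFT 12

int main(void)
{
	/* A write position beyond 4GB, as a 64-bit loff_t. */
	uint64_t pos = (5ULL << 32) + 8192 + 100;

	/*
	 * On a 32-bit kernel PAGE_MASK is an unsigned long, so the mask
	 * is only 0xfffff000: the high 32 bits of pos are thrown away.
	 */
	uint32_t page_mask_32 = ~((1u << PAGE_CACHE_SHIFT) - 1);	/* 0xfffff000 */
	uint64_t masked = pos & page_mask_32;		/* wrong: high bits lost */

	/* Shifting down and back up keeps all 64 bits. */
	uint64_t shifted = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;

	printf("pos & PAGE_MASK : 0x%llx\n", (unsigned long long)masked);
	printf("shift round-trip: 0x%llx\n", (unsigned long long)shifted);
	return 0;
}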
+/*
+ * This used to call block_write_begin(), but it unlocks and releases the page
+ * on error, and we need that page to be able to punch stale delalloc blocks out
+ * on failure. Hence we copy-n-waste it here and call xfs_vm_write_failed() at
+ * the appropriate point.
+ */
+STATIC int
+xfs_vm_write_begin(
+ struct file *file,
+ struct address_space *mapping,
+ loff_t pos,
+ unsigned len,
+ unsigned flags,
+ struct page **pagep,
+ void **fsdata)
+{
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ struct page *page;
+ int status;
+
+ ASSERT(len <= PAGE_CACHE_SIZE);
+
+ page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
+
+ status = __block_write_begin(page, pos, len, xfs_get_blocks);
+ if (unlikely(status)) {
+ struct inode *inode = mapping->host;
+ size_t isize = i_size_read(inode);
+
+ xfs_vm_write_failed(inode, page, pos, len);
+ unlock_page(page);
+
+ /*
+ * If the write is beyond EOF, we only want to kill blocks
+ * allocated in this write, not blocks that were previously
+ * written successfully.
+ */
+ if (pos + len > isize) {
+ ssize_t start = max_t(ssize_t, pos, isize);
+
+ truncate_pagecache_range(inode, start, pos + len);
+ }
+
+ page_cache_release(page);
+ page = NULL;
+ }
+
+ *pagep = page;
+ return status;
+}
+
+/*
+ * On failure, we only need to kill delalloc blocks beyond EOF in the range of
+ * this specific write because they will never be written. Previous writes
+ * beyond EOF where block allocation succeeded do not need to be trashed, so
+ * only new blocks from this write should be trashed. For blocks within
+ * EOF, generic_write_end() zeros them so they are safe to leave alone and be
+ * written with all the other valid data.
+ */
+STATIC int
+xfs_vm_write_end(
+ struct file *file,
+ struct address_space *mapping,
+ loff_t pos,
+ unsigned len,
+ unsigned copied,
+ struct page *page,
+ void *fsdata)
+{
+ int ret;
+
+ ASSERT(len <= PAGE_CACHE_SIZE);
+
+ ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+ if (unlikely(ret < len)) {
+ struct inode *inode = mapping->host;
+ size_t isize = i_size_read(inode);
+ loff_t to = pos + len;
+
+ if (to > isize) {
+ /* only kill blocks in this write beyond EOF */
+ if (pos > isize)
+ isize = pos;
+ xfs_vm_kill_delalloc_range(inode, isize, to);
+ truncate_pagecache_range(inode, isize, to);
+ }
+ }
+ return ret;
+}
+
+STATIC sector_t
+xfs_vm_bmap(
+ struct address_space *mapping,
+ sector_t block)
+{
+ struct inode *inode = (struct inode *)mapping->host;
+ struct xfs_inode *ip = XFS_I(inode);
+
+ trace_xfs_vm_bmap(XFS_I(inode));
+ xfs_ilock(ip, XFS_IOLOCK_SHARED);
+ filemap_write_and_wait(mapping);
+ xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+ return generic_block_bmap(mapping, block, xfs_get_blocks);
+}
+
+STATIC int
+xfs_vm_readpage(
+ struct file *unused,
+ struct page *page)
+{
+ return mpage_readpage(page, xfs_get_blocks);
+}
+
+STATIC int
+xfs_vm_readpages(
+ struct file *unused,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned nr_pages)
+{
+ return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
+}
+
+const struct address_space_operations xfs_address_space_operations = {
+ .readpage = xfs_vm_readpage,
+ .readpages = xfs_vm_readpages,
+ .writepage = xfs_vm_writepage,
+ .writepages = xfs_vm_writepages,
+ .releasepage = xfs_vm_releasepage,
+ .invalidatepage = xfs_vm_invalidatepage,
+ .write_begin = xfs_vm_write_begin,
+ .write_end = xfs_vm_write_end,
+ .bmap = xfs_vm_bmap,
+ .direct_IO = xfs_vm_direct_IO,
+ .migratepage = buffer_migrate_page,
+ .is_partially_uptodate = block_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
+};
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/xfs_aops.h
index 3ba0631a381..f94dd459dff 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -18,10 +18,23 @@
#ifndef __XFS_AOPS_H__
#define __XFS_AOPS_H__
-extern struct workqueue_struct *xfsdatad_workqueue;
extern mempool_t *xfs_ioend_pool;
-typedef void (*xfs_ioend_func_t)(void *);
+/*
+ * Types of I/O for bmap clustering and I/O completion tracking.
+ */
+enum {
+ XFS_IO_DIRECT = 0, /* special case for direct I/O ioends */
+ XFS_IO_DELALLOC, /* covers delalloc region */
+ XFS_IO_UNWRITTEN, /* covers allocated but uninitialized data */
+ XFS_IO_OVERWRITE, /* covers already allocated extent */
+};
+
+#define XFS_IO_TYPES \
+ { 0, "" }, \
+ { XFS_IO_DELALLOC, "delalloc" }, \
+ { XFS_IO_UNWRITTEN, "unwritten" }, \
+ { XFS_IO_OVERWRITE, "overwrite" }
/*
* xfs_ioend struct manages large extent writes for XFS.
@@ -32,15 +45,19 @@ typedef struct xfs_ioend {
unsigned int io_type; /* delalloc / unwritten */
int io_error; /* I/O error code */
atomic_t io_remaining; /* hold count */
+ unsigned int io_isdirect : 1;/* direct I/O */
struct inode *io_inode; /* file being written to */
struct buffer_head *io_buffer_head;/* buffer linked list head */
struct buffer_head *io_buffer_tail;/* buffer linked list tail */
size_t io_size; /* size of the extent */
xfs_off_t io_offset; /* offset in the file */
struct work_struct io_work; /* xfsdatad work queue */
+ struct xfs_trans *io_append_trans;/* xact. for size update */
} xfs_ioend_t;
extern const struct address_space_operations xfs_address_space_operations;
extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
+extern void xfs_count_page_state(struct page *, int *, int *);
+
#endif /* __XFS_AOPS_H__ */
diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h
deleted file mode 100644
index f9472a2076d..00000000000
--- a/fs/xfs/xfs_arch.h
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_ARCH_H__
-#define __XFS_ARCH_H__
-
-#ifndef XFS_BIG_INUMS
-# error XFS_BIG_INUMS must be defined true or false
-#endif
-
-#ifdef __KERNEL__
-
-#include <asm/byteorder.h>
-
-#ifdef __BIG_ENDIAN
-#define XFS_NATIVE_HOST 1
-#else
-#undef XFS_NATIVE_HOST
-#endif
-
-#else /* __KERNEL__ */
-
-#if __BYTE_ORDER == __BIG_ENDIAN
-#define XFS_NATIVE_HOST 1
-#else
-#undef XFS_NATIVE_HOST
-#endif
-
-#ifdef XFS_NATIVE_HOST
-#define cpu_to_be16(val) ((__be16)(val))
-#define cpu_to_be32(val) ((__be32)(val))
-#define cpu_to_be64(val) ((__be64)(val))
-#define be16_to_cpu(val) ((__uint16_t)(val))
-#define be32_to_cpu(val) ((__uint32_t)(val))
-#define be64_to_cpu(val) ((__uint64_t)(val))
-#else
-#define cpu_to_be16(val) (__swab16((__uint16_t)(val)))
-#define cpu_to_be32(val) (__swab32((__uint32_t)(val)))
-#define cpu_to_be64(val) (__swab64((__uint64_t)(val)))
-#define be16_to_cpu(val) (__swab16((__be16)(val)))
-#define be32_to_cpu(val) (__swab32((__be32)(val)))
-#define be64_to_cpu(val) (__swab64((__be64)(val)))
-#endif
-
-#endif /* __KERNEL__ */
-
-/* do we need conversion? */
-#define ARCH_NOCONVERT 1
-#ifdef XFS_NATIVE_HOST
-# define ARCH_CONVERT ARCH_NOCONVERT
-#else
-# define ARCH_CONVERT 0
-#endif
-
-/* generic swapping macros */
-
-#ifndef HAVE_SWABMACROS
-#define INT_SWAP16(type,var) ((typeof(type))(__swab16((__u16)(var))))
-#define INT_SWAP32(type,var) ((typeof(type))(__swab32((__u32)(var))))
-#define INT_SWAP64(type,var) ((typeof(type))(__swab64((__u64)(var))))
-#endif
-
-#define INT_SWAP(type, var) \
- ((sizeof(type) == 8) ? INT_SWAP64(type,var) : \
- ((sizeof(type) == 4) ? INT_SWAP32(type,var) : \
- ((sizeof(type) == 2) ? INT_SWAP16(type,var) : \
- (var))))
-
-/*
- * get and set integers from potentially unaligned locations
- */
-
-#define INT_GET_UNALIGNED_16_BE(pointer) \
- ((__u16)((((__u8*)(pointer))[0] << 8) | (((__u8*)(pointer))[1])))
-#define INT_SET_UNALIGNED_16_BE(pointer,value) \
- { \
- ((__u8*)(pointer))[0] = (((value) >> 8) & 0xff); \
- ((__u8*)(pointer))[1] = (((value) ) & 0xff); \
- }
-
-/* define generic INT_ macros */
-
-#define INT_GET(reference,arch) \
- (((arch) == ARCH_NOCONVERT) \
- ? \
- (reference) \
- : \
- INT_SWAP((reference),(reference)) \
- )
-
-/* does not return a value */
-#define INT_SET(reference,arch,valueref) \
- (__builtin_constant_p(valueref) ? \
- (void)( (reference) = ( ((arch) != ARCH_NOCONVERT) ? (INT_SWAP((reference),(valueref))) : (valueref)) ) : \
- (void)( \
- ((reference) = (valueref)), \
- ( ((arch) != ARCH_NOCONVERT) ? (reference) = INT_SWAP((reference),(reference)) : 0 ) \
- ) \
- )
-
-/* does not return a value */
-#define INT_MOD_EXPR(reference,arch,code) \
- (((arch) == ARCH_NOCONVERT) \
- ? \
- (void)((reference) code) \
- : \
- (void)( \
- (reference) = INT_GET((reference),arch) , \
- ((reference) code), \
- INT_SET(reference, arch, reference) \
- ) \
- )
-
-/* does not return a value */
-#define INT_MOD(reference,arch,delta) \
- (void)( \
- INT_MOD_EXPR(reference,arch,+=(delta)) \
- )
-
-/*
- * INT_COPY - copy a value between two locations with the
- * _same architecture_ but _potentially different sizes_
- *
- * if the types of the two parameters are equal or they are
- * in native architecture, a simple copy is done
- *
- * otherwise, architecture conversions are done
- *
- */
-
-/* does not return a value */
-#define INT_COPY(dst,src,arch) \
- ( \
- ((sizeof(dst) == sizeof(src)) || ((arch) == ARCH_NOCONVERT)) \
- ? \
- (void)((dst) = (src)) \
- : \
- INT_SET(dst, arch, INT_GET(src, arch)) \
- )
-
-/*
- * INT_XLATE - copy a value in either direction between two locations
- * with different architectures
- *
- * dir < 0 - copy from memory to buffer (native to arch)
- * dir > 0 - copy from buffer to memory (arch to native)
- */
-
-/* does not return a value */
-#define INT_XLATE(buf,mem,dir,arch) {\
- ASSERT(dir); \
- if (dir>0) { \
- (mem)=INT_GET(buf, arch); \
- } else { \
- INT_SET(buf, arch, mem); \
- } \
-}
-
-/*
- * In directories inode numbers are stored as unaligned arrays of unsigned
- * 8bit integers on disk.
- *
- * For v1 directories or v2 directories that contain inode numbers that
- * do not fit into 32bit the array has eight members, but the first member
- * is always zero:
- *
- * |unused|48-55|40-47|32-39|24-31|16-23| 8-15| 0- 7|
- *
- * For v2 directories that only contain entries with inode numbers that fit
- * into 32bits a four-member array is used:
- *
- * |24-31|16-23| 8-15| 0- 7|
- */
-
-#define XFS_GET_DIR_INO4(di) \
- (((__u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
-
-#define XFS_PUT_DIR_INO4(from, di) \
-do { \
- (di).i[0] = (((from) & 0xff000000ULL) >> 24); \
- (di).i[1] = (((from) & 0x00ff0000ULL) >> 16); \
- (di).i[2] = (((from) & 0x0000ff00ULL) >> 8); \
- (di).i[3] = ((from) & 0x000000ffULL); \
-} while (0)
-
-#define XFS_DI_HI(di) \
- (((__u32)(di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
-#define XFS_DI_LO(di) \
- (((__u32)(di).i[4] << 24) | ((di).i[5] << 16) | ((di).i[6] << 8) | ((di).i[7]))
-
-#define XFS_GET_DIR_INO8(di) \
- (((xfs_ino_t)XFS_DI_LO(di) & 0xffffffffULL) | \
- ((xfs_ino_t)XFS_DI_HI(di) << 32))
-
-#define XFS_PUT_DIR_INO8(from, di) \
-do { \
- (di).i[0] = 0; \
- (di).i[1] = (((from) & 0x00ff000000000000ULL) >> 48); \
- (di).i[2] = (((from) & 0x0000ff0000000000ULL) >> 40); \
- (di).i[3] = (((from) & 0x000000ff00000000ULL) >> 32); \
- (di).i[4] = (((from) & 0x00000000ff000000ULL) >> 24); \
- (di).i[5] = (((from) & 0x0000000000ff0000ULL) >> 16); \
- (di).i[6] = (((from) & 0x000000000000ff00ULL) >> 8); \
- (di).i[7] = ((from) & 0x00000000000000ffULL); \
-} while (0)
-
-#endif /* __XFS_ARCH_H__ */
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index df151a85918..bfe36fc2cdc 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -15,41 +15,34 @@
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-
-#include <linux/capability.h>
-
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
-#include "xfs_btree.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
+#include "xfs_attr_remote.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
-#include "xfs_acl.h"
-#include "xfs_rw.h"
-#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
+#include "xfs_dinode.h"
/*
* xfs_attr.c
@@ -57,11 +50,6 @@
* Provide the external interfaces to manage attribute lists.
*/
-#define ATTR_SYSCOUNT 2
-static struct attrnames posix_acl_access;
-static struct attrnames posix_acl_default;
-static struct attrnames *attr_system_names[ATTR_SYSCOUNT];
-
/*========================================================================
* Function prototypes for the kernel.
*========================================================================*/
@@ -77,7 +65,6 @@ STATIC int xfs_attr_shortform_addname(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_get(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_addname(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args);
-STATIC int xfs_attr_leaf_list(xfs_attr_list_context_t *context);
/*
* Internal routines when attribute list is more than one block.
@@ -85,135 +72,163 @@ STATIC int xfs_attr_leaf_list(xfs_attr_list_context_t *context);
STATIC int xfs_attr_node_get(xfs_da_args_t *args);
STATIC int xfs_attr_node_addname(xfs_da_args_t *args);
STATIC int xfs_attr_node_removename(xfs_da_args_t *args);
-STATIC int xfs_attr_node_list(xfs_attr_list_context_t *context);
STATIC int xfs_attr_fillstate(xfs_da_state_t *state);
STATIC int xfs_attr_refillstate(xfs_da_state_t *state);
-/*
- * Routines to manipulate out-of-line attribute values.
- */
-STATIC int xfs_attr_rmtval_set(xfs_da_args_t *args);
-STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args);
-
-#define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */
-
-#if defined(XFS_ATTR_TRACE)
-ktrace_t *xfs_attr_trace_buf;
-#endif
STATIC int
-xfs_attr_name_to_xname(
- struct xfs_name *xname,
- const char *aname)
+xfs_attr_args_init(
+ struct xfs_da_args *args,
+ struct xfs_inode *dp,
+ const unsigned char *name,
+ int flags)
{
- if (!aname)
+
+ if (!name)
return EINVAL;
- xname->name = aname;
- xname->len = strlen(aname);
- if (xname->len >= MAXNAMELEN)
+
+ memset(args, 0, sizeof(*args));
+ args->geo = dp->i_mount->m_attr_geo;
+ args->whichfork = XFS_ATTR_FORK;
+ args->dp = dp;
+ args->flags = flags;
+ args->name = name;
+ args->namelen = strlen((const char *)name);
+ if (args->namelen >= MAXNAMELEN)
return EFAULT; /* match IRIX behaviour */
+ args->hashval = xfs_da_hashname(args->name, args->namelen);
return 0;
}
+int
+xfs_inode_hasattr(
+ struct xfs_inode *ip)
+{
+ if (!XFS_IFORK_Q(ip) ||
+ (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+ ip->i_d.di_anextents == 0))
+ return 0;
+ return 1;
+}
+
/*========================================================================
* Overall external interface routines.
*========================================================================*/
int
-xfs_attr_fetch(xfs_inode_t *ip, struct xfs_name *name,
- char *value, int *valuelenp, int flags)
+xfs_attr_get(
+ struct xfs_inode *ip,
+ const unsigned char *name,
+ unsigned char *value,
+ int *valuelenp,
+ int flags)
{
- xfs_da_args_t args;
- int error;
+ struct xfs_da_args args;
+ uint lock_mode;
+ int error;
- if ((XFS_IFORK_Q(ip) == 0) ||
- (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
- ip->i_d.di_anextents == 0))
- return(ENOATTR);
+ XFS_STATS_INC(xs_attr_get);
+
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ return EIO;
+
+ if (!xfs_inode_hasattr(ip))
+ return ENOATTR;
+
+ error = xfs_attr_args_init(&args, ip, name, flags);
+ if (error)
+ return error;
- /*
- * Fill in the arg structure for this request.
- */
- memset((char *)&args, 0, sizeof(args));
- args.name = name->name;
- args.namelen = name->len;
args.value = value;
args.valuelen = *valuelenp;
- args.flags = flags;
- args.hashval = xfs_da_hashname(args.name, args.namelen);
- args.dp = ip;
- args.whichfork = XFS_ATTR_FORK;
- /*
- * Decide on what work routines to call based on the inode size.
- */
- if (XFS_IFORK_Q(ip) == 0 ||
- (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
- ip->i_d.di_anextents == 0)) {
- error = XFS_ERROR(ENOATTR);
- } else if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+ lock_mode = xfs_ilock_attr_map_shared(ip);
+ if (!xfs_inode_hasattr(ip))
+ error = ENOATTR;
+ else if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
error = xfs_attr_shortform_getvalue(&args);
- } else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK)) {
+ else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK))
error = xfs_attr_leaf_get(&args);
- } else {
+ else
error = xfs_attr_node_get(&args);
- }
+ xfs_iunlock(ip, lock_mode);
+
+ *valuelenp = args.valuelen;
+ return error == EEXIST ? 0 : error;
+}
+
+/*
+ * Calculate how many blocks we need for the new attribute.
+ */
+STATIC int
+xfs_attr_calc_size(
+ struct xfs_da_args *args,
+ int *local)
+{
+ struct xfs_mount *mp = args->dp->i_mount;
+ int size;
+ int nblks;
/*
- * Return the number of bytes in the value to the caller.
+ * Determine space new attribute will use, and if it would be
+ * "local" or "remote" (note: local != inline).
*/
- *valuelenp = args.valuelen;
+ size = xfs_attr_leaf_newentsize(args, local);
+ nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
+ if (*local) {
+ if (size > (args->geo->blksize / 2)) {
+ /* Double split possible */
+ nblks *= 2;
+ }
+ } else {
+ /*
+ * Out of line attribute, cannot double split, but
+ * make room for the attribute value itself.
+ */
+ uint dblocks = xfs_attr3_rmt_blocks(mp, args->valuelen);
+ nblks += dblocks;
+ nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
+ }
- if (error == EEXIST)
- error = 0;
- return(error);
+ return nblks;
}
int
-xfs_attr_get(
- xfs_inode_t *ip,
- const char *name,
- char *value,
- int *valuelenp,
- int flags)
+xfs_attr_set(
+ struct xfs_inode *dp,
+ const unsigned char *name,
+ unsigned char *value,
+ int valuelen,
+ int flags)
{
- int error;
- struct xfs_name xname;
+ struct xfs_mount *mp = dp->i_mount;
+ struct xfs_da_args args;
+ struct xfs_bmap_free flist;
+ struct xfs_trans_res tres;
+ xfs_fsblock_t firstblock;
+ int rsvd = (flags & ATTR_ROOT) != 0;
+ int error, err2, committed, local;
- XFS_STATS_INC(xs_attr_get);
+ XFS_STATS_INC(xs_attr_set);
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
- return(EIO);
+ if (XFS_FORCED_SHUTDOWN(dp->i_mount))
+ return EIO;
- error = xfs_attr_name_to_xname(&xname, name);
+ error = xfs_attr_args_init(&args, dp, name, flags);
if (error)
return error;
- xfs_ilock(ip, XFS_ILOCK_SHARED);
- error = xfs_attr_fetch(ip, &xname, value, valuelenp, flags);
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
- return(error);
-}
-
-STATIC int
-xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
- char *value, int valuelen, int flags)
-{
- xfs_da_args_t args;
- xfs_fsblock_t firstblock;
- xfs_bmap_free_t flist;
- int error, err2, committed;
- int local, size;
- uint nblks;
- xfs_mount_t *mp = dp->i_mount;
- int rsvd = (flags & ATTR_ROOT) != 0;
+ args.value = value;
+ args.valuelen = valuelen;
+ args.firstblock = &firstblock;
+ args.flist = &flist;
+ args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
+ args.total = xfs_attr_calc_size(&args, &local);
- /*
- * Attach the dquots to the inode.
- */
- if ((error = XFS_QM_DQATTACH(mp, dp, 0)))
- return (error);
+ error = xfs_qm_dqattach(dp, 0);
+ if (error)
+ return error;
/*
* If the inode doesn't have an attribute fork, add one.
@@ -221,54 +236,13 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
*/
if (XFS_IFORK_Q(dp) == 0) {
int sf_size = sizeof(xfs_attr_sf_hdr_t) +
- XFS_ATTR_SF_ENTSIZE_BYNAME(name->len, valuelen);
+ XFS_ATTR_SF_ENTSIZE_BYNAME(args.namelen, valuelen);
- if ((error = xfs_bmap_add_attrfork(dp, sf_size, rsvd)))
- return(error);
- }
-
- /*
- * Fill in the arg structure for this request.
- */
- memset((char *)&args, 0, sizeof(args));
- args.name = name->name;
- args.namelen = name->len;
- args.value = value;
- args.valuelen = valuelen;
- args.flags = flags;
- args.hashval = xfs_da_hashname(args.name, args.namelen);
- args.dp = dp;
- args.firstblock = &firstblock;
- args.flist = &flist;
- args.whichfork = XFS_ATTR_FORK;
- args.addname = 1;
- args.oknoent = 1;
-
- /*
- * Determine space new attribute will use, and if it would be
- * "local" or "remote" (note: local != inline).
- */
- size = xfs_attr_leaf_newentsize(name->len, valuelen,
- mp->m_sb.sb_blocksize, &local);
-
- nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
- if (local) {
- if (size > (mp->m_sb.sb_blocksize >> 1)) {
- /* Double split possible */
- nblks <<= 1;
- }
- } else {
- uint dblocks = XFS_B_TO_FSB(mp, valuelen);
- /* Out of line attribute, cannot double split, but make
- * room for the attribute value itself.
- */
- nblks += dblocks;
- nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
+ error = xfs_bmap_add_attrfork(dp, sf_size, rsvd);
+ if (error)
+ return error;
}
- /* Size is now blocks for attribute data */
- args.total = nblks;
-
/*
* Start our first transaction of the day.
*
@@ -289,34 +263,35 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
if (rsvd)
args.trans->t_flags |= XFS_TRANS_RESERVE;
- if ((error = xfs_trans_reserve(args.trans, (uint) nblks,
- XFS_ATTRSET_LOG_RES(mp, nblks),
- 0, XFS_TRANS_PERM_LOG_RES,
- XFS_ATTRSET_LOG_COUNT))) {
+ tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
+ M_RES(mp)->tr_attrsetrt.tr_logres * args.total;
+ tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
+ tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
+ error = xfs_trans_reserve(args.trans, &tres, args.total, 0);
+ if (error) {
xfs_trans_cancel(args.trans, 0);
- return(error);
+ return error;
}
xfs_ilock(dp, XFS_ILOCK_EXCL);
- error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, nblks, 0,
- rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
- XFS_QMOPT_RES_REGBLKS);
+ error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
+ rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
+ XFS_QMOPT_RES_REGBLKS);
if (error) {
xfs_iunlock(dp, XFS_ILOCK_EXCL);
xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES);
- return (error);
+ return error;
}
- xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args.trans, dp);
+ xfs_trans_ijoin(args.trans, dp, 0);
/*
* If the attribute list is non-existent or a shortform list,
* upgrade it to a single-leaf-block attribute list.
*/
- if ((dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) ||
- ((dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) &&
- (dp->i_d.di_anextents == 0))) {
+ if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
+ (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+ dp->i_d.di_anextents == 0)) {
/*
* Build initial attribute list (if required).
@@ -341,27 +316,25 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
* the transaction goes to disk before returning
* to the user.
*/
- if (mp->m_flags & XFS_MOUNT_WSYNC) {
+ if (mp->m_flags & XFS_MOUNT_WSYNC)
xfs_trans_set_sync(args.trans);
+
+ if (!error && (flags & ATTR_KERNOTIME) == 0) {
+ xfs_trans_ichgtime(args.trans, dp,
+ XFS_ICHGTIME_CHG);
}
err2 = xfs_trans_commit(args.trans,
XFS_TRANS_RELEASE_LOG_RES);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
- /*
- * Hit the inode change time.
- */
- if (!error && (flags & ATTR_KERNOTIME) == 0) {
- xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
- }
- return(error == 0 ? err2 : error);
+ return error ? error : err2;
}
/*
* It won't fit in the shortform, transform to a leaf block.
* GROT: another possible req'mt for a double-split btree op.
*/
- XFS_BMAP_INIT(args.flist, args.firstblock);
+ xfs_bmap_init(args.flist, args.firstblock);
error = xfs_attr_shortform_to_leaf(&args);
if (!error) {
error = xfs_bmap_finish(&args.trans, args.flist,
@@ -378,36 +351,36 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args.trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args.trans, dp, 0);
/*
* Commit the leaf transformation. We'll need another (linked)
* transaction to add the new attribute to the leaf.
*/
- if ((error = xfs_attr_rolltrans(&args.trans, dp)))
+
+ error = xfs_trans_roll(&args.trans, dp);
+ if (error)
goto out;
}
- if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+ if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
error = xfs_attr_leaf_addname(&args);
- } else {
+ else
error = xfs_attr_node_addname(&args);
- }
- if (error) {
+ if (error)
goto out;
- }
/*
* If this is a synchronous mount, make sure that the
* transaction goes to disk before returning to the user.
*/
- if (mp->m_flags & XFS_MOUNT_WSYNC) {
+ if (mp->m_flags & XFS_MOUNT_WSYNC)
xfs_trans_set_sync(args.trans);
- }
+
+ if ((flags & ATTR_KERNOTIME) == 0)
+ xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
/*
* Commit the last in the sequence of transactions.
@@ -416,78 +389,58 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
- /*
- * Hit the inode change time.
- */
- if (!error && (flags & ATTR_KERNOTIME) == 0) {
- xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
- }
-
- return(error);
+ return error;
out:
- if (args.trans)
+ if (args.trans) {
xfs_trans_cancel(args.trans,
XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+ }
xfs_iunlock(dp, XFS_ILOCK_EXCL);
- return(error);
+ return error;
}
+/*
+ * Generic handler routine to remove a name from an attribute list.
+ * Transitions attribute list from Btree to shortform as necessary.
+ */
int
-xfs_attr_set(
- xfs_inode_t *dp,
- const char *name,
- char *value,
- int valuelen,
- int flags)
+xfs_attr_remove(
+ struct xfs_inode *dp,
+ const unsigned char *name,
+ int flags)
{
- int error;
- struct xfs_name xname;
+ struct xfs_mount *mp = dp->i_mount;
+ struct xfs_da_args args;
+ struct xfs_bmap_free flist;
+ xfs_fsblock_t firstblock;
+ int error;
- XFS_STATS_INC(xs_attr_set);
+ XFS_STATS_INC(xs_attr_remove);
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
- return (EIO);
+ return EIO;
- error = xfs_attr_name_to_xname(&xname, name);
+ if (!xfs_inode_hasattr(dp))
+ return ENOATTR;
+
+ error = xfs_attr_args_init(&args, dp, name, flags);
if (error)
return error;
- return xfs_attr_set_int(dp, &xname, value, valuelen, flags);
-}
-
-/*
- * Generic handler routine to remove a name from an attribute list.
- * Transitions attribute list from Btree to shortform as necessary.
- */
-STATIC int
-xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
-{
- xfs_da_args_t args;
- xfs_fsblock_t firstblock;
- xfs_bmap_free_t flist;
- int error;
- xfs_mount_t *mp = dp->i_mount;
-
- /*
- * Fill in the arg structure for this request.
- */
- memset((char *)&args, 0, sizeof(args));
- args.name = name->name;
- args.namelen = name->len;
- args.flags = flags;
- args.hashval = xfs_da_hashname(args.name, args.namelen);
- args.dp = dp;
args.firstblock = &firstblock;
args.flist = &flist;
- args.total = 0;
- args.whichfork = XFS_ATTR_FORK;
/*
- * Attach the dquots to the inode.
+	 * We have no control over the attribute names that userspace passes us
+ * to remove, so we have to allow the name lookup prior to attribute
+ * removal to fail.
*/
- if ((error = XFS_QM_DQATTACH(mp, dp, 0)))
- return (error);
+ args.op_flags = XFS_DA_OP_OKNOENT;
+
+ error = xfs_qm_dqattach(dp, 0);
+ if (error)
+ return error;
/*
* Start our first transaction of the day.
@@ -509,13 +462,11 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
if (flags & ATTR_ROOT)
args.trans->t_flags |= XFS_TRANS_RESERVE;
- if ((error = xfs_trans_reserve(args.trans,
- XFS_ATTRRM_SPACE_RES(mp),
- XFS_ATTRRM_LOG_RES(mp),
- 0, XFS_TRANS_PERM_LOG_RES,
- XFS_ATTRRM_LOG_COUNT))) {
+ error = xfs_trans_reserve(args.trans, &M_RES(mp)->tr_attrrm,
+ XFS_ATTRRM_SPACE_RES(mp), 0);
+ if (error) {
xfs_trans_cancel(args.trans, 0);
- return(error);
+ return error;
}
xfs_ilock(dp, XFS_ILOCK_EXCL);
@@ -523,40 +474,31 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
* No need to make quota reservations here. We expect to release some
* blocks not allocate in the common case.
*/
- xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args.trans, dp);
+ xfs_trans_ijoin(args.trans, dp, 0);
- /*
- * Decide on what work routines to call based on the inode size.
- */
- if (XFS_IFORK_Q(dp) == 0 ||
- (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
- dp->i_d.di_anextents == 0)) {
+ if (!xfs_inode_hasattr(dp)) {
error = XFS_ERROR(ENOATTR);
- goto out;
- }
- if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+ } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
error = xfs_attr_shortform_remove(&args);
- if (error) {
- goto out;
- }
} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
error = xfs_attr_leaf_removename(&args);
} else {
error = xfs_attr_node_removename(&args);
}
- if (error) {
+
+ if (error)
goto out;
- }
/*
* If this is a synchronous mount, make sure that the
* transaction goes to disk before returning to the user.
*/
- if (mp->m_flags & XFS_MOUNT_WSYNC) {
+ if (mp->m_flags & XFS_MOUNT_WSYNC)
xfs_trans_set_sync(args.trans);
- }
+
+ if ((flags & ATTR_KERNOTIME) == 0)
+ xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
/*
* Commit the last in the sequence of transactions.
@@ -565,335 +507,17 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
- /*
- * Hit the inode change time.
- */
- if (!error && (flags & ATTR_KERNOTIME) == 0) {
- xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
- }
-
- return(error);
+ return error;
out:
- if (args.trans)
+ if (args.trans) {
xfs_trans_cancel(args.trans,
XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
- return(error);
-}
-
-int
-xfs_attr_remove(
- xfs_inode_t *dp,
- const char *name,
- int flags)
-{
- int error;
- struct xfs_name xname;
-
- XFS_STATS_INC(xs_attr_remove);
-
- if (XFS_FORCED_SHUTDOWN(dp->i_mount))
- return (EIO);
-
- error = xfs_attr_name_to_xname(&xname, name);
- if (error)
- return error;
-
- xfs_ilock(dp, XFS_ILOCK_SHARED);
- if (XFS_IFORK_Q(dp) == 0 ||
- (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
- dp->i_d.di_anextents == 0)) {
- xfs_iunlock(dp, XFS_ILOCK_SHARED);
- return(XFS_ERROR(ENOATTR));
- }
- xfs_iunlock(dp, XFS_ILOCK_SHARED);
-
- return xfs_attr_remove_int(dp, &xname, flags);
-}
-
-STATIC int
-xfs_attr_list_int(xfs_attr_list_context_t *context)
-{
- int error;
- xfs_inode_t *dp = context->dp;
-
- /*
- * Decide on what work routines to call based on the inode size.
- */
- if (XFS_IFORK_Q(dp) == 0 ||
- (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
- dp->i_d.di_anextents == 0)) {
- error = 0;
- } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
- error = xfs_attr_shortform_list(context);
- } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
- error = xfs_attr_leaf_list(context);
- } else {
- error = xfs_attr_node_list(context);
- }
- return error;
-}
-
-#define ATTR_ENTBASESIZE /* minimum bytes used by an attr */ \
- (((struct attrlist_ent *) 0)->a_name - (char *) 0)
-#define ATTR_ENTSIZE(namelen) /* actual bytes used by an attr */ \
- ((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \
- & ~(sizeof(u_int32_t)-1))
-
-/*
- * Format an attribute and copy it out to the user's buffer.
- * Take care to check values and protect against them changing later,
- * we may be reading them directly out of a user buffer.
- */
-/*ARGSUSED*/
-STATIC int
-xfs_attr_put_listent(xfs_attr_list_context_t *context, attrnames_t *namesp,
- char *name, int namelen,
- int valuelen, char *value)
-{
- attrlist_ent_t *aep;
- int arraytop;
-
- ASSERT(!(context->flags & ATTR_KERNOVAL));
- ASSERT(context->count >= 0);
- ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
- ASSERT(context->firstu >= sizeof(*context->alist));
- ASSERT(context->firstu <= context->bufsize);
-
- arraytop = sizeof(*context->alist) +
- context->count * sizeof(context->alist->al_offset[0]);
- context->firstu -= ATTR_ENTSIZE(namelen);
- if (context->firstu < arraytop) {
- xfs_attr_trace_l_c("buffer full", context);
- context->alist->al_more = 1;
- context->seen_enough = 1;
- return 1;
- }
-
- aep = (attrlist_ent_t *)&(((char *)context->alist)[ context->firstu ]);
- aep->a_valuelen = valuelen;
- memcpy(aep->a_name, name, namelen);
- aep->a_name[ namelen ] = 0;
- context->alist->al_offset[ context->count++ ] = context->firstu;
- context->alist->al_count = context->count;
- xfs_attr_trace_l_c("add", context);
- return 0;
-}
-
-STATIC int
-xfs_attr_kern_list(xfs_attr_list_context_t *context, attrnames_t *namesp,
- char *name, int namelen,
- int valuelen, char *value)
-{
- char *offset;
- int arraytop;
-
- ASSERT(context->count >= 0);
-
- arraytop = context->count + namesp->attr_namelen + namelen + 1;
- if (arraytop > context->firstu) {
- context->count = -1; /* insufficient space */
- return 1;
}
- offset = (char *)context->alist + context->count;
- strncpy(offset, namesp->attr_name, namesp->attr_namelen);
- offset += namesp->attr_namelen;
- strncpy(offset, name, namelen); /* real name */
- offset += namelen;
- *offset = '\0';
- context->count += namesp->attr_namelen + namelen + 1;
- return 0;
-}
-
-/*ARGSUSED*/
-STATIC int
-xfs_attr_kern_list_sizes(xfs_attr_list_context_t *context, attrnames_t *namesp,
- char *name, int namelen,
- int valuelen, char *value)
-{
- context->count += namesp->attr_namelen + namelen + 1;
- return 0;
-}
-
-/*
- * Generate a list of extended attribute names and optionally
- * also value lengths. Positive return value follows the XFS
- * convention of being an error, zero or negative return code
- * is the length of the buffer returned (negated), indicating
- * success.
- */
-int
-xfs_attr_list(
- xfs_inode_t *dp,
- char *buffer,
- int bufsize,
- int flags,
- attrlist_cursor_kern_t *cursor)
-{
- xfs_attr_list_context_t context;
- int error;
-
- XFS_STATS_INC(xs_attr_list);
-
- /*
- * Validate the cursor.
- */
- if (cursor->pad1 || cursor->pad2)
- return(XFS_ERROR(EINVAL));
- if ((cursor->initted == 0) &&
- (cursor->hashval || cursor->blkno || cursor->offset))
- return XFS_ERROR(EINVAL);
-
- /*
- * Check for a properly aligned buffer.
- */
- if (((long)buffer) & (sizeof(int)-1))
- return XFS_ERROR(EFAULT);
- if (flags & ATTR_KERNOVAL)
- bufsize = 0;
-
- /*
- * Initialize the output buffer.
- */
- context.dp = dp;
- context.cursor = cursor;
- context.count = 0;
- context.dupcnt = 0;
- context.resynch = 1;
- context.flags = flags;
- context.seen_enough = 0;
- context.alist = (attrlist_t *)buffer;
- context.put_value = 0;
-
- if (flags & ATTR_KERNAMELS) {
- context.bufsize = bufsize;
- context.firstu = context.bufsize;
- if (flags & ATTR_KERNOVAL)
- context.put_listent = xfs_attr_kern_list_sizes;
- else
- context.put_listent = xfs_attr_kern_list;
- } else {
- context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */
- context.firstu = context.bufsize;
- context.alist->al_count = 0;
- context.alist->al_more = 0;
- context.alist->al_offset[0] = context.bufsize;
- context.put_listent = xfs_attr_put_listent;
- }
-
- if (XFS_FORCED_SHUTDOWN(dp->i_mount))
- return EIO;
-
- xfs_ilock(dp, XFS_ILOCK_SHARED);
- xfs_attr_trace_l_c("syscall start", &context);
-
- error = xfs_attr_list_int(&context);
-
- xfs_iunlock(dp, XFS_ILOCK_SHARED);
- xfs_attr_trace_l_c("syscall end", &context);
-
- if (context.flags & (ATTR_KERNOVAL|ATTR_KERNAMELS)) {
- /* must return negated buffer size or the error */
- if (context.count < 0)
- error = XFS_ERROR(ERANGE);
- else
- error = -context.count;
- } else
- ASSERT(error >= 0);
-
- return error;
-}
-
-int /* error */
-xfs_attr_inactive(xfs_inode_t *dp)
-{
- xfs_trans_t *trans;
- xfs_mount_t *mp;
- int error;
-
- mp = dp->i_mount;
- ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
-
- xfs_ilock(dp, XFS_ILOCK_SHARED);
- if ((XFS_IFORK_Q(dp) == 0) ||
- (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) ||
- (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
- dp->i_d.di_anextents == 0)) {
- xfs_iunlock(dp, XFS_ILOCK_SHARED);
- return(0);
- }
- xfs_iunlock(dp, XFS_ILOCK_SHARED);
-
- /*
- * Start our first transaction of the day.
- *
- * All future transactions during this code must be "chained" off
- * this one via the trans_dup() call. All transactions will contain
- * the inode, and the inode will always be marked with trans_ihold().
- * Since the inode will be locked in all transactions, we must log
- * the inode in every transaction to let it float upward through
- * the log.
- */
- trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
- if ((error = xfs_trans_reserve(trans, 0, XFS_ATTRINVAL_LOG_RES(mp), 0,
- XFS_TRANS_PERM_LOG_RES,
- XFS_ATTRINVAL_LOG_COUNT))) {
- xfs_trans_cancel(trans, 0);
- return(error);
- }
- xfs_ilock(dp, XFS_ILOCK_EXCL);
-
- /*
- * No need to make quota reservations here. We expect to release some
- * blocks, not allocate, in the common case.
- */
- xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(trans, dp);
-
- /*
- * Decide on what work routines to call based on the inode size.
- */
- if ((XFS_IFORK_Q(dp) == 0) ||
- (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) ||
- (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
- dp->i_d.di_anextents == 0)) {
- error = 0;
- goto out;
- }
- error = xfs_attr_root_inactive(&trans, dp);
- if (error)
- goto out;
- /*
- * signal synchronous inactive transactions unless this
- * is a synchronous mount filesystem in which case we
- * know that we're here because we've been called out of
- * xfs_inactive which means that the last reference is gone
- * and the unlink transaction has already hit the disk so
- * async inactive transactions are safe.
- */
- if ((error = xfs_itruncate_finish(&trans, dp, 0LL, XFS_ATTR_FORK,
- (!(mp->m_flags & XFS_MOUNT_WSYNC)
- ? 1 : 0))))
- goto out;
-
- /*
- * Commit the last in the sequence of transactions.
- */
- xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
- error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
- return(error);
-
-out:
- xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
- return(error);
+ return error;
}
-
-
/*========================================================================
* External routines when attribute list is inside the inode
*========================================================================*/
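The hunk above also removes the old in-kernel listing entry point, xfs_attr_list(), together with its cursor and buffer checks. A minimal standalone sketch of those sanity checks follows, assuming a simplified stand-in for attrlist_cursor_kern_t and plain positive errno values in place of the XFS_ERROR() wrappers; it is illustrative only, not the kernel code:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct attrlist_cursor {		/* simplified attrlist_cursor_kern_t */
	uint32_t	hashval;
	uint32_t	blkno;
	uint32_t	offset;
	uint16_t	pad1;
	uint8_t		pad2;
	uint8_t		initted;
};

static int
check_list_args(const struct attrlist_cursor *cursor, const void *buffer)
{
	/* reserved padding must be zero */
	if (cursor->pad1 || cursor->pad2)
		return EINVAL;
	/* an uninitialised cursor must not already carry a position */
	if (cursor->initted == 0 &&
	    (cursor->hashval || cursor->blkno || cursor->offset))
		return EINVAL;
	/* the caller's output buffer must be int-aligned */
	if ((uintptr_t)buffer & (sizeof(int) - 1))
		return EFAULT;
	return 0;
}

int
main(void)
{
	struct attrlist_cursor	cursor;
	int			ibuf[16];
	char			*buf = (char *)ibuf;

	memset(&cursor, 0, sizeof(cursor));
	printf("aligned buffer:    %d\n", check_list_args(&cursor, buf));
	printf("misaligned buffer: %d\n", check_list_args(&cursor, buf + 1));
	return 0;
}
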
@@ -907,6 +531,8 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
{
int newsize, forkoff, retval;
+ trace_xfs_attr_sf_addname(args);
+
retval = xfs_attr_shortform_lookup(args);
if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) {
return(retval);
@@ -947,54 +573,67 @@ STATIC int
xfs_attr_leaf_addname(xfs_da_args_t *args)
{
xfs_inode_t *dp;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int retval, error, committed, forkoff;
+ trace_xfs_attr_leaf_addname(args);
+
/*
* Read the (only) block in the attribute list in.
*/
dp = args->dp;
args->blkno = 0;
- error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
- XFS_ATTR_FORK);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
if (error)
- return(error);
- ASSERT(bp != NULL);
+ return error;
/*
* Look up the given attribute in the leaf block. Figure out if
* the given flags produce an error or call for an atomic rename.
*/
- retval = xfs_attr_leaf_lookup_int(bp, args);
+ retval = xfs_attr3_leaf_lookup_int(bp, args);
if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) {
- xfs_da_brelse(args->trans, bp);
- return(retval);
+ xfs_trans_brelse(args->trans, bp);
+ return retval;
} else if (retval == EEXIST) {
if (args->flags & ATTR_CREATE) { /* pure create op */
- xfs_da_brelse(args->trans, bp);
- return(retval);
+ xfs_trans_brelse(args->trans, bp);
+ return retval;
}
- args->rename = 1; /* an atomic rename */
+
+ trace_xfs_attr_leaf_replace(args);
+
+		/* save the attribute state for later removal */
+ args->op_flags |= XFS_DA_OP_RENAME; /* an atomic rename */
args->blkno2 = args->blkno; /* set 2nd entry info*/
args->index2 = args->index;
args->rmtblkno2 = args->rmtblkno;
args->rmtblkcnt2 = args->rmtblkcnt;
+ args->rmtvaluelen2 = args->rmtvaluelen;
+
+ /*
+ * clear the remote attr state now that it is saved so that the
+ * values reflect the state of the attribute we are about to
+ * add, not the attribute we just found and will remove later.
+ */
+ args->rmtblkno = 0;
+ args->rmtblkcnt = 0;
+ args->rmtvaluelen = 0;
}
/*
* Add the attribute to the leaf block, transitioning to a Btree
* if required.
*/
- retval = xfs_attr_leaf_add(bp, args);
- xfs_da_buf_done(bp);
+ retval = xfs_attr3_leaf_add(bp, args);
if (retval == ENOSPC) {
/*
* Promote the attribute list to the Btree format, then
* Commit that transaction so that the node_addname() call
* can manage its own transactions.
*/
- XFS_BMAP_INIT(args->flist, args->firstblock);
- error = xfs_attr_leaf_to_node(args);
+ xfs_bmap_init(args->flist, args->firstblock);
+ error = xfs_attr3_leaf_to_node(args);
if (!error) {
error = xfs_bmap_finish(&args->trans, args->flist,
&committed);
@@ -1010,16 +649,15 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp, 0);
/*
* Commit the current trans (including the inode) and start
* a new one.
*/
- if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+ error = xfs_trans_roll(&args->trans, dp);
+ if (error)
return (error);
/*
@@ -1033,7 +671,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* Commit the transaction that added the attr name so that
* later routines can manage their own transactions.
*/
- if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+ error = xfs_trans_roll(&args->trans, dp);
+ if (error)
return (error);
/*
@@ -1054,12 +693,12 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* so that one disappears and one appears atomically. Then we
* must remove the "old" attribute/value pair.
*/
- if (args->rename) {
+ if (args->op_flags & XFS_DA_OP_RENAME) {
/*
* In a separate transaction, set the incomplete flag on the
* "old" attr and clear the incomplete flag on the "new" attr.
*/
- error = xfs_attr_leaf_flipflags(args);
+ error = xfs_attr3_leaf_flipflags(args);
if (error)
return(error);
@@ -1071,6 +710,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
args->blkno = args->blkno2;
args->rmtblkno = args->rmtblkno2;
args->rmtblkcnt = args->rmtblkcnt2;
+ args->rmtvaluelen = args->rmtvaluelen2;
if (args->rmtblkno) {
error = xfs_attr_rmtval_remove(args);
if (error)
@@ -1081,19 +721,19 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* Read in the block containing the "old" attr, then
* remove the "old" attr from that block (neat, huh!)
*/
- error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1,
- &bp, XFS_ATTR_FORK);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno,
+ -1, &bp);
if (error)
- return(error);
- ASSERT(bp != NULL);
- (void)xfs_attr_leaf_remove(bp, args);
+ return error;
+
+ xfs_attr3_leaf_remove(bp, args);
/*
* If the result is small enough, shrink it all into the inode.
*/
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- XFS_BMAP_INIT(args->flist, args->firstblock);
- error = xfs_attr_leaf_to_shortform(bp, args, forkoff);
+ xfs_bmap_init(args->flist, args->firstblock);
+ error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (!error) {
error = xfs_bmap_finish(&args->trans,
@@ -1112,25 +752,22 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* and started a new one. We need the inode to be
* in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
- } else
- xfs_da_buf_done(bp);
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp, 0);
+ }
/*
* Commit the remove and start the next trans in series.
*/
- error = xfs_attr_rolltrans(&args->trans, dp);
+ error = xfs_trans_roll(&args->trans, dp);
} else if (args->rmtblkno > 0) {
/*
* Added a "remote" value, just clear the incomplete flag.
*/
- error = xfs_attr_leaf_clearflag(args);
+ error = xfs_attr3_leaf_clearflag(args);
}
- return(error);
+ return error;
}
/*
@@ -1143,35 +780,34 @@ STATIC int
xfs_attr_leaf_removename(xfs_da_args_t *args)
{
xfs_inode_t *dp;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int error, committed, forkoff;
+ trace_xfs_attr_leaf_removename(args);
+
/*
* Remove the attribute.
*/
dp = args->dp;
args->blkno = 0;
- error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
- XFS_ATTR_FORK);
- if (error) {
- return(error);
- }
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
+ if (error)
+ return error;
- ASSERT(bp != NULL);
- error = xfs_attr_leaf_lookup_int(bp, args);
+ error = xfs_attr3_leaf_lookup_int(bp, args);
if (error == ENOATTR) {
- xfs_da_brelse(args->trans, bp);
- return(error);
+ xfs_trans_brelse(args->trans, bp);
+ return error;
}
- (void)xfs_attr_leaf_remove(bp, args);
+ xfs_attr3_leaf_remove(bp, args);
/*
* If the result is small enough, shrink it all into the inode.
*/
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- XFS_BMAP_INIT(args->flist, args->firstblock);
- error = xfs_attr_leaf_to_shortform(bp, args, forkoff);
+ xfs_bmap_init(args->flist, args->firstblock);
+ error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (!error) {
error = xfs_bmap_finish(&args->trans, args->flist,
@@ -1181,20 +817,17 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
ASSERT(committed);
args->trans = NULL;
xfs_bmap_cancel(args->flist);
- return(error);
+ return error;
}
/*
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
- } else
- xfs_da_buf_done(bp);
- return(0);
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp, 0);
+ }
+ return 0;
}
/*
@@ -1206,60 +839,31 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
STATIC int
xfs_attr_leaf_get(xfs_da_args_t *args)
{
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int error;
+ trace_xfs_attr_leaf_get(args);
+
args->blkno = 0;
- error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
- XFS_ATTR_FORK);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
if (error)
- return(error);
- ASSERT(bp != NULL);
+ return error;
- error = xfs_attr_leaf_lookup_int(bp, args);
+ error = xfs_attr3_leaf_lookup_int(bp, args);
if (error != EEXIST) {
- xfs_da_brelse(args->trans, bp);
- return(error);
+ xfs_trans_brelse(args->trans, bp);
+ return error;
}
- error = xfs_attr_leaf_getvalue(bp, args);
- xfs_da_brelse(args->trans, bp);
+ error = xfs_attr3_leaf_getvalue(bp, args);
+ xfs_trans_brelse(args->trans, bp);
if (!error && (args->rmtblkno > 0) && !(args->flags & ATTR_KERNOVAL)) {
error = xfs_attr_rmtval_get(args);
}
- return(error);
-}
-
-/*
- * Copy out attribute entries for attr_list(), for leaf attribute lists.
- */
-STATIC int
-xfs_attr_leaf_list(xfs_attr_list_context_t *context)
-{
- xfs_attr_leafblock_t *leaf;
- int error;
- xfs_dabuf_t *bp;
-
- context->cursor->blkno = 0;
- error = xfs_da_read_buf(NULL, context->dp, 0, -1, &bp, XFS_ATTR_FORK);
- if (error)
- return XFS_ERROR(error);
- ASSERT(bp != NULL);
- leaf = bp->data;
- if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC)) {
- XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW,
- context->dp->i_mount, leaf);
- xfs_da_brelse(NULL, bp);
- return XFS_ERROR(EFSCORRUPTED);
- }
-
- error = xfs_attr_leaf_list_int(bp, context);
- xfs_da_brelse(NULL, bp);
- return XFS_ERROR(error);
+ return error;
}
-
/*========================================================================
- * External routines when attribute list size > XFS_LBSIZE(mp).
+ * External routines when attribute list size > geo->blksize
*========================================================================*/
/*
@@ -1281,6 +885,8 @@ xfs_attr_node_addname(xfs_da_args_t *args)
xfs_mount_t *mp;
int committed, retval, error;
+ trace_xfs_attr_node_addname(args);
+
/*
* Fill in bucket of arguments/results/context to carry around.
*/
@@ -1290,14 +896,12 @@ restart:
state = xfs_da_state_alloc();
state->args = args;
state->mp = mp;
- state->blocksize = state->mp->m_sb.sb_blocksize;
- state->node_ents = state->mp->m_attr_node_ents;
/*
* Search to see if name already exists, and get back a pointer
* to where it should go.
*/
- error = xfs_da_node_lookup_int(state, &retval);
+ error = xfs_da3_node_lookup_int(state, &retval);
if (error)
goto out;
blk = &state->path.blk[ state->path.active-1 ];
@@ -1307,16 +911,28 @@ restart:
} else if (retval == EEXIST) {
if (args->flags & ATTR_CREATE)
goto out;
- args->rename = 1; /* atomic rename op */
+
+ trace_xfs_attr_node_replace(args);
+
+		/* save the attribute state for later removal */
+ args->op_flags |= XFS_DA_OP_RENAME; /* atomic rename op */
args->blkno2 = args->blkno; /* set 2nd entry info*/
args->index2 = args->index;
args->rmtblkno2 = args->rmtblkno;
args->rmtblkcnt2 = args->rmtblkcnt;
+ args->rmtvaluelen2 = args->rmtvaluelen;
+
+ /*
+ * clear the remote attr state now that it is saved so that the
+ * values reflect the state of the attribute we are about to
+ * add, not the attribute we just found and will remove later.
+ */
args->rmtblkno = 0;
args->rmtblkcnt = 0;
+ args->rmtvaluelen = 0;
}
- retval = xfs_attr_leaf_add(blk->bp, state->args);
+ retval = xfs_attr3_leaf_add(blk->bp, state->args);
if (retval == ENOSPC) {
if (state->path.active == 1) {
/*
@@ -1325,8 +941,9 @@ restart:
* have been a b-tree.
*/
xfs_da_state_free(state);
- XFS_BMAP_INIT(args->flist, args->firstblock);
- error = xfs_attr_leaf_to_node(args);
+ state = NULL;
+ xfs_bmap_init(args->flist, args->firstblock);
+ error = xfs_attr3_leaf_to_node(args);
if (!error) {
error = xfs_bmap_finish(&args->trans,
args->flist,
@@ -1344,16 +961,15 @@ restart:
* and started a new one. We need the inode to be
* in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp, 0);
/*
* Commit the node conversion and start the next
* trans in the chain.
*/
- if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+ error = xfs_trans_roll(&args->trans, dp);
+ if (error)
goto out;
goto restart;
@@ -1365,8 +981,8 @@ restart:
* in the index/blkno/rmtblkno/rmtblkcnt fields and
* in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields.
*/
- XFS_BMAP_INIT(args->flist, args->firstblock);
- error = xfs_da_split(state);
+ xfs_bmap_init(args->flist, args->firstblock);
+ error = xfs_da3_split(state);
if (!error) {
error = xfs_bmap_finish(&args->trans, args->flist,
&committed);
@@ -1382,15 +998,13 @@ restart:
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp, 0);
} else {
/*
* Addition succeeded, update Btree hashvals.
*/
- xfs_da_fixhashpath(state, &state->path);
+ xfs_da3_fixhashpath(state, &state->path);
}
/*
@@ -1404,7 +1018,8 @@ restart:
* Commit the leaf addition or btree split and start the next
* trans in the chain.
*/
- if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+ error = xfs_trans_roll(&args->trans, dp);
+ if (error)
goto out;
/*
@@ -1425,12 +1040,12 @@ restart:
* so that one disappears and one appears atomically. Then we
* must remove the "old" attribute/value pair.
*/
- if (args->rename) {
+ if (args->op_flags & XFS_DA_OP_RENAME) {
/*
* In a separate transaction, set the incomplete flag on the
* "old" attr and clear the incomplete flag on the "new" attr.
*/
- error = xfs_attr_leaf_flipflags(args);
+ error = xfs_attr3_leaf_flipflags(args);
if (error)
goto out;
@@ -1442,6 +1057,7 @@ restart:
args->blkno = args->blkno2;
args->rmtblkno = args->rmtblkno2;
args->rmtblkcnt = args->rmtblkcnt2;
+ args->rmtvaluelen = args->rmtvaluelen2;
if (args->rmtblkno) {
error = xfs_attr_rmtval_remove(args);
if (error)
@@ -1457,10 +1073,8 @@ restart:
state = xfs_da_state_alloc();
state->args = args;
state->mp = mp;
- state->blocksize = state->mp->m_sb.sb_blocksize;
- state->node_ents = state->mp->m_attr_node_ents;
state->inleaf = 0;
- error = xfs_da_node_lookup_int(state, &retval);
+ error = xfs_da3_node_lookup_int(state, &retval);
if (error)
goto out;
@@ -1469,15 +1083,15 @@ restart:
*/
blk = &state->path.blk[ state->path.active-1 ];
ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
- error = xfs_attr_leaf_remove(blk->bp, args);
- xfs_da_fixhashpath(state, &state->path);
+ error = xfs_attr3_leaf_remove(blk->bp, args);
+ xfs_da3_fixhashpath(state, &state->path);
/*
* Check to see if the tree needs to be collapsed.
*/
if (retval && (state->path.active > 1)) {
- XFS_BMAP_INIT(args->flist, args->firstblock);
- error = xfs_da_join(state);
+ xfs_bmap_init(args->flist, args->firstblock);
+ error = xfs_da3_join(state);
if (!error) {
error = xfs_bmap_finish(&args->trans,
args->flist,
@@ -1495,23 +1109,22 @@ restart:
* and started a new one. We need the inode to be
* in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp, 0);
}
/*
* Commit and start the next trans in the chain.
*/
- if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+ error = xfs_trans_roll(&args->trans, dp);
+ if (error)
goto out;
} else if (args->rmtblkno > 0) {
/*
* Added a "remote" value, just clear the incomplete flag.
*/
- error = xfs_attr_leaf_clearflag(args);
+ error = xfs_attr3_leaf_clearflag(args);
if (error)
goto out;
}
@@ -1538,9 +1151,11 @@ xfs_attr_node_removename(xfs_da_args_t *args)
xfs_da_state_t *state;
xfs_da_state_blk_t *blk;
xfs_inode_t *dp;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int retval, error, committed, forkoff;
+ trace_xfs_attr_node_removename(args);
+
/*
* Tie a string around our finger to remind us where we are.
*/
@@ -1548,13 +1163,11 @@ xfs_attr_node_removename(xfs_da_args_t *args)
state = xfs_da_state_alloc();
state->args = args;
state->mp = dp->i_mount;
- state->blocksize = state->mp->m_sb.sb_blocksize;
- state->node_ents = state->mp->m_attr_node_ents;
/*
* Search to see if name exists, and get back a pointer to it.
*/
- error = xfs_da_node_lookup_int(state, &retval);
+ error = xfs_da3_node_lookup_int(state, &retval);
if (error || (retval != EEXIST)) {
if (error == 0)
error = retval;
@@ -1583,7 +1196,7 @@ xfs_attr_node_removename(xfs_da_args_t *args)
* Mark the attribute as INCOMPLETE, then bunmapi() the
* remote value.
*/
- error = xfs_attr_leaf_setflag(args);
+ error = xfs_attr3_leaf_setflag(args);
if (error)
goto out;
error = xfs_attr_rmtval_remove(args);
@@ -1604,15 +1217,15 @@ xfs_attr_node_removename(xfs_da_args_t *args)
*/
blk = &state->path.blk[ state->path.active-1 ];
ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
- retval = xfs_attr_leaf_remove(blk->bp, args);
- xfs_da_fixhashpath(state, &state->path);
+ retval = xfs_attr3_leaf_remove(blk->bp, args);
+ xfs_da3_fixhashpath(state, &state->path);
/*
* Check to see if the tree needs to be collapsed.
*/
if (retval && (state->path.active > 1)) {
- XFS_BMAP_INIT(args->flist, args->firstblock);
- error = xfs_da_join(state);
+ xfs_bmap_init(args->flist, args->firstblock);
+ error = xfs_da3_join(state);
if (!error) {
error = xfs_bmap_finish(&args->trans, args->flist,
&committed);
@@ -1628,15 +1241,14 @@ xfs_attr_node_removename(xfs_da_args_t *args)
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp, 0);
/*
* Commit the Btree join operation and start a new trans.
*/
- if ((error = xfs_attr_rolltrans(&args->trans, dp)))
+ error = xfs_trans_roll(&args->trans, dp);
+ if (error)
goto out;
}
@@ -1649,20 +1261,15 @@ xfs_attr_node_removename(xfs_da_args_t *args)
*/
ASSERT(state->path.active == 1);
ASSERT(state->path.blk[0].bp);
- xfs_da_buf_done(state->path.blk[0].bp);
state->path.blk[0].bp = NULL;
- error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
- XFS_ATTR_FORK);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, 0, -1, &bp);
if (error)
goto out;
- ASSERT(be16_to_cpu(((xfs_attr_leafblock_t *)
- bp->data)->hdr.info.magic)
- == XFS_ATTR_LEAF_MAGIC);
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- XFS_BMAP_INIT(args->flist, args->firstblock);
- error = xfs_attr_leaf_to_shortform(bp, args, forkoff);
+ xfs_bmap_init(args->flist, args->firstblock);
+ error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (!error) {
error = xfs_bmap_finish(&args->trans,
@@ -1681,12 +1288,10 @@ xfs_attr_node_removename(xfs_da_args_t *args)
* and started a new one. We need the inode to be
* in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp, 0);
} else
- xfs_da_brelse(args->trans, bp);
+ xfs_trans_brelse(args->trans, bp);
}
error = 0;
@@ -1708,6 +1313,8 @@ xfs_attr_fillstate(xfs_da_state_t *state)
xfs_da_state_blk_t *blk;
int level;
+ trace_xfs_attr_fillstate(state->args);
+
/*
* Roll down the "path" in the state structure, storing the on-disk
* block number for those buffers in the "path".
@@ -1716,8 +1323,7 @@ xfs_attr_fillstate(xfs_da_state_t *state)
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->bp) {
- blk->disk_blkno = xfs_da_blkno(blk->bp);
- xfs_da_buf_done(blk->bp);
+ blk->disk_blkno = XFS_BUF_ADDR(blk->bp);
blk->bp = NULL;
} else {
blk->disk_blkno = 0;
@@ -1732,8 +1338,7 @@ xfs_attr_fillstate(xfs_da_state_t *state)
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->bp) {
- blk->disk_blkno = xfs_da_blkno(blk->bp);
- xfs_da_buf_done(blk->bp);
+ blk->disk_blkno = XFS_BUF_ADDR(blk->bp);
blk->bp = NULL;
} else {
blk->disk_blkno = 0;
@@ -1756,6 +1361,8 @@ xfs_attr_refillstate(xfs_da_state_t *state)
xfs_da_state_blk_t *blk;
int level, error;
+ trace_xfs_attr_refillstate(state->args);
+
/*
* Roll down the "path" in the state structure, storing the on-disk
* block number for those buffers in the "path".
@@ -1764,7 +1371,7 @@ xfs_attr_refillstate(xfs_da_state_t *state)
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->disk_blkno) {
- error = xfs_da_read_buf(state->args->trans,
+ error = xfs_da3_node_read(state->args->trans,
state->args->dp,
blk->blkno, blk->disk_blkno,
&blk->bp, XFS_ATTR_FORK);
@@ -1783,7 +1390,7 @@ xfs_attr_refillstate(xfs_da_state_t *state)
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->disk_blkno) {
- error = xfs_da_read_buf(state->args->trans,
+ error = xfs_da3_node_read(state->args->trans,
state->args->dp,
blk->blkno, blk->disk_blkno,
&blk->bp, XFS_ATTR_FORK);
@@ -1812,16 +1419,16 @@ xfs_attr_node_get(xfs_da_args_t *args)
int error, retval;
int i;
+ trace_xfs_attr_node_get(args);
+
state = xfs_da_state_alloc();
state->args = args;
state->mp = args->dp->i_mount;
- state->blocksize = state->mp->m_sb.sb_blocksize;
- state->node_ents = state->mp->m_attr_node_ents;
/*
* Search to see if name exists, and get back a pointer to it.
*/
- error = xfs_da_node_lookup_int(state, &retval);
+ error = xfs_da3_node_lookup_int(state, &retval);
if (error) {
retval = error;
} else if (retval == EEXIST) {
@@ -1832,7 +1439,7 @@ xfs_attr_node_get(xfs_da_args_t *args)
/*
* Get the value, local or "remote"
*/
- retval = xfs_attr_leaf_getvalue(blk->bp, args);
+ retval = xfs_attr3_leaf_getvalue(blk->bp, args);
if (!retval && (args->rmtblkno > 0)
&& !(args->flags & ATTR_KERNOVAL)) {
retval = xfs_attr_rmtval_get(args);
@@ -1843,903 +1450,10 @@ xfs_attr_node_get(xfs_da_args_t *args)
* If not in a transaction, we have to release all the buffers.
*/
for (i = 0; i < state->path.active; i++) {
- xfs_da_brelse(args->trans, state->path.blk[i].bp);
+ xfs_trans_brelse(args->trans, state->path.blk[i].bp);
state->path.blk[i].bp = NULL;
}
xfs_da_state_free(state);
return(retval);
}
-
-STATIC int /* error */
-xfs_attr_node_list(xfs_attr_list_context_t *context)
-{
- attrlist_cursor_kern_t *cursor;
- xfs_attr_leafblock_t *leaf;
- xfs_da_intnode_t *node;
- xfs_da_node_entry_t *btree;
- int error, i;
- xfs_dabuf_t *bp;
-
- cursor = context->cursor;
- cursor->initted = 1;
-
- /*
- * Do all sorts of validation on the passed-in cursor structure.
- * If anything is amiss, ignore the cursor and look up the hashval
- * starting from the btree root.
- */
- bp = NULL;
- if (cursor->blkno > 0) {
- error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1,
- &bp, XFS_ATTR_FORK);
- if ((error != 0) && (error != EFSCORRUPTED))
- return(error);
- if (bp) {
- node = bp->data;
- switch (be16_to_cpu(node->hdr.info.magic)) {
- case XFS_DA_NODE_MAGIC:
- xfs_attr_trace_l_cn("wrong blk", context, node);
- xfs_da_brelse(NULL, bp);
- bp = NULL;
- break;
- case XFS_ATTR_LEAF_MAGIC:
- leaf = bp->data;
- if (cursor->hashval > be32_to_cpu(leaf->entries[
- be16_to_cpu(leaf->hdr.count)-1].hashval)) {
- xfs_attr_trace_l_cl("wrong blk",
- context, leaf);
- xfs_da_brelse(NULL, bp);
- bp = NULL;
- } else if (cursor->hashval <=
- be32_to_cpu(leaf->entries[0].hashval)) {
- xfs_attr_trace_l_cl("maybe wrong blk",
- context, leaf);
- xfs_da_brelse(NULL, bp);
- bp = NULL;
- }
- break;
- default:
- xfs_attr_trace_l_c("wrong blk - ??", context);
- xfs_da_brelse(NULL, bp);
- bp = NULL;
- }
- }
- }
-
- /*
- * We did not find what we expected given the cursor's contents,
- * so we start from the top and work down based on the hash value.
- * Note that start of node block is same as start of leaf block.
- */
- if (bp == NULL) {
- cursor->blkno = 0;
- for (;;) {
- error = xfs_da_read_buf(NULL, context->dp,
- cursor->blkno, -1, &bp,
- XFS_ATTR_FORK);
- if (error)
- return(error);
- if (unlikely(bp == NULL)) {
- XFS_ERROR_REPORT("xfs_attr_node_list(2)",
- XFS_ERRLEVEL_LOW,
- context->dp->i_mount);
- return(XFS_ERROR(EFSCORRUPTED));
- }
- node = bp->data;
- if (be16_to_cpu(node->hdr.info.magic)
- == XFS_ATTR_LEAF_MAGIC)
- break;
- if (unlikely(be16_to_cpu(node->hdr.info.magic)
- != XFS_DA_NODE_MAGIC)) {
- XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
- XFS_ERRLEVEL_LOW,
- context->dp->i_mount,
- node);
- xfs_da_brelse(NULL, bp);
- return(XFS_ERROR(EFSCORRUPTED));
- }
- btree = node->btree;
- for (i = 0; i < be16_to_cpu(node->hdr.count);
- btree++, i++) {
- if (cursor->hashval
- <= be32_to_cpu(btree->hashval)) {
- cursor->blkno = be32_to_cpu(btree->before);
- xfs_attr_trace_l_cb("descending",
- context, btree);
- break;
- }
- }
- if (i == be16_to_cpu(node->hdr.count)) {
- xfs_da_brelse(NULL, bp);
- return(0);
- }
- xfs_da_brelse(NULL, bp);
- }
- }
- ASSERT(bp != NULL);
-
- /*
- * Roll upward through the blocks, processing each leaf block in
- * order. As long as there is space in the result buffer, keep
- * adding the information.
- */
- for (;;) {
- leaf = bp->data;
- if (unlikely(be16_to_cpu(leaf->hdr.info.magic)
- != XFS_ATTR_LEAF_MAGIC)) {
- XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)",
- XFS_ERRLEVEL_LOW,
- context->dp->i_mount, leaf);
- xfs_da_brelse(NULL, bp);
- return(XFS_ERROR(EFSCORRUPTED));
- }
- error = xfs_attr_leaf_list_int(bp, context);
- if (error) {
- xfs_da_brelse(NULL, bp);
- return error;
- }
- if (context->seen_enough || leaf->hdr.info.forw == 0)
- break;
- cursor->blkno = be32_to_cpu(leaf->hdr.info.forw);
- xfs_da_brelse(NULL, bp);
- error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1,
- &bp, XFS_ATTR_FORK);
- if (error)
- return(error);
- if (unlikely((bp == NULL))) {
- XFS_ERROR_REPORT("xfs_attr_node_list(5)",
- XFS_ERRLEVEL_LOW,
- context->dp->i_mount);
- return(XFS_ERROR(EFSCORRUPTED));
- }
- }
- xfs_da_brelse(NULL, bp);
- return(0);
-}
-
-
-/*========================================================================
- * External routines for manipulating out-of-line attribute values.
- *========================================================================*/
-
-/*
- * Read the value associated with an attribute from the out-of-line buffer
- * that we stored it in.
- */
-int
-xfs_attr_rmtval_get(xfs_da_args_t *args)
-{
- xfs_bmbt_irec_t map[ATTR_RMTVALUE_MAPSIZE];
- xfs_mount_t *mp;
- xfs_daddr_t dblkno;
- xfs_caddr_t dst;
- xfs_buf_t *bp;
- int nmap, error, tmp, valuelen, blkcnt, i;
- xfs_dablk_t lblkno;
-
- ASSERT(!(args->flags & ATTR_KERNOVAL));
-
- mp = args->dp->i_mount;
- dst = args->value;
- valuelen = args->valuelen;
- lblkno = args->rmtblkno;
- while (valuelen > 0) {
- nmap = ATTR_RMTVALUE_MAPSIZE;
- error = xfs_bmapi(args->trans, args->dp, (xfs_fileoff_t)lblkno,
- args->rmtblkcnt,
- XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
- NULL, 0, map, &nmap, NULL, NULL);
- if (error)
- return(error);
- ASSERT(nmap >= 1);
-
- for (i = 0; (i < nmap) && (valuelen > 0); i++) {
- ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
- (map[i].br_startblock != HOLESTARTBLOCK));
- dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
- blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
- error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno,
- blkcnt, XFS_BUF_LOCK, &bp);
- if (error)
- return(error);
-
- tmp = (valuelen < XFS_BUF_SIZE(bp))
- ? valuelen : XFS_BUF_SIZE(bp);
- xfs_biomove(bp, 0, tmp, dst, XFS_B_READ);
- xfs_buf_relse(bp);
- dst += tmp;
- valuelen -= tmp;
-
- lblkno += map[i].br_blockcount;
- }
- }
- ASSERT(valuelen == 0);
- return(0);
-}
-
-/*
- * Write the value associated with an attribute into the out-of-line buffer
- * that we have defined for it.
- */
-STATIC int
-xfs_attr_rmtval_set(xfs_da_args_t *args)
-{
- xfs_mount_t *mp;
- xfs_fileoff_t lfileoff;
- xfs_inode_t *dp;
- xfs_bmbt_irec_t map;
- xfs_daddr_t dblkno;
- xfs_caddr_t src;
- xfs_buf_t *bp;
- xfs_dablk_t lblkno;
- int blkcnt, valuelen, nmap, error, tmp, committed;
-
- dp = args->dp;
- mp = dp->i_mount;
- src = args->value;
-
- /*
- * Find a "hole" in the attribute address space large enough for
- * us to drop the new attribute's value into.
- */
- blkcnt = XFS_B_TO_FSB(mp, args->valuelen);
- lfileoff = 0;
- error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
- XFS_ATTR_FORK);
- if (error) {
- return(error);
- }
- args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff;
- args->rmtblkcnt = blkcnt;
-
- /*
- * Roll through the "value", allocating blocks on disk as required.
- */
- while (blkcnt > 0) {
- /*
- * Allocate a single extent, up to the size of the value.
- */
- XFS_BMAP_INIT(args->flist, args->firstblock);
- nmap = 1;
- error = xfs_bmapi(args->trans, dp, (xfs_fileoff_t)lblkno,
- blkcnt,
- XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA |
- XFS_BMAPI_WRITE,
- args->firstblock, args->total, &map, &nmap,
- args->flist, NULL);
- if (!error) {
- error = xfs_bmap_finish(&args->trans, args->flist,
- &committed);
- }
- if (error) {
- ASSERT(committed);
- args->trans = NULL;
- xfs_bmap_cancel(args->flist);
- return(error);
- }
-
- /*
- * bmap_finish() may have committed the last trans and started
- * a new one. We need the inode to be in all transactions.
- */
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
-
- ASSERT(nmap == 1);
- ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
- (map.br_startblock != HOLESTARTBLOCK));
- lblkno += map.br_blockcount;
- blkcnt -= map.br_blockcount;
-
- /*
- * Start the next trans in the chain.
- */
- if ((error = xfs_attr_rolltrans(&args->trans, dp)))
- return (error);
- }
-
- /*
- * Roll through the "value", copying the attribute value to the
- * already-allocated blocks. Blocks are written synchronously
- * so that we can know they are all on disk before we turn off
- * the INCOMPLETE flag.
- */
- lblkno = args->rmtblkno;
- valuelen = args->valuelen;
- while (valuelen > 0) {
- /*
- * Try to remember where we decided to put the value.
- */
- XFS_BMAP_INIT(args->flist, args->firstblock);
- nmap = 1;
- error = xfs_bmapi(NULL, dp, (xfs_fileoff_t)lblkno,
- args->rmtblkcnt,
- XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
- args->firstblock, 0, &map, &nmap,
- NULL, NULL);
- if (error) {
- return(error);
- }
- ASSERT(nmap == 1);
- ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
- (map.br_startblock != HOLESTARTBLOCK));
-
- dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
- blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
-
- bp = xfs_buf_get_flags(mp->m_ddev_targp, dblkno,
- blkcnt, XFS_BUF_LOCK);
- ASSERT(bp);
- ASSERT(!XFS_BUF_GETERROR(bp));
-
- tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen :
- XFS_BUF_SIZE(bp);
- xfs_biomove(bp, 0, tmp, src, XFS_B_WRITE);
- if (tmp < XFS_BUF_SIZE(bp))
- xfs_biozero(bp, tmp, XFS_BUF_SIZE(bp) - tmp);
- if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */
- return (error);
- }
- src += tmp;
- valuelen -= tmp;
-
- lblkno += map.br_blockcount;
- }
- ASSERT(valuelen == 0);
- return(0);
-}
-
-/*
- * Remove the value associated with an attribute by deleting the
- * out-of-line buffer that it is stored on.
- */
-STATIC int
-xfs_attr_rmtval_remove(xfs_da_args_t *args)
-{
- xfs_mount_t *mp;
- xfs_bmbt_irec_t map;
- xfs_buf_t *bp;
- xfs_daddr_t dblkno;
- xfs_dablk_t lblkno;
- int valuelen, blkcnt, nmap, error, done, committed;
-
- mp = args->dp->i_mount;
-
- /*
- * Roll through the "value", invalidating the attribute value's
- * blocks.
- */
- lblkno = args->rmtblkno;
- valuelen = args->rmtblkcnt;
- while (valuelen > 0) {
- /*
- * Try to remember where we decided to put the value.
- */
- XFS_BMAP_INIT(args->flist, args->firstblock);
- nmap = 1;
- error = xfs_bmapi(NULL, args->dp, (xfs_fileoff_t)lblkno,
- args->rmtblkcnt,
- XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
- args->firstblock, 0, &map, &nmap,
- args->flist, NULL);
- if (error) {
- return(error);
- }
- ASSERT(nmap == 1);
- ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
- (map.br_startblock != HOLESTARTBLOCK));
-
- dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
- blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
-
- /*
- * If the "remote" value is in the cache, remove it.
- */
- bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt,
- XFS_INCORE_TRYLOCK);
- if (bp) {
- XFS_BUF_STALE(bp);
- XFS_BUF_UNDELAYWRITE(bp);
- xfs_buf_relse(bp);
- bp = NULL;
- }
-
- valuelen -= map.br_blockcount;
-
- lblkno += map.br_blockcount;
- }
-
- /*
- * Keep de-allocating extents until the remote-value region is gone.
- */
- lblkno = args->rmtblkno;
- blkcnt = args->rmtblkcnt;
- done = 0;
- while (!done) {
- XFS_BMAP_INIT(args->flist, args->firstblock);
- error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
- XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
- 1, args->firstblock, args->flist,
- NULL, &done);
- if (!error) {
- error = xfs_bmap_finish(&args->trans, args->flist,
- &committed);
- }
- if (error) {
- ASSERT(committed);
- args->trans = NULL;
- xfs_bmap_cancel(args->flist);
- return(error);
- }
-
- /*
- * bmap_finish() may have committed the last trans and started
- * a new one. We need the inode to be in all transactions.
- */
- if (committed) {
- xfs_trans_ijoin(args->trans, args->dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, args->dp);
- }
-
- /*
- * Close out trans and start the next one in the chain.
- */
- if ((error = xfs_attr_rolltrans(&args->trans, args->dp)))
- return (error);
- }
- return(0);
-}
-
-#if defined(XFS_ATTR_TRACE)
-/*
- * Add a trace buffer entry for an attr_list context structure.
- */
-void
-xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context)
-{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_C, where,
- (__psunsigned_t)context->dp,
- (__psunsigned_t)context->cursor->hashval,
- (__psunsigned_t)context->cursor->blkno,
- (__psunsigned_t)context->cursor->offset,
- (__psunsigned_t)context->alist,
- (__psunsigned_t)context->bufsize,
- (__psunsigned_t)context->count,
- (__psunsigned_t)context->firstu,
- (__psunsigned_t)
- ((context->count > 0) &&
- !(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
- ? (ATTR_ENTRY(context->alist,
- context->count-1)->a_valuelen)
- : 0,
- (__psunsigned_t)context->dupcnt,
- (__psunsigned_t)context->flags,
- (__psunsigned_t)NULL,
- (__psunsigned_t)NULL,
- (__psunsigned_t)NULL);
-}
-
-/*
- * Add a trace buffer entry for a context structure and a Btree node.
- */
-void
-xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
- struct xfs_da_intnode *node)
-{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CN, where,
- (__psunsigned_t)context->dp,
- (__psunsigned_t)context->cursor->hashval,
- (__psunsigned_t)context->cursor->blkno,
- (__psunsigned_t)context->cursor->offset,
- (__psunsigned_t)context->alist,
- (__psunsigned_t)context->bufsize,
- (__psunsigned_t)context->count,
- (__psunsigned_t)context->firstu,
- (__psunsigned_t)
- ((context->count > 0) &&
- !(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
- ? (ATTR_ENTRY(context->alist,
- context->count-1)->a_valuelen)
- : 0,
- (__psunsigned_t)context->dupcnt,
- (__psunsigned_t)context->flags,
- (__psunsigned_t)be16_to_cpu(node->hdr.count),
- (__psunsigned_t)be32_to_cpu(node->btree[0].hashval),
- (__psunsigned_t)be32_to_cpu(node->btree[
- be16_to_cpu(node->hdr.count)-1].hashval));
-}
-
-/*
- * Add a trace buffer entry for a context structure and a Btree element.
- */
-void
-xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
- struct xfs_da_node_entry *btree)
-{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CB, where,
- (__psunsigned_t)context->dp,
- (__psunsigned_t)context->cursor->hashval,
- (__psunsigned_t)context->cursor->blkno,
- (__psunsigned_t)context->cursor->offset,
- (__psunsigned_t)context->alist,
- (__psunsigned_t)context->bufsize,
- (__psunsigned_t)context->count,
- (__psunsigned_t)context->firstu,
- (__psunsigned_t)
- ((context->count > 0) &&
- !(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
- ? (ATTR_ENTRY(context->alist,
- context->count-1)->a_valuelen)
- : 0,
- (__psunsigned_t)context->dupcnt,
- (__psunsigned_t)context->flags,
- (__psunsigned_t)be32_to_cpu(btree->hashval),
- (__psunsigned_t)be32_to_cpu(btree->before),
- (__psunsigned_t)NULL);
-}
-
-/*
- * Add a trace buffer entry for a context structure and a leaf block.
- */
-void
-xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
- struct xfs_attr_leafblock *leaf)
-{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CL, where,
- (__psunsigned_t)context->dp,
- (__psunsigned_t)context->cursor->hashval,
- (__psunsigned_t)context->cursor->blkno,
- (__psunsigned_t)context->cursor->offset,
- (__psunsigned_t)context->alist,
- (__psunsigned_t)context->bufsize,
- (__psunsigned_t)context->count,
- (__psunsigned_t)context->firstu,
- (__psunsigned_t)
- ((context->count > 0) &&
- !(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
- ? (ATTR_ENTRY(context->alist,
- context->count-1)->a_valuelen)
- : 0,
- (__psunsigned_t)context->dupcnt,
- (__psunsigned_t)context->flags,
- (__psunsigned_t)be16_to_cpu(leaf->hdr.count),
- (__psunsigned_t)be32_to_cpu(leaf->entries[0].hashval),
- (__psunsigned_t)be32_to_cpu(leaf->entries[
- be16_to_cpu(leaf->hdr.count)-1].hashval));
-}
-
-/*
- * Add a trace buffer entry for the arguments given to the routine,
- * generic form.
- */
-void
-xfs_attr_trace_enter(int type, char *where,
- __psunsigned_t a2, __psunsigned_t a3,
- __psunsigned_t a4, __psunsigned_t a5,
- __psunsigned_t a6, __psunsigned_t a7,
- __psunsigned_t a8, __psunsigned_t a9,
- __psunsigned_t a10, __psunsigned_t a11,
- __psunsigned_t a12, __psunsigned_t a13,
- __psunsigned_t a14, __psunsigned_t a15)
-{
- ASSERT(xfs_attr_trace_buf);
- ktrace_enter(xfs_attr_trace_buf, (void *)((__psunsigned_t)type),
- (void *)where,
- (void *)a2, (void *)a3, (void *)a4,
- (void *)a5, (void *)a6, (void *)a7,
- (void *)a8, (void *)a9, (void *)a10,
- (void *)a11, (void *)a12, (void *)a13,
- (void *)a14, (void *)a15);
-}
-#endif /* XFS_ATTR_TRACE */
-
-
-/*========================================================================
- * System (pseudo) namespace attribute interface routines.
- *========================================================================*/
-
-STATIC int
-posix_acl_access_set(
- bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
-{
- return xfs_acl_vset(vp, data, size, _ACL_TYPE_ACCESS);
-}
-
-STATIC int
-posix_acl_access_remove(
- bhv_vnode_t *vp, char *name, int xflags)
-{
- return xfs_acl_vremove(vp, _ACL_TYPE_ACCESS);
-}
-
-STATIC int
-posix_acl_access_get(
- bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
-{
- return xfs_acl_vget(vp, data, size, _ACL_TYPE_ACCESS);
-}
-
-STATIC int
-posix_acl_access_exists(
- bhv_vnode_t *vp)
-{
- return xfs_acl_vhasacl_access(vp);
-}
-
-STATIC int
-posix_acl_default_set(
- bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
-{
- return xfs_acl_vset(vp, data, size, _ACL_TYPE_DEFAULT);
-}
-
-STATIC int
-posix_acl_default_get(
- bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
-{
- return xfs_acl_vget(vp, data, size, _ACL_TYPE_DEFAULT);
-}
-
-STATIC int
-posix_acl_default_remove(
- bhv_vnode_t *vp, char *name, int xflags)
-{
- return xfs_acl_vremove(vp, _ACL_TYPE_DEFAULT);
-}
-
-STATIC int
-posix_acl_default_exists(
- bhv_vnode_t *vp)
-{
- return xfs_acl_vhasacl_default(vp);
-}
-
-static struct attrnames posix_acl_access = {
- .attr_name = "posix_acl_access",
- .attr_namelen = sizeof("posix_acl_access") - 1,
- .attr_get = posix_acl_access_get,
- .attr_set = posix_acl_access_set,
- .attr_remove = posix_acl_access_remove,
- .attr_exists = posix_acl_access_exists,
-};
-
-static struct attrnames posix_acl_default = {
- .attr_name = "posix_acl_default",
- .attr_namelen = sizeof("posix_acl_default") - 1,
- .attr_get = posix_acl_default_get,
- .attr_set = posix_acl_default_set,
- .attr_remove = posix_acl_default_remove,
- .attr_exists = posix_acl_default_exists,
-};
-
-static struct attrnames *attr_system_names[] =
- { &posix_acl_access, &posix_acl_default };
-
-
-/*========================================================================
- * Namespace-prefix-style attribute name interface routines.
- *========================================================================*/
-
-STATIC int
-attr_generic_set(
- bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
-{
- return -xfs_attr_set(xfs_vtoi(vp), name, data, size, xflags);
-}
-
-STATIC int
-attr_generic_get(
- bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
-{
- int error, asize = size;
-
- error = xfs_attr_get(xfs_vtoi(vp), name, data, &asize, xflags);
- if (!error)
- return asize;
- return -error;
-}
-
-STATIC int
-attr_generic_remove(
- bhv_vnode_t *vp, char *name, int xflags)
-{
- return -xfs_attr_remove(xfs_vtoi(vp), name, xflags);
-}
-
-STATIC int
-attr_generic_listadd(
- attrnames_t *prefix,
- attrnames_t *namesp,
- void *data,
- size_t size,
- ssize_t *result)
-{
- char *p = data + *result;
-
- *result += prefix->attr_namelen;
- *result += namesp->attr_namelen + 1;
- if (!size)
- return 0;
- if (*result > size)
- return -ERANGE;
- strcpy(p, prefix->attr_name);
- p += prefix->attr_namelen;
- strcpy(p, namesp->attr_name);
- p += namesp->attr_namelen + 1;
- return 0;
-}
-
-STATIC int
-attr_system_list(
- bhv_vnode_t *vp,
- void *data,
- size_t size,
- ssize_t *result)
-{
- attrnames_t *namesp;
- int i, error = 0;
-
- for (i = 0; i < ATTR_SYSCOUNT; i++) {
- namesp = attr_system_names[i];
- if (!namesp->attr_exists || !namesp->attr_exists(vp))
- continue;
- error = attr_generic_listadd(&attr_system, namesp,
- data, size, result);
- if (error)
- break;
- }
- return error;
-}
-
-int
-attr_generic_list(
- bhv_vnode_t *vp, void *data, size_t size, int xflags, ssize_t *result)
-{
- attrlist_cursor_kern_t cursor = { 0 };
- int error;
-
- error = xfs_attr_list(xfs_vtoi(vp), data, size, xflags, &cursor);
- if (error > 0)
- return -error;
- *result = -error;
- return attr_system_list(vp, data, size, result);
-}
-
-attrnames_t *
-attr_lookup_namespace(
- char *name,
- struct attrnames **names,
- int nnames)
-{
- int i;
-
- for (i = 0; i < nnames; i++)
- if (!strncmp(name, names[i]->attr_name, names[i]->attr_namelen))
- return names[i];
- return NULL;
-}
-
-/*
- * Some checks to prevent people abusing EAs to get over quota:
- * - Don't allow modifying user EAs on devices/symlinks;
- * - Don't allow modifying user EAs if sticky bit set;
- */
-STATIC int
-attr_user_capable(
- bhv_vnode_t *vp,
- cred_t *cred)
-{
- struct inode *inode = vn_to_inode(vp);
-
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- return -EPERM;
- if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) &&
- !capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
- (current_fsuid(cred) != inode->i_uid) && !capable(CAP_FOWNER))
- return -EPERM;
- return 0;
-}
-
-STATIC int
-attr_trusted_capable(
- bhv_vnode_t *vp,
- cred_t *cred)
-{
- struct inode *inode = vn_to_inode(vp);
-
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- return -EPERM;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- return 0;
-}
-
-STATIC int
-attr_system_set(
- bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
-{
- attrnames_t *namesp;
- int error;
-
- if (xflags & ATTR_CREATE)
- return -EINVAL;
-
- namesp = attr_lookup_namespace(name, attr_system_names, ATTR_SYSCOUNT);
- if (!namesp)
- return -EOPNOTSUPP;
- error = namesp->attr_set(vp, name, data, size, xflags);
- if (!error)
- error = vn_revalidate(vp);
- return error;
-}
-
-STATIC int
-attr_system_get(
- bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
-{
- attrnames_t *namesp;
-
- namesp = attr_lookup_namespace(name, attr_system_names, ATTR_SYSCOUNT);
- if (!namesp)
- return -EOPNOTSUPP;
- return namesp->attr_get(vp, name, data, size, xflags);
-}
-
-STATIC int
-attr_system_remove(
- bhv_vnode_t *vp, char *name, int xflags)
-{
- attrnames_t *namesp;
-
- namesp = attr_lookup_namespace(name, attr_system_names, ATTR_SYSCOUNT);
- if (!namesp)
- return -EOPNOTSUPP;
- return namesp->attr_remove(vp, name, xflags);
-}
-
-struct attrnames attr_system = {
- .attr_name = "system.",
- .attr_namelen = sizeof("system.") - 1,
- .attr_flag = ATTR_SYSTEM,
- .attr_get = attr_system_get,
- .attr_set = attr_system_set,
- .attr_remove = attr_system_remove,
- .attr_capable = (attrcapable_t)fs_noerr,
-};
-
-struct attrnames attr_trusted = {
- .attr_name = "trusted.",
- .attr_namelen = sizeof("trusted.") - 1,
- .attr_flag = ATTR_ROOT,
- .attr_get = attr_generic_get,
- .attr_set = attr_generic_set,
- .attr_remove = attr_generic_remove,
- .attr_capable = attr_trusted_capable,
-};
-
-struct attrnames attr_secure = {
- .attr_name = "security.",
- .attr_namelen = sizeof("security.") - 1,
- .attr_flag = ATTR_SECURE,
- .attr_get = attr_generic_get,
- .attr_set = attr_generic_set,
- .attr_remove = attr_generic_remove,
- .attr_capable = (attrcapable_t)fs_noerr,
-};
-
-struct attrnames attr_user = {
- .attr_name = "user.",
- .attr_namelen = sizeof("user.") - 1,
- .attr_get = attr_generic_get,
- .attr_set = attr_generic_set,
- .attr_remove = attr_generic_remove,
- .attr_capable = attr_user_capable,
-};
-
-struct attrnames *attr_namespaces[] =
- { &attr_system, &attr_trusted, &attr_secure, &attr_user };
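
Before the header changes below, here is a standalone userspace sketch of the two-ended output buffer layout that the removed xfs_attr_put_listent()/ATTR_ENTSIZE() code above implemented: per-entry offsets grow upward from the attrlist header while the variable-length entries are packed downward from the end of the buffer ("firstu"), and the list is flagged "more" once the two regions would meet. The struct names and field widths are simplified stand-ins for the kernel types, not the real definitions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ent {				/* simplified attrlist_ent_t */
	uint32_t	valuelen;
	char		name[];
};

struct list {				/* simplified attrlist_t */
	int		count;
	int		more;
	uint32_t	offset[];
};

/* round an entry up to a 4-byte boundary, like ATTR_ENTSIZE() */
static size_t
entsize(size_t namelen)
{
	return (offsetof(struct ent, name) + namelen + 1 +
		sizeof(uint32_t) - 1) & ~(sizeof(uint32_t) - 1);
}

static int
put_listent(char *buf, size_t *firstu, const char *name, uint32_t valuelen)
{
	struct list	*list = (struct list *)buf;
	size_t		namelen = strlen(name);
	size_t		arraytop = sizeof(*list) +
				   (list->count + 1) * sizeof(list->offset[0]);
	struct ent	*ent;

	if (*firstu < arraytop + entsize(namelen)) {
		list->more = 1;		/* caller must resume with a cursor */
		return 1;
	}
	*firstu -= entsize(namelen);
	ent = (struct ent *)(buf + *firstu);
	ent->valuelen = valuelen;
	memcpy(ent->name, name, namelen + 1);
	list->offset[list->count++] = *firstu;
	return 0;
}

int
main(void)
{
	uint32_t	raw[32] = { 0 };	/* 128-byte, int-aligned buffer */
	char		*buf = (char *)raw;
	size_t		firstu = sizeof(raw);

	put_listent(buf, &firstu, "user.comment", 11);
	put_listent(buf, &firstu, "user.mime_type", 10);
	printf("%d entries, entry region starts at byte %zu\n",
	       ((struct list *)buf)->count, firstu);
	return 0;
}

Growing the two regions toward each other lets the fixed-size offset array and the variable-size name records share a single caller-supplied buffer, which is why the removed code compared "firstu" against "arraytop" on every insertion.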
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index 6cfc9384fe3..dd482458947 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -18,9 +18,11 @@
#ifndef __XFS_ATTR_H__
#define __XFS_ATTR_H__
+struct xfs_inode;
+struct xfs_da_args;
+struct xfs_attr_list_context;
+
/*
- * xfs_attr.h
- *
* Large attribute lists are structured around Btrees where all the data
* elements are in the leaf nodes. Attribute names are hashed into an int,
* then that int is used as the index into the Btree. Since the hashval
@@ -35,35 +37,6 @@
* External interfaces
*========================================================================*/
-struct cred;
-struct xfs_attr_list_context;
-
-typedef int (*attrset_t)(bhv_vnode_t *, char *, void *, size_t, int);
-typedef int (*attrget_t)(bhv_vnode_t *, char *, void *, size_t, int);
-typedef int (*attrremove_t)(bhv_vnode_t *, char *, int);
-typedef int (*attrexists_t)(bhv_vnode_t *);
-typedef int (*attrcapable_t)(bhv_vnode_t *, struct cred *);
-
-typedef struct attrnames {
- char * attr_name;
- unsigned int attr_namelen;
- unsigned int attr_flag;
- attrget_t attr_get;
- attrset_t attr_set;
- attrremove_t attr_remove;
- attrexists_t attr_exists;
- attrcapable_t attr_capable;
-} attrnames_t;
-
-#define ATTR_NAMECOUNT 4
-extern struct attrnames attr_user;
-extern struct attrnames attr_secure;
-extern struct attrnames attr_system;
-extern struct attrnames attr_trusted;
-extern struct attrnames *attr_namespaces[ATTR_NAMECOUNT];
-
-extern attrnames_t *attr_lookup_namespace(char *, attrnames_t **, int);
-extern int attr_generic_list(bhv_vnode_t *, void *, size_t, int, ssize_t *);
#define ATTR_DONTFOLLOW 0x0001 /* -- unused, from IRIX -- */
#define ATTR_ROOT 0x0002 /* use attrs in root (trusted) namespace */
@@ -71,16 +44,19 @@ extern int attr_generic_list(bhv_vnode_t *, void *, size_t, int, ssize_t *);
#define ATTR_SECURE 0x0008 /* use attrs in security namespace */
#define ATTR_CREATE 0x0010 /* pure create: fail if attr already exists */
#define ATTR_REPLACE 0x0020 /* pure set: fail if attr does not exist */
-#define ATTR_SYSTEM 0x0100 /* use attrs in system (pseudo) namespace */
-#define ATTR_KERNACCESS 0x0400 /* [kernel] iaccess, inode held io-locked */
#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */
#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
-#define ATTR_KERNAMELS 0x4000 /* [kernel] list attr names (simple list) */
-#define ATTR_KERNORMALS 0x0800 /* [kernel] normal attr list: user+secure */
-#define ATTR_KERNROOTLS 0x8000 /* [kernel] include root in the attr list */
-#define ATTR_KERNFULLS (ATTR_KERNORMALS|ATTR_KERNROOTLS)
+#define XFS_ATTR_FLAGS \
+ { ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
+ { ATTR_ROOT, "ROOT" }, \
+ { ATTR_TRUST, "TRUST" }, \
+ { ATTR_SECURE, "SECURE" }, \
+ { ATTR_CREATE, "CREATE" }, \
+ { ATTR_REPLACE, "REPLACE" }, \
+ { ATTR_KERNOTIME, "KERNOTIME" }, \
+ { ATTR_KERNOVAL, "KERNOVAL" }
/*
* The maximum size (into the kernel or returned from the kernel) of an
@@ -119,22 +95,6 @@ typedef struct attrlist_ent { /* data from attr_list() */
&((char *)buffer)[ ((attrlist_t *)(buffer))->al_offset[index] ])
/*
- * Multi-attribute operation vector.
- */
-typedef struct attr_multiop {
- int am_opcode; /* operation to perform (ATTR_OP_GET, etc.) */
- int am_error; /* [out arg] result of this sub-op (an errno) */
- char *am_attrname; /* attribute name to work with */
- char *am_attrvalue; /* [in/out arg] attribute value (raw bytes) */
- int am_length; /* [in/out arg] length of value */
- int am_flags; /* bitwise OR of attr API flags defined above */
-} attr_multiop_t;
-
-#define ATTR_OP_GET 1 /* return the indicated attr's value */
-#define ATTR_OP_SET 2 /* set/create the indicated attr/value pair */
-#define ATTR_OP_REMOVE 3 /* remove the indicated attr */
-
-/*
* Kernel-internal version of the attrlist cursor.
*/
typedef struct attrlist_cursor_kern {
@@ -148,20 +108,47 @@ typedef struct attrlist_cursor_kern {
/*========================================================================
- * Function prototypes for the kernel.
+ * Structure used to pass context around among the routines.
*========================================================================*/
-struct xfs_inode;
-struct attrlist_cursor_kern;
-struct xfs_da_args;
+
+typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, int,
+ unsigned char *, int, int, unsigned char *);
+
+typedef struct xfs_attr_list_context {
+ struct xfs_inode *dp; /* inode */
+ struct attrlist_cursor_kern *cursor; /* position in list */
+ char *alist; /* output buffer */
+ int seen_enough; /* T/F: seen enough of list? */
+ ssize_t count; /* num used entries */
+ int dupcnt; /* count dup hashvals seen */
+ int bufsize; /* total buffer size */
+ int firstu; /* first used byte in buffer */
+ int flags; /* from VOP call */
+ int resynch; /* T/F: resynch with cursor */
+ int put_value; /* T/F: need value for listent */
+ put_listent_func_t put_listent; /* list output fmt function */
+ int index; /* index into output buffer */
+} xfs_attr_list_context_t;
+
+
+/*========================================================================
+ * Function prototypes for the kernel.
+ *========================================================================*/
/*
* Overall external interface routines.
*/
int xfs_attr_inactive(struct xfs_inode *dp);
+int xfs_attr_list_int(struct xfs_attr_list_context *);
+int xfs_inode_hasattr(struct xfs_inode *ip);
+int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
+ unsigned char *value, int *valuelenp, int flags);
+int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
+ unsigned char *value, int valuelen, int flags);
+int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
+int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
+ int flags, struct attrlist_cursor_kern *cursor);
-int xfs_attr_shortform_getvalue(struct xfs_da_args *);
-int xfs_attr_fetch(struct xfs_inode *, struct xfs_name *, char *, int *, int);
-int xfs_attr_rmtval_get(struct xfs_da_args *args);
#endif /* __XFS_ATTR_H__ */
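
The replacement XFS_ATTR_FLAGS table above pairs each remaining ATTR_* bit with a printable name; tables in this { value, "name" } style are typically consumed by trace-point flag printing. As a rough, self-contained sketch of how such a table can be expanded into a decoder (struct flag_name, demo_attr_flags and format_flags() are hypothetical stand-ins, not XFS interfaces):

#include <stdio.h>
#include <stddef.h>

/*
 * Illustrative only: a stand-in for a { value, "name" } flag table in the
 * style of XFS_ATTR_FLAGS.  The type, table and helper below are
 * hypothetical, not XFS code; the values mirror the defines shown above.
 */
struct flag_name {
        unsigned int    value;
        const char      *name;
};

static const struct flag_name demo_attr_flags[] = {
        { 0x0002, "ROOT" },
        { 0x0008, "SECURE" },
        { 0x0010, "CREATE" },
        { 0x0020, "REPLACE" },
};

/* Print every name whose bit is set in flags, '|'-separated. */
static void format_flags(unsigned int flags)
{
        size_t  i;
        int     printed = 0;

        for (i = 0; i < sizeof(demo_attr_flags) / sizeof(demo_attr_flags[0]); i++) {
                if (!(flags & demo_attr_flags[i].value))
                        continue;
                printf("%s%s", printed ? "|" : "", demo_attr_flags[i].name);
                printed = 1;
        }
        printf("\n");
}

int main(void)
{
        format_flags(0x0002 | 0x0010);  /* prints "ROOT|CREATE" */
        return 0;
}
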
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
new file mode 100644
index 00000000000..09480c57f06
--- /dev/null
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_inode.h"
+#include "xfs_alloc.h"
+#include "xfs_attr_remote.h"
+#include "xfs_trans.h"
+#include "xfs_inode_item.h"
+#include "xfs_bmap.h"
+#include "xfs_attr.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_error.h"
+#include "xfs_quota.h"
+#include "xfs_trace.h"
+#include "xfs_dinode.h"
+#include "xfs_dir2.h"
+
+/*
+ * Look at all the extents for this logical region,
+ * invalidate any buffers that are incore/in transactions.
+ */
+STATIC int
+xfs_attr3_leaf_freextent(
+ struct xfs_trans **trans,
+ struct xfs_inode *dp,
+ xfs_dablk_t blkno,
+ int blkcnt)
+{
+ struct xfs_bmbt_irec map;
+ struct xfs_buf *bp;
+ xfs_dablk_t tblkno;
+ xfs_daddr_t dblkno;
+ int tblkcnt;
+ int dblkcnt;
+ int nmap;
+ int error;
+
+ /*
+ * Roll through the "value", invalidating the attribute value's
+ * blocks.
+ */
+ tblkno = blkno;
+ tblkcnt = blkcnt;
+ while (tblkcnt > 0) {
+ /*
+ * Try to remember where we decided to put the value.
+ */
+ nmap = 1;
+ error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt,
+ &map, &nmap, XFS_BMAPI_ATTRFORK);
+ if (error) {
+ return(error);
+ }
+ ASSERT(nmap == 1);
+ ASSERT(map.br_startblock != DELAYSTARTBLOCK);
+
+ /*
+ * If it's a hole, these are already unmapped
+ * so there's nothing to invalidate.
+ */
+ if (map.br_startblock != HOLESTARTBLOCK) {
+
+ dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
+ map.br_startblock);
+ dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
+ map.br_blockcount);
+ bp = xfs_trans_get_buf(*trans,
+ dp->i_mount->m_ddev_targp,
+ dblkno, dblkcnt, 0);
+ if (!bp)
+ return ENOMEM;
+ xfs_trans_binval(*trans, bp);
+ /*
+ * Roll to next transaction.
+ */
+ error = xfs_trans_roll(trans, dp);
+ if (error)
+ return (error);
+ }
+
+ tblkno += map.br_blockcount;
+ tblkcnt -= map.br_blockcount;
+ }
+
+ return(0);
+}
+
+/*
+ * Invalidate all of the "remote" value regions pointed to by a particular
+ * leaf block.
+ * Note that we must release the lock on the buffer so that we are not
+ * caught holding something that the logging code wants to flush to disk.
+ */
+STATIC int
+xfs_attr3_leaf_inactive(
+ struct xfs_trans **trans,
+ struct xfs_inode *dp,
+ struct xfs_buf *bp)
+{
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_attr3_icleaf_hdr ichdr;
+ struct xfs_attr_leaf_entry *entry;
+ struct xfs_attr_leaf_name_remote *name_rmt;
+ struct xfs_attr_inactive_list *list;
+ struct xfs_attr_inactive_list *lp;
+ int error;
+ int count;
+ int size;
+ int tmp;
+ int i;
+
+ leaf = bp->b_addr;
+ xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+
+ /*
+ * Count the number of "remote" value extents.
+ */
+ count = 0;
+ entry = xfs_attr3_leaf_entryp(leaf);
+ for (i = 0; i < ichdr.count; entry++, i++) {
+ if (be16_to_cpu(entry->nameidx) &&
+ ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
+ name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
+ if (name_rmt->valueblk)
+ count++;
+ }
+ }
+
+ /*
+ * If there are no "remote" values, we're done.
+ */
+ if (count == 0) {
+ xfs_trans_brelse(*trans, bp);
+ return 0;
+ }
+
+ /*
+ * Allocate storage for a list of all the "remote" value extents.
+ */
+ size = count * sizeof(xfs_attr_inactive_list_t);
+ list = kmem_alloc(size, KM_SLEEP);
+
+ /*
+ * Identify each of the "remote" value extents.
+ */
+ lp = list;
+ entry = xfs_attr3_leaf_entryp(leaf);
+ for (i = 0; i < ichdr.count; entry++, i++) {
+ if (be16_to_cpu(entry->nameidx) &&
+ ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
+ name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
+ if (name_rmt->valueblk) {
+ lp->valueblk = be32_to_cpu(name_rmt->valueblk);
+ lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
+ be32_to_cpu(name_rmt->valuelen));
+ lp++;
+ }
+ }
+ }
+ xfs_trans_brelse(*trans, bp); /* unlock for trans. in freextent() */
+
+ /*
+ * Invalidate each of the "remote" value extents.
+ */
+ error = 0;
+ for (lp = list, i = 0; i < count; i++, lp++) {
+ tmp = xfs_attr3_leaf_freextent(trans, dp,
+ lp->valueblk, lp->valuelen);
+
+ if (error == 0)
+ error = tmp; /* save only the 1st errno */
+ }
+
+ kmem_free(list);
+ return error;
+}
+
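
xfs_attr3_leaf_inactive() above uses a two-pass shape: count the remote-value entries first, size one allocation exactly, then walk the entries again to fill the list before invalidating each extent. A minimal sketch of that count-then-collect pattern in isolation, assuming a trivial "keep the even values" predicate; collect_matching() and its types are illustrative only, not XFS code:

#include <stdlib.h>

/*
 * Illustrative only: the count-then-collect shape used above, reduced to a
 * generic helper.  The caller frees the returned array.
 */
static int *collect_matching(const int *src, int n, int *out_count)
{
        int     count = 0;
        int     *list, *lp;
        int     i;

        /* Pass 1: count the matches so the allocation is exactly sized. */
        for (i = 0; i < n; i++)
                if ((src[i] & 1) == 0)
                        count++;

        *out_count = count;
        if (count == 0)
                return NULL;

        list = malloc(count * sizeof(*list));
        if (!list)
                return NULL;

        /*
         * Pass 2: record each match, just as the leaf code records one
         * (valueblk, valuelen) pair per remote attribute value.
         */
        for (lp = list, i = 0; i < n; i++)
                if ((src[i] & 1) == 0)
                        *lp++ = src[i];

        return list;
}
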
+/*
+ * Recurse (gasp!) through the attribute nodes until we find leaves.
+ * We're doing a depth-first traversal in order to invalidate everything.
+ */
+STATIC int
+xfs_attr3_node_inactive(
+ struct xfs_trans **trans,
+ struct xfs_inode *dp,
+ struct xfs_buf *bp,
+ int level)
+{
+ xfs_da_blkinfo_t *info;
+ xfs_da_intnode_t *node;
+ xfs_dablk_t child_fsb;
+ xfs_daddr_t parent_blkno, child_blkno;
+ int error, i;
+ struct xfs_buf *child_bp;
+ struct xfs_da_node_entry *btree;
+ struct xfs_da3_icnode_hdr ichdr;
+
+ /*
+ * Since this code is recursive (gasp!) we must protect ourselves.
+ */
+ if (level > XFS_DA_NODE_MAXDEPTH) {
+ xfs_trans_brelse(*trans, bp); /* no locks for later trans */
+ return XFS_ERROR(EIO);
+ }
+
+ node = bp->b_addr;
+ dp->d_ops->node_hdr_from_disk(&ichdr, node);
+ parent_blkno = bp->b_bn;
+ if (!ichdr.count) {
+ xfs_trans_brelse(*trans, bp);
+ return 0;
+ }
+ btree = dp->d_ops->node_tree_p(node);
+ child_fsb = be32_to_cpu(btree[0].before);
+ xfs_trans_brelse(*trans, bp); /* no locks for later trans */
+
+ /*
+ * If this is the node level just above the leaves, simply loop
+ * over the leaves removing all of them. If this is higher up
+ * in the tree, recurse downward.
+ */
+ for (i = 0; i < ichdr.count; i++) {
+ /*
+ * Read the subsidiary block to see what we have to work with.
+ * Don't do this in a transaction. This is a depth-first
+ * traversal of the tree so we may deal with many blocks
+ * before we come back to this one.
+ */
+ error = xfs_da3_node_read(*trans, dp, child_fsb, -2, &child_bp,
+ XFS_ATTR_FORK);
+ if (error)
+ return(error);
+ if (child_bp) {
+ /* save for re-read later */
+ child_blkno = XFS_BUF_ADDR(child_bp);
+
+ /*
+ * Invalidate the subtree, however we have to.
+ */
+ info = child_bp->b_addr;
+ switch (info->magic) {
+ case cpu_to_be16(XFS_DA_NODE_MAGIC):
+ case cpu_to_be16(XFS_DA3_NODE_MAGIC):
+ error = xfs_attr3_node_inactive(trans, dp,
+ child_bp, level + 1);
+ break;
+ case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
+ case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
+ error = xfs_attr3_leaf_inactive(trans, dp,
+ child_bp);
+ break;
+ default:
+ error = XFS_ERROR(EIO);
+ xfs_trans_brelse(*trans, child_bp);
+ break;
+ }
+ if (error)
+ return error;
+
+ /*
+ * Remove the subsidiary block from the cache
+ * and from the log.
+ */
+ error = xfs_da_get_buf(*trans, dp, 0, child_blkno,
+ &child_bp, XFS_ATTR_FORK);
+ if (error)
+ return error;
+ xfs_trans_binval(*trans, child_bp);
+ }
+
+ /*
+ * If we're not done, re-read the parent to get the next
+ * child block number.
+ */
+ if (i + 1 < ichdr.count) {
+ error = xfs_da3_node_read(*trans, dp, 0, parent_blkno,
+ &bp, XFS_ATTR_FORK);
+ if (error)
+ return error;
+ child_fsb = be32_to_cpu(btree[i + 1].before);
+ xfs_trans_brelse(*trans, bp);
+ }
+ /*
+ * Atomically commit the whole invalidate stuff.
+ */
+ error = xfs_trans_roll(trans, dp);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
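
xfs_attr3_node_inactive() above recurses through interior nodes one child at a time, bounding the recursion with XFS_DA_NODE_MAXDEPTH and re-reading the parent after each child because buffers are released between transactions. A minimal sketch of the bounded-depth, depth-first part of that walk over a toy in-memory tree; struct demo_node and the helpers are hypothetical, not XFS structures:

#include <errno.h>

#define DEMO_MAXDEPTH   5       /* stand-in for XFS_DA_NODE_MAXDEPTH */

/* Illustrative only: a toy in-memory tree, not an XFS da-btree. */
struct demo_node {
        int                     nchildren;
        struct demo_node        *children[8];
};

static int invalidate_leaf(struct demo_node *leaf)
{
        (void)leaf;             /* per-leaf work would go here */
        return 0;
}

/*
 * Depth-first walk with an explicit depth guard, mirroring the shape of
 * xfs_attr3_node_inactive(): refuse to recurse past a fixed bound rather
 * than trust on-disk data to terminate the recursion.
 */
static int invalidate_subtree(struct demo_node *node, int level)
{
        int     i, error;

        if (level > DEMO_MAXDEPTH)
                return -EIO;

        if (node->nchildren == 0)
                return invalidate_leaf(node);

        for (i = 0; i < node->nchildren; i++) {
                error = invalidate_subtree(node->children[i], level + 1);
                if (error)
                        return error;
        }
        return 0;
}
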
+/*
+ * Indiscriminately delete the entire attribute fork
+ *
+ * Recurse (gasp!) through the attribute nodes until we find leaves.
+ * We're doing a depth-first traversal in order to invalidate everything.
+ */
+int
+xfs_attr3_root_inactive(
+ struct xfs_trans **trans,
+ struct xfs_inode *dp)
+{
+ struct xfs_da_blkinfo *info;
+ struct xfs_buf *bp;
+ xfs_daddr_t blkno;
+ int error;
+
+ /*
+ * Read block 0 to see what we have to work with.
+ * We only get here if we have extents, since we remove
+ * the extents in reverse order the extent containing
+ * block 0 must still be there.
+ */
+ error = xfs_da3_node_read(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
+ if (error)
+ return error;
+ blkno = bp->b_bn;
+
+ /*
+ * Invalidate the tree, even if the "tree" is only a single leaf block.
+ * This is a depth-first traversal!
+ */
+ info = bp->b_addr;
+ switch (info->magic) {
+ case cpu_to_be16(XFS_DA_NODE_MAGIC):
+ case cpu_to_be16(XFS_DA3_NODE_MAGIC):
+ error = xfs_attr3_node_inactive(trans, dp, bp, 1);
+ break;
+ case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
+ case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
+ error = xfs_attr3_leaf_inactive(trans, dp, bp);
+ break;
+ default:
+ error = XFS_ERROR(EIO);
+ xfs_trans_brelse(*trans, bp);
+ break;
+ }
+ if (error)
+ return error;
+
+ /*
+ * Invalidate the incore copy of the root block.
+ */
+ error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
+ if (error)
+ return error;
+ xfs_trans_binval(*trans, bp); /* remove from cache */
+ /*
+ * Commit the invalidate and start the next transaction.
+ */
+ error = xfs_trans_roll(trans, dp);
+
+ return error;
+}
+
+int
+xfs_attr_inactive(xfs_inode_t *dp)
+{
+ xfs_trans_t *trans;
+ xfs_mount_t *mp;
+ int error;
+
+ mp = dp->i_mount;
+ ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
+
+ xfs_ilock(dp, XFS_ILOCK_SHARED);
+ if (!xfs_inode_hasattr(dp) ||
+ dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+ xfs_iunlock(dp, XFS_ILOCK_SHARED);
+ return 0;
+ }
+ xfs_iunlock(dp, XFS_ILOCK_SHARED);
+
+ /*
+ * Start our first transaction of the day.
+ *
+ * All future transactions during this code must be "chained" off
+ * this one via the trans_dup() call. All transactions will contain
+ * the inode, and the inode will always be marked with trans_ihold().
+ * Since the inode will be locked in all transactions, we must log
+ * the inode in every transaction to let it float upward through
+ * the log.
+ */
+ trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
+ error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0);
+ if (error) {
+ xfs_trans_cancel(trans, 0);
+ return(error);
+ }
+ xfs_ilock(dp, XFS_ILOCK_EXCL);
+
+ /*
+ * No need to make quota reservations here. We expect to release some
+ * blocks, not allocate, in the common case.
+ */
+ xfs_trans_ijoin(trans, dp, 0);
+
+ /*
+ * Decide on what work routines to call based on the inode size.
+ */
+ if (!xfs_inode_hasattr(dp) ||
+ dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+ error = 0;
+ goto out;
+ }
+ error = xfs_attr3_root_inactive(&trans, dp);
+ if (error)
+ goto out;
+
+ error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
+ if (error)
+ goto out;
+
+ error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
+ xfs_iunlock(dp, XFS_ILOCK_EXCL);
+
+ return(error);
+
+out:
+ xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+ xfs_iunlock(dp, XFS_ILOCK_EXCL);
+ return(error);
+}
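
The comment above xfs_trans_alloc() explains that every later transaction in the teardown is chained off the first one, with the inode held and re-logged each time; xfs_trans_roll() is what commits the current transaction and starts the next while keeping the inode joined. A hedged sketch of that "do a bounded chunk of work, roll, repeat" shape; struct demo_trans, do_one_chunk() and roll_trans() are hypothetical stand-ins, not XFS APIs:

/*
 * Illustrative only: the chained-transaction loop described in the comment
 * above, reduced to placeholders.
 */
struct demo_trans {
        int     seq;
};

static int do_one_chunk(struct demo_trans *tp, int chunk)
{
        (void)tp;
        (void)chunk;            /* pretend some metadata was invalidated */
        return 0;
}

static int roll_trans(struct demo_trans **tpp)
{
        /*
         * Commit the current transaction and hand back a fresh one with
         * the same resources still held, as xfs_trans_roll() does with the
         * inode joined and re-logged.
         */
        (*tpp)->seq++;
        return 0;
}

static int demo_teardown(struct demo_trans **tpp, int nchunks)
{
        int     i, error;

        for (i = 0; i < nchunks; i++) {
                error = do_one_chunk(*tpp, i);
                if (error)
                        return error;
                /* keep each transaction small; roll before the next chunk */
                error = roll_trans(tpp);
                if (error)
                        return error;
        }
        return 0;
}
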
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 303d41e4217..28712d29e43 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -17,31 +18,32 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
+#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
+#include "xfs_attr_sf.h"
+#include "xfs_attr_remote.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
+#include "xfs_buf_item.h"
+#include "xfs_cksum.h"
+#include "xfs_dinode.h"
+#include "xfs_dir2.h"
+
/*
* xfs_attr_leaf.c
@@ -56,78 +58,232 @@
/*
* Routines used for growing the Btree.
*/
-STATIC int xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t which_block,
- xfs_dabuf_t **bpp);
-STATIC int xfs_attr_leaf_add_work(xfs_dabuf_t *leaf_buffer, xfs_da_args_t *args,
- int freemap_index);
-STATIC void xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *leaf_buffer);
-STATIC void xfs_attr_leaf_rebalance(xfs_da_state_t *state,
+STATIC int xfs_attr3_leaf_create(struct xfs_da_args *args,
+ xfs_dablk_t which_block, struct xfs_buf **bpp);
+STATIC int xfs_attr3_leaf_add_work(struct xfs_buf *leaf_buffer,
+ struct xfs_attr3_icleaf_hdr *ichdr,
+ struct xfs_da_args *args, int freemap_index);
+STATIC void xfs_attr3_leaf_compact(struct xfs_da_args *args,
+ struct xfs_attr3_icleaf_hdr *ichdr,
+ struct xfs_buf *leaf_buffer);
+STATIC void xfs_attr3_leaf_rebalance(xfs_da_state_t *state,
xfs_da_state_blk_t *blk1,
xfs_da_state_blk_t *blk2);
-STATIC int xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
- xfs_da_state_blk_t *leaf_blk_1,
- xfs_da_state_blk_t *leaf_blk_2,
- int *number_entries_in_blk1,
- int *number_usedbytes_in_blk1);
-
-/*
- * Routines used for shrinking the Btree.
- */
-STATIC int xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
- xfs_dabuf_t *bp, int level);
-STATIC int xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
- xfs_dabuf_t *bp);
-STATIC int xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
- xfs_dablk_t blkno, int blkcnt);
+STATIC int xfs_attr3_leaf_figure_balance(xfs_da_state_t *state,
+ xfs_da_state_blk_t *leaf_blk_1,
+ struct xfs_attr3_icleaf_hdr *ichdr1,
+ xfs_da_state_blk_t *leaf_blk_2,
+ struct xfs_attr3_icleaf_hdr *ichdr2,
+ int *number_entries_in_blk1,
+ int *number_usedbytes_in_blk1);
/*
* Utility routines.
*/
-STATIC void xfs_attr_leaf_moveents(xfs_attr_leafblock_t *src_leaf,
- int src_start,
- xfs_attr_leafblock_t *dst_leaf,
- int dst_start, int move_count,
- xfs_mount_t *mp);
+STATIC void xfs_attr3_leaf_moveents(struct xfs_da_args *args,
+ struct xfs_attr_leafblock *src_leaf,
+ struct xfs_attr3_icleaf_hdr *src_ichdr, int src_start,
+ struct xfs_attr_leafblock *dst_leaf,
+ struct xfs_attr3_icleaf_hdr *dst_ichdr, int dst_start,
+ int move_count);
STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index);
-/*========================================================================
- * Namespace helper routines
- *========================================================================*/
+void
+xfs_attr3_leaf_hdr_from_disk(
+ struct xfs_attr3_icleaf_hdr *to,
+ struct xfs_attr_leafblock *from)
+{
+ int i;
+
+ ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
+ from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
+
+ if (from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)) {
+ struct xfs_attr3_leaf_hdr *hdr3 = (struct xfs_attr3_leaf_hdr *)from;
+
+ to->forw = be32_to_cpu(hdr3->info.hdr.forw);
+ to->back = be32_to_cpu(hdr3->info.hdr.back);
+ to->magic = be16_to_cpu(hdr3->info.hdr.magic);
+ to->count = be16_to_cpu(hdr3->count);
+ to->usedbytes = be16_to_cpu(hdr3->usedbytes);
+ to->firstused = be16_to_cpu(hdr3->firstused);
+ to->holes = hdr3->holes;
+
+ for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
+ to->freemap[i].base = be16_to_cpu(hdr3->freemap[i].base);
+ to->freemap[i].size = be16_to_cpu(hdr3->freemap[i].size);
+ }
+ return;
+ }
+ to->forw = be32_to_cpu(from->hdr.info.forw);
+ to->back = be32_to_cpu(from->hdr.info.back);
+ to->magic = be16_to_cpu(from->hdr.info.magic);
+ to->count = be16_to_cpu(from->hdr.count);
+ to->usedbytes = be16_to_cpu(from->hdr.usedbytes);
+ to->firstused = be16_to_cpu(from->hdr.firstused);
+ to->holes = from->hdr.holes;
+
+ for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
+ to->freemap[i].base = be16_to_cpu(from->hdr.freemap[i].base);
+ to->freemap[i].size = be16_to_cpu(from->hdr.freemap[i].size);
+ }
+}
-STATIC_INLINE attrnames_t *
-xfs_attr_flags_namesp(int flags)
+void
+xfs_attr3_leaf_hdr_to_disk(
+ struct xfs_attr_leafblock *to,
+ struct xfs_attr3_icleaf_hdr *from)
{
- return ((flags & XFS_ATTR_SECURE) ? &attr_secure:
- ((flags & XFS_ATTR_ROOT) ? &attr_trusted : &attr_user));
+ int i;
+
+ ASSERT(from->magic == XFS_ATTR_LEAF_MAGIC ||
+ from->magic == XFS_ATTR3_LEAF_MAGIC);
+
+ if (from->magic == XFS_ATTR3_LEAF_MAGIC) {
+ struct xfs_attr3_leaf_hdr *hdr3 = (struct xfs_attr3_leaf_hdr *)to;
+
+ hdr3->info.hdr.forw = cpu_to_be32(from->forw);
+ hdr3->info.hdr.back = cpu_to_be32(from->back);
+ hdr3->info.hdr.magic = cpu_to_be16(from->magic);
+ hdr3->count = cpu_to_be16(from->count);
+ hdr3->usedbytes = cpu_to_be16(from->usedbytes);
+ hdr3->firstused = cpu_to_be16(from->firstused);
+ hdr3->holes = from->holes;
+ hdr3->pad1 = 0;
+
+ for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
+ hdr3->freemap[i].base = cpu_to_be16(from->freemap[i].base);
+ hdr3->freemap[i].size = cpu_to_be16(from->freemap[i].size);
+ }
+ return;
+ }
+ to->hdr.info.forw = cpu_to_be32(from->forw);
+ to->hdr.info.back = cpu_to_be32(from->back);
+ to->hdr.info.magic = cpu_to_be16(from->magic);
+ to->hdr.count = cpu_to_be16(from->count);
+ to->hdr.usedbytes = cpu_to_be16(from->usedbytes);
+ to->hdr.firstused = cpu_to_be16(from->firstused);
+ to->hdr.holes = from->holes;
+ to->hdr.pad1 = 0;
+
+ for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
+ to->hdr.freemap[i].base = cpu_to_be16(from->freemap[i].base);
+ to->hdr.freemap[i].size = cpu_to_be16(from->freemap[i].size);
+ }
+}
+
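
The hdr_from_disk/hdr_to_disk pair above converts between the big-endian on-disk leaf header and a native-endian in-core copy, branching on which of the two on-disk layouts is present. A minimal sketch of the same idea for a one-field header; demo_disk_hdr, demo_icore_hdr and the byte-swap helpers are hypothetical (the kernel uses be16_to_cpu()/cpu_to_be16() instead):

#include <stdint.h>

/*
 * Illustrative only: a big-endian on-disk field and its native in-core
 * mirror, in the spirit of xfs_attr3_leaf_hdr_from_disk/_to_disk.
 */
struct demo_disk_hdr {
        uint8_t count_be[2];    /* big-endian on disk */
};

struct demo_icore_hdr {
        uint16_t count;         /* native-endian in core */
};

static uint16_t demo_be16_to_cpu(const uint8_t b[2])
{
        return (uint16_t)((b[0] << 8) | b[1]);
}

static void demo_cpu_to_be16(uint16_t v, uint8_t b[2])
{
        b[0] = (uint8_t)(v >> 8);
        b[1] = (uint8_t)(v & 0xff);
}

static void demo_hdr_from_disk(struct demo_icore_hdr *to,
                               const struct demo_disk_hdr *from)
{
        to->count = demo_be16_to_cpu(from->count_be);
}

static void demo_hdr_to_disk(struct demo_disk_hdr *to,
                             const struct demo_icore_hdr *from)
{
        demo_cpu_to_be16(from->count, to->count_be);
}
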
+static bool
+xfs_attr3_leaf_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_attr_leafblock *leaf = bp->b_addr;
+ struct xfs_attr3_icleaf_hdr ichdr;
+
+ xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
+
+ if (ichdr.magic != XFS_ATTR3_LEAF_MAGIC)
+ return false;
+
+ if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_uuid))
+ return false;
+ if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
+ return false;
+ } else {
+ if (ichdr.magic != XFS_ATTR_LEAF_MAGIC)
+ return false;
+ }
+ if (ichdr.count == 0)
+ return false;
+
+ /* XXX: need to range check rest of attr header values */
+ /* XXX: hash order check? */
+
+ return true;
+}
+
+static void
+xfs_attr3_leaf_write_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_attr3_leaf_hdr *hdr3 = bp->b_addr;
+
+ if (!xfs_attr3_leaf_verify(bp)) {
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
+ xfs_verifier_error(bp);
+ return;
+ }
+
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return;
+
+ if (bip)
+ hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
+
+ xfs_buf_update_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF);
}
/*
- * If namespace bits don't match return 0.
- * If all match then return 1.
+ * leaf/node format detection on trees is sketchy, so a node read can be done on
+ * leaf level blocks when detection identifies the tree as a node format tree
+ * incorrectly. In this case, we need to swap the verifier to match the correct
+ * format of the block being read.
*/
-STATIC_INLINE int
-xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
+static void
+xfs_attr3_leaf_read_verify(
+ struct xfs_buf *bp)
{
- return XFS_ATTR_NSP_ONDISK(ondisk_flags) == XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags);
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+
+ if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ !xfs_buf_verify_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF))
+ xfs_buf_ioerror(bp, EFSBADCRC);
+ else if (!xfs_attr3_leaf_verify(bp))
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
+
+ if (bp->b_error)
+ xfs_verifier_error(bp);
+}
+
+const struct xfs_buf_ops xfs_attr3_leaf_buf_ops = {
+ .verify_read = xfs_attr3_leaf_read_verify,
+ .verify_write = xfs_attr3_leaf_write_verify,
+};
+
+int
+xfs_attr3_leaf_read(
+ struct xfs_trans *tp,
+ struct xfs_inode *dp,
+ xfs_dablk_t bno,
+ xfs_daddr_t mappedbno,
+ struct xfs_buf **bpp)
+{
+ int err;
+
+ err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
+ XFS_ATTR_FORK, &xfs_attr3_leaf_buf_ops);
+ if (!err && tp)
+ xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_ATTR_LEAF_BUF);
+ return err;
}
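
The buffer ops introduced above split verification into a read path (checksum first, then structural checks) and a write path (structural checks, then stamp the LSN and recompute the checksum). A reduced sketch of the read-side ordering on a hypothetical block layout; demo_block, DEMO_MAGIC and the additive checksum are placeholders, not the CRC32c verification the kernel performs:

#include <errno.h>
#include <stdint.h>
#include <stddef.h>

/* Illustrative only: checksum first, structure second, as in the verifier. */
#define DEMO_MAGIC      0x3bee

struct demo_block {
        uint16_t        magic;
        uint16_t        count;
        uint32_t        cksum;          /* covers payload[] only */
        uint8_t         payload[56];
};

static uint32_t demo_cksum(const struct demo_block *b)
{
        uint32_t        sum = 0;
        size_t          i;

        for (i = 0; i < sizeof(b->payload); i++)
                sum += b->payload[i];
        return sum;
}

/* Return 0 if the block looks usable, -EIO if not. */
static int demo_read_verify(const struct demo_block *b)
{
        if (demo_cksum(b) != b->cksum)
                return -EIO;            /* checksum mismatch */
        if (b->magic != DEMO_MAGIC || b->count == 0)
                return -EIO;            /* structural corruption */
        return 0;
}
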
+/*========================================================================
+ * Namespace helper routines
+ *========================================================================*/
+
/*
- * If namespace bits don't match and we don't have an override for it
- * then return 0.
- * If all match or are overridable then return 1.
+ * If namespace bits don't match return 0.
+ * If all match then return 1.
*/
-STATIC_INLINE int
-xfs_attr_namesp_match_overrides(int arg_flags, int ondisk_flags)
+STATIC int
+xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
{
- if (((arg_flags & ATTR_SECURE) == 0) !=
- ((ondisk_flags & XFS_ATTR_SECURE) == 0) &&
- !(arg_flags & ATTR_KERNORMALS))
- return 0;
- if (((arg_flags & ATTR_ROOT) == 0) !=
- ((ondisk_flags & XFS_ATTR_ROOT) == 0) &&
- !(arg_flags & ATTR_KERNROOTLS))
- return 0;
- return 1;
+ return XFS_ATTR_NSP_ONDISK(ondisk_flags) == XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags);
}
@@ -138,6 +294,7 @@ xfs_attr_namesp_match_overrides(int arg_flags, int ondisk_flags)
/*
* Query whether the requested number of additional bytes of extended
* attribute space will be able to fit inline.
+ *
* Returns zero if not, else the di_forkoff fork offset to be used in the
* literal area for attribute data once the new bytes have been added.
*
@@ -150,10 +307,11 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
int offset;
int minforkoff; /* lower limit on valid forkoff locations */
int maxforkoff; /* upper limit on valid forkoff locations */
- int dsize;
+ int dsize;
xfs_mount_t *mp = dp->i_mount;
- offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */
+ /* rounded down */
+ offset = (XFS_LITINO(mp, dp->i_d.di_version) - bytes) >> 3;
switch (dp->i_d.di_format) {
case XFS_DINODE_FMT_DEV:
@@ -164,59 +322,74 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
return (offset >= minforkoff) ? minforkoff : 0;
}
- if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
- if (bytes <= XFS_IFORK_ASIZE(dp))
- return dp->i_d.di_forkoff;
+ /*
+ * If the requested numbers of bytes is smaller or equal to the
+ * current attribute fork size we can always proceed.
+ *
+ * Note that if_bytes in the data fork might actually be larger than
+ * the current data fork size is due to delalloc extents. In that
+ * case either the extent count will go down when they are converted
+ * to real extents, or the delalloc conversion will take care of the
+ * literal area rebalancing.
+ */
+ if (bytes <= XFS_IFORK_ASIZE(dp))
+ return dp->i_d.di_forkoff;
+
+ /*
+ * For attr2 we can try to move the forkoff if there is space in the
+ * literal area, but for the old format we are done if there is no
+ * space in the fixed attribute fork.
+ */
+ if (!(mp->m_flags & XFS_MOUNT_ATTR2))
return 0;
- }
dsize = dp->i_df.if_bytes;
-
+
switch (dp->i_d.di_format) {
case XFS_DINODE_FMT_EXTENTS:
- /*
+ /*
* If there is no attr fork and the data fork is extents,
- * determine if creating the default attr fork will result
- * in the extents form migrating to btree. If so, the
- * minimum offset only needs to be the space required for
+ * determine if creating the default attr fork will result
+ * in the extents form migrating to btree. If so, the
+ * minimum offset only needs to be the space required for
* the btree root.
- */
- if (!dp->i_d.di_forkoff && dp->i_df.if_bytes > mp->m_attroffset)
+ */
+ if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
+ xfs_default_attroffset(dp))
dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
break;
-
case XFS_DINODE_FMT_BTREE:
/*
- * If have data btree then keep forkoff if we have one,
- * otherwise we are adding a new attr, so then we set
- * minforkoff to where the btree root can finish so we have
+ * If we have a data btree then keep forkoff if we have one,
+ * otherwise we are adding a new attr, so then we set
+ * minforkoff to where the btree root can finish so we have
* plenty of room for attrs
*/
if (dp->i_d.di_forkoff) {
- if (offset < dp->i_d.di_forkoff)
+ if (offset < dp->i_d.di_forkoff)
return 0;
- else
- return dp->i_d.di_forkoff;
- } else
- dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
+ return dp->i_d.di_forkoff;
+ }
+ dsize = XFS_BMAP_BROOT_SPACE(mp, dp->i_df.if_broot);
break;
}
-
- /*
- * A data fork btree root must have space for at least
+
+ /*
+ * A data fork btree root must have space for at least
* MINDBTPTRS key/ptr pairs if the data fork is small or empty.
*/
minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
minforkoff = roundup(minforkoff, 8) >> 3;
/* attr fork btree root can have at least this many key/ptr pairs */
- maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
+ maxforkoff = XFS_LITINO(mp, dp->i_d.di_version) -
+ XFS_BMDR_SPACE_CALC(MINABTPTRS);
maxforkoff = maxforkoff >> 3; /* rounded down */
- if (offset >= minforkoff && offset < maxforkoff)
- return offset;
if (offset >= maxforkoff)
return maxforkoff;
+ if (offset >= minforkoff)
+ return offset;
return 0;
}
@@ -248,6 +421,8 @@ xfs_attr_shortform_create(xfs_da_args_t *args)
xfs_inode_t *dp;
xfs_ifork_t *ifp;
+ trace_xfs_attr_sf_create(args);
+
dp = args->dp;
ASSERT(dp != NULL);
ifp = dp->i_afp;
@@ -281,13 +456,11 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
xfs_inode_t *dp;
xfs_ifork_t *ifp;
+ trace_xfs_attr_sf_add(args);
+
dp = args->dp;
mp = dp->i_mount;
dp->i_d.di_forkoff = forkoff;
- dp->i_df.if_ext_max =
- XFS_IFORK_DSIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
- dp->i_afp->if_ext_max =
- XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
ifp = dp->i_afp;
ASSERT(ifp->if_flags & XFS_IFINLINE);
@@ -324,6 +497,25 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
}
/*
+ * After the last attribute is removed revert to original inode format,
+ * making all literal area available to the data fork once more.
+ */
+STATIC void
+xfs_attr_fork_reset(
+ struct xfs_inode *ip,
+ struct xfs_trans *tp)
+{
+ xfs_idestroy_fork(ip, XFS_ATTR_FORK);
+ ip->i_d.di_forkoff = 0;
+ ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
+
+ ASSERT(ip->i_d.di_anextents == 0);
+ ASSERT(ip->i_afp == NULL);
+
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+}
+
+/*
* Remove an attribute from the shortform attribute list structure.
*/
int
@@ -335,6 +527,8 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
xfs_mount_t *mp;
xfs_inode_t *dp;
+ trace_xfs_attr_sf_remove(args);
+
dp = args->dp;
mp = dp->i_mount;
base = sizeof(xfs_attr_sf_hdr_t);
@@ -369,33 +563,19 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
* Fix up the start offset of the attribute fork
*/
totsize -= size;
- if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname &&
- (mp->m_flags & XFS_MOUNT_ATTR2) &&
- (dp->i_d.di_format != XFS_DINODE_FMT_BTREE)) {
- /*
- * Last attribute now removed, revert to original
- * inode format making all literal area available
- * to the data fork once more.
- */
- xfs_idestroy_fork(dp, XFS_ATTR_FORK);
- dp->i_d.di_forkoff = 0;
- dp->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
- ASSERT(dp->i_d.di_anextents == 0);
- ASSERT(dp->i_afp == NULL);
- dp->i_df.if_ext_max =
- XFS_IFORK_DSIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
- xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
+ if (totsize == sizeof(xfs_attr_sf_hdr_t) &&
+ (mp->m_flags & XFS_MOUNT_ATTR2) &&
+ (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
+ !(args->op_flags & XFS_DA_OP_ADDNAME)) {
+ xfs_attr_fork_reset(dp, args->trans);
} else {
xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
ASSERT(dp->i_d.di_forkoff);
- ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || args->addname ||
- !(mp->m_flags & XFS_MOUNT_ATTR2) ||
- dp->i_d.di_format == XFS_DINODE_FMT_BTREE);
- dp->i_afp->if_ext_max =
- XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
- dp->i_df.if_ext_max =
- XFS_IFORK_DSIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
+ ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) ||
+ (args->op_flags & XFS_DA_OP_ADDNAME) ||
+ !(mp->m_flags & XFS_MOUNT_ATTR2) ||
+ dp->i_d.di_format == XFS_DINODE_FMT_BTREE);
xfs_trans_log_inode(args->trans, dp,
XFS_ILOG_CORE | XFS_ILOG_ADATA);
}
@@ -417,6 +597,8 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
int i;
xfs_ifork_t *ifp;
+ trace_xfs_attr_sf_lookup(args);
+
ifp = args->dp->i_afp;
ASSERT(ifp->if_flags & XFS_IFINLINE);
sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
@@ -445,7 +627,7 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
xfs_attr_sf_entry_t *sfe;
int i;
- ASSERT(args->dp->i_d.di_aformat == XFS_IFINLINE);
+ ASSERT(args->dp->i_afp->if_flags == XFS_IFINLINE);
sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data;
sfe = &sf->list[0];
for (i = 0; i < sf->hdr.count;
@@ -485,9 +667,11 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
char *tmpbuffer;
int error, i, size;
xfs_dablk_t blkno;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
xfs_ifork_t *ifp;
+ trace_xfs_attr_sf_to_leaf(args);
+
dp = args->dp;
ifp = dp->i_afp;
sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
@@ -498,6 +682,8 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
sf = (xfs_attr_shortform_t *)tmpbuffer;
xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
+ xfs_bmap_local_to_extents_empty(dp, XFS_ATTR_FORK);
+
bp = NULL;
error = xfs_da_grow_inode(args, &blkno);
if (error) {
@@ -513,7 +699,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
}
ASSERT(blkno == 0);
- error = xfs_attr_leaf_create(args, blkno, &bp);
+ error = xfs_attr3_leaf_create(args, blkno, &bp);
if (error) {
error = xfs_da_shrink_inode(args, 0, bp);
bp = NULL;
@@ -526,25 +712,26 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
memset((char *)&nargs, 0, sizeof(nargs));
nargs.dp = dp;
+ nargs.geo = args->geo;
nargs.firstblock = args->firstblock;
nargs.flist = args->flist;
nargs.total = args->total;
nargs.whichfork = XFS_ATTR_FORK;
nargs.trans = args->trans;
- nargs.oknoent = 1;
+ nargs.op_flags = XFS_DA_OP_OKNOENT;
sfe = &sf->list[0];
for (i = 0; i < sf->hdr.count; i++) {
- nargs.name = (char *)sfe->nameval;
+ nargs.name = sfe->nameval;
nargs.namelen = sfe->namelen;
- nargs.value = (char *)&sfe->nameval[nargs.namelen];
+ nargs.value = &sfe->nameval[nargs.namelen];
nargs.valuelen = sfe->valuelen;
- nargs.hashval = xfs_da_hashname((char *)sfe->nameval,
+ nargs.hashval = xfs_da_hashname(sfe->nameval,
sfe->namelen);
nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags);
- error = xfs_attr_leaf_lookup_int(bp, &nargs); /* set a->index */
+ error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */
ASSERT(error == ENOATTR);
- error = xfs_attr_leaf_add(bp, &nargs);
+ error = xfs_attr3_leaf_add(bp, &nargs);
ASSERT(error != ENOSPC);
if (error)
goto out;
@@ -553,266 +740,85 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
error = 0;
out:
- if(bp)
- xfs_da_buf_done(bp);
- kmem_free(tmpbuffer, size);
+ kmem_free(tmpbuffer);
return(error);
}
-STATIC int
-xfs_attr_shortform_compare(const void *a, const void *b)
-{
- xfs_attr_sf_sort_t *sa, *sb;
-
- sa = (xfs_attr_sf_sort_t *)a;
- sb = (xfs_attr_sf_sort_t *)b;
- if (sa->hash < sb->hash) {
- return(-1);
- } else if (sa->hash > sb->hash) {
- return(1);
- } else {
- return(sa->entno - sb->entno);
- }
-}
-
-
-#define XFS_ISRESET_CURSOR(cursor) \
- (!((cursor)->initted) && !((cursor)->hashval) && \
- !((cursor)->blkno) && !((cursor)->offset))
-/*
- * Copy out entries of shortform attribute lists for attr_list().
- * Shortform attribute lists are not stored in hashval sorted order.
- * If the output buffer is not large enough to hold them all, then we
- * we have to calculate each entries' hashvalue and sort them before
- * we can begin returning them to the user.
- */
-/*ARGSUSED*/
-int
-xfs_attr_shortform_list(xfs_attr_list_context_t *context)
-{
- attrlist_cursor_kern_t *cursor;
- xfs_attr_sf_sort_t *sbuf, *sbp;
- xfs_attr_shortform_t *sf;
- xfs_attr_sf_entry_t *sfe;
- xfs_inode_t *dp;
- int sbsize, nsbuf, count, i;
- int error;
-
- ASSERT(context != NULL);
- dp = context->dp;
- ASSERT(dp != NULL);
- ASSERT(dp->i_afp != NULL);
- sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
- ASSERT(sf != NULL);
- if (!sf->hdr.count)
- return(0);
- cursor = context->cursor;
- ASSERT(cursor != NULL);
-
- xfs_attr_trace_l_c("sf start", context);
-
- /*
- * If the buffer is large enough and the cursor is at the start,
- * do not bother with sorting since we will return everything in
- * one buffer and another call using the cursor won't need to be
- * made.
- * Note the generous fudge factor of 16 overhead bytes per entry.
- * If bufsize is zero then put_listent must be a search function
- * and can just scan through what we have.
- */
- if (context->bufsize == 0 ||
- (XFS_ISRESET_CURSOR(cursor) &&
- (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
- for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
- attrnames_t *namesp;
-
- if (!xfs_attr_namesp_match_overrides(context->flags, sfe->flags)) {
- sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
- continue;
- }
- namesp = xfs_attr_flags_namesp(sfe->flags);
- error = context->put_listent(context,
- namesp,
- (char *)sfe->nameval,
- (int)sfe->namelen,
- (int)sfe->valuelen,
- (char*)&sfe->nameval[sfe->namelen]);
-
- /*
- * Either search callback finished early or
- * didn't fit it all in the buffer after all.
- */
- if (context->seen_enough)
- break;
-
- if (error)
- return error;
- sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
- }
- xfs_attr_trace_l_c("sf big-gulp", context);
- return(0);
- }
-
- /* do no more for a search callback */
- if (context->bufsize == 0)
- return 0;
-
- /*
- * It didn't all fit, so we have to sort everything on hashval.
- */
- sbsize = sf->hdr.count * sizeof(*sbuf);
- sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP);
-
- /*
- * Scan the attribute list for the rest of the entries, storing
- * the relevant info from only those that match into a buffer.
- */
- nsbuf = 0;
- for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
- if (unlikely(
- ((char *)sfe < (char *)sf) ||
- ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
- XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
- XFS_ERRLEVEL_LOW,
- context->dp->i_mount, sfe);
- xfs_attr_trace_l_c("sf corrupted", context);
- kmem_free(sbuf, sbsize);
- return XFS_ERROR(EFSCORRUPTED);
- }
- if (!xfs_attr_namesp_match_overrides(context->flags, sfe->flags)) {
- sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
- continue;
- }
- sbp->entno = i;
- sbp->hash = xfs_da_hashname((char *)sfe->nameval, sfe->namelen);
- sbp->name = (char *)sfe->nameval;
- sbp->namelen = sfe->namelen;
- /* These are bytes, and both on-disk, don't endian-flip */
- sbp->valuelen = sfe->valuelen;
- sbp->flags = sfe->flags;
- sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
- sbp++;
- nsbuf++;
- }
-
- /*
- * Sort the entries on hash then entno.
- */
- xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);
-
- /*
- * Re-find our place IN THE SORTED LIST.
- */
- count = 0;
- cursor->initted = 1;
- cursor->blkno = 0;
- for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
- if (sbp->hash == cursor->hashval) {
- if (cursor->offset == count) {
- break;
- }
- count++;
- } else if (sbp->hash > cursor->hashval) {
- break;
- }
- }
- if (i == nsbuf) {
- kmem_free(sbuf, sbsize);
- xfs_attr_trace_l_c("blk end", context);
- return(0);
- }
-
- /*
- * Loop putting entries into the user buffer.
- */
- for ( ; i < nsbuf; i++, sbp++) {
- attrnames_t *namesp;
-
- namesp = xfs_attr_flags_namesp(sbp->flags);
-
- if (cursor->hashval != sbp->hash) {
- cursor->hashval = sbp->hash;
- cursor->offset = 0;
- }
- error = context->put_listent(context,
- namesp,
- sbp->name,
- sbp->namelen,
- sbp->valuelen,
- &sbp->name[sbp->namelen]);
- if (error)
- return error;
- if (context->seen_enough)
- break;
- cursor->offset++;
- }
-
- kmem_free(sbuf, sbsize);
- xfs_attr_trace_l_c("sf E-O-F", context);
- return(0);
-}
-
/*
* Check a leaf attribute block to see if all the entries would fit into
* a shortform attribute list.
*/
int
-xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp)
+xfs_attr_shortform_allfit(
+ struct xfs_buf *bp,
+ struct xfs_inode *dp)
{
- xfs_attr_leafblock_t *leaf;
- xfs_attr_leaf_entry_t *entry;
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_attr_leaf_entry *entry;
xfs_attr_leaf_name_local_t *name_loc;
- int bytes, i;
+ struct xfs_attr3_icleaf_hdr leafhdr;
+ int bytes;
+ int i;
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ leaf = bp->b_addr;
+ xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
+ entry = xfs_attr3_leaf_entryp(leaf);
- entry = &leaf->entries[0];
bytes = sizeof(struct xfs_attr_sf_hdr);
- for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
+ for (i = 0; i < leafhdr.count; entry++, i++) {
if (entry->flags & XFS_ATTR_INCOMPLETE)
continue; /* don't copy partial entries */
if (!(entry->flags & XFS_ATTR_LOCAL))
return(0);
- name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i);
+ name_loc = xfs_attr3_leaf_name_local(leaf, i);
if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX)
return(0);
if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
return(0);
- bytes += sizeof(struct xfs_attr_sf_entry)-1
+ bytes += sizeof(struct xfs_attr_sf_entry) - 1
+ name_loc->namelen
+ be16_to_cpu(name_loc->valuelen);
}
if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
(dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
(bytes == sizeof(struct xfs_attr_sf_hdr)))
- return(-1);
- return(xfs_attr_shortform_bytesfit(dp, bytes));
+ return -1;
+ return xfs_attr_shortform_bytesfit(dp, bytes);
}
/*
* Convert a leaf attribute list to shortform attribute list
*/
int
-xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
+xfs_attr3_leaf_to_shortform(
+ struct xfs_buf *bp,
+ struct xfs_da_args *args,
+ int forkoff)
{
- xfs_attr_leafblock_t *leaf;
- xfs_attr_leaf_entry_t *entry;
- xfs_attr_leaf_name_local_t *name_loc;
- xfs_da_args_t nargs;
- xfs_inode_t *dp;
- char *tmpbuffer;
- int error, i;
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_attr3_icleaf_hdr ichdr;
+ struct xfs_attr_leaf_entry *entry;
+ struct xfs_attr_leaf_name_local *name_loc;
+ struct xfs_da_args nargs;
+ struct xfs_inode *dp = args->dp;
+ char *tmpbuffer;
+ int error;
+ int i;
- dp = args->dp;
- tmpbuffer = kmem_alloc(XFS_LBSIZE(dp->i_mount), KM_SLEEP);
- ASSERT(tmpbuffer != NULL);
+ trace_xfs_attr_leaf_to_sf(args);
+
+ tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP);
+ if (!tmpbuffer)
+ return ENOMEM;
+
+ memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
- ASSERT(bp != NULL);
- memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount));
leaf = (xfs_attr_leafblock_t *)tmpbuffer;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- memset(bp->data, 0, XFS_LBSIZE(dp->i_mount));
+ xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ entry = xfs_attr3_leaf_entryp(leaf);
+
+ /* XXX (dgc): buffer is about to be marked stale - why zero it? */
+ memset(bp->b_addr, 0, args->geo->blksize);
/*
* Clean out the prior contents of the attribute list.
@@ -824,20 +830,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
if (forkoff == -1) {
ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);
-
- /*
- * Last attribute was removed, revert to original
- * inode format making all literal area available
- * to the data fork once more.
- */
- xfs_idestroy_fork(dp, XFS_ATTR_FORK);
- dp->i_d.di_forkoff = 0;
- dp->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
- ASSERT(dp->i_d.di_anextents == 0);
- ASSERT(dp->i_afp == NULL);
- dp->i_df.if_ext_max =
- XFS_IFORK_DSIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
- xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
+ xfs_attr_fork_reset(dp, args->trans);
goto out;
}
@@ -847,24 +840,25 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
* Copy the attributes
*/
memset((char *)&nargs, 0, sizeof(nargs));
+ nargs.geo = args->geo;
nargs.dp = dp;
nargs.firstblock = args->firstblock;
nargs.flist = args->flist;
nargs.total = args->total;
nargs.whichfork = XFS_ATTR_FORK;
nargs.trans = args->trans;
- nargs.oknoent = 1;
- entry = &leaf->entries[0];
- for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
+ nargs.op_flags = XFS_DA_OP_OKNOENT;
+
+ for (i = 0; i < ichdr.count; entry++, i++) {
if (entry->flags & XFS_ATTR_INCOMPLETE)
continue; /* don't copy partial entries */
if (!entry->nameidx)
continue;
ASSERT(entry->flags & XFS_ATTR_LOCAL);
- name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i);
- nargs.name = (char *)name_loc->nameval;
+ name_loc = xfs_attr3_leaf_name_local(leaf, i);
+ nargs.name = name_loc->nameval;
nargs.namelen = name_loc->namelen;
- nargs.value = (char *)&name_loc->nameval[nargs.namelen];
+ nargs.value = &name_loc->nameval[nargs.namelen];
nargs.valuelen = be16_to_cpu(name_loc->valuelen);
nargs.hashval = be32_to_cpu(entry->hashval);
nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(entry->flags);
@@ -873,69 +867,78 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
error = 0;
out:
- kmem_free(tmpbuffer, XFS_LBSIZE(dp->i_mount));
- return(error);
+ kmem_free(tmpbuffer);
+ return error;
}
/*
* Convert from using a single leaf to a root node and a leaf.
*/
int
-xfs_attr_leaf_to_node(xfs_da_args_t *args)
+xfs_attr3_leaf_to_node(
+ struct xfs_da_args *args)
{
- xfs_attr_leafblock_t *leaf;
- xfs_da_intnode_t *node;
- xfs_inode_t *dp;
- xfs_dabuf_t *bp1, *bp2;
- xfs_dablk_t blkno;
- int error;
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_attr3_icleaf_hdr icleafhdr;
+ struct xfs_attr_leaf_entry *entries;
+ struct xfs_da_node_entry *btree;
+ struct xfs_da3_icnode_hdr icnodehdr;
+ struct xfs_da_intnode *node;
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
+ struct xfs_buf *bp1 = NULL;
+ struct xfs_buf *bp2 = NULL;
+ xfs_dablk_t blkno;
+ int error;
+
+ trace_xfs_attr_leaf_to_node(args);
- dp = args->dp;
- bp1 = bp2 = NULL;
error = xfs_da_grow_inode(args, &blkno);
if (error)
goto out;
- error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp1,
- XFS_ATTR_FORK);
+ error = xfs_attr3_leaf_read(args->trans, dp, 0, -1, &bp1);
if (error)
goto out;
- ASSERT(bp1 != NULL);
- bp2 = NULL;
- error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp2,
- XFS_ATTR_FORK);
+
+ error = xfs_da_get_buf(args->trans, dp, blkno, -1, &bp2, XFS_ATTR_FORK);
if (error)
goto out;
- ASSERT(bp2 != NULL);
- memcpy(bp2->data, bp1->data, XFS_LBSIZE(dp->i_mount));
- xfs_da_buf_done(bp1);
- bp1 = NULL;
- xfs_da_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1);
+
+ /* copy leaf to new buffer, update identifiers */
+ xfs_trans_buf_set_type(args->trans, bp2, XFS_BLFT_ATTR_LEAF_BUF);
+ bp2->b_ops = bp1->b_ops;
+ memcpy(bp2->b_addr, bp1->b_addr, args->geo->blksize);
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_da3_blkinfo *hdr3 = bp2->b_addr;
+ hdr3->blkno = cpu_to_be64(bp2->b_bn);
+ }
+ xfs_trans_log_buf(args->trans, bp2, 0, args->geo->blksize - 1);
/*
* Set up the new root node.
*/
- error = xfs_da_node_create(args, 0, 1, &bp1, XFS_ATTR_FORK);
+ error = xfs_da3_node_create(args, 0, 1, &bp1, XFS_ATTR_FORK);
if (error)
goto out;
- node = bp1->data;
- leaf = bp2->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ node = bp1->b_addr;
+ dp->d_ops->node_hdr_from_disk(&icnodehdr, node);
+ btree = dp->d_ops->node_tree_p(node);
+
+ leaf = bp2->b_addr;
+ xfs_attr3_leaf_hdr_from_disk(&icleafhdr, leaf);
+ entries = xfs_attr3_leaf_entryp(leaf);
+
/* both on-disk, don't endian-flip twice */
- node->btree[0].hashval =
- leaf->entries[be16_to_cpu(leaf->hdr.count)-1 ].hashval;
- node->btree[0].before = cpu_to_be32(blkno);
- node->hdr.count = cpu_to_be16(1);
- xfs_da_log_buf(args->trans, bp1, 0, XFS_LBSIZE(dp->i_mount) - 1);
+ btree[0].hashval = entries[icleafhdr.count - 1].hashval;
+ btree[0].before = cpu_to_be32(blkno);
+ icnodehdr.count = 1;
+ dp->d_ops->node_hdr_to_disk(node, &icnodehdr);
+ xfs_trans_log_buf(args->trans, bp1, 0, args->geo->blksize - 1);
error = 0;
out:
- if (bp1)
- xfs_da_buf_done(bp1);
- if (bp2)
- xfs_da_buf_done(bp2);
- return(error);
+ return error;
}
-
/*========================================================================
* Routines used for growing the Btree.
*========================================================================*/
@@ -945,51 +948,69 @@ out:
* or a leaf in a node attribute list.
*/
STATIC int
-xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
+xfs_attr3_leaf_create(
+ struct xfs_da_args *args,
+ xfs_dablk_t blkno,
+ struct xfs_buf **bpp)
{
- xfs_attr_leafblock_t *leaf;
- xfs_attr_leaf_hdr_t *hdr;
- xfs_inode_t *dp;
- xfs_dabuf_t *bp;
- int error;
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_attr3_icleaf_hdr ichdr;
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
+ struct xfs_buf *bp;
+ int error;
+
+ trace_xfs_attr_leaf_create(args);
- dp = args->dp;
- ASSERT(dp != NULL);
error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp,
XFS_ATTR_FORK);
if (error)
- return(error);
- ASSERT(bp != NULL);
- leaf = bp->data;
- memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount));
- hdr = &leaf->hdr;
- hdr->info.magic = cpu_to_be16(XFS_ATTR_LEAF_MAGIC);
- hdr->firstused = cpu_to_be16(XFS_LBSIZE(dp->i_mount));
- if (!hdr->firstused) {
- hdr->firstused = cpu_to_be16(
- XFS_LBSIZE(dp->i_mount) - XFS_ATTR_LEAF_NAME_ALIGN);
- }
+ return error;
+ bp->b_ops = &xfs_attr3_leaf_buf_ops;
+ xfs_trans_buf_set_type(args->trans, bp, XFS_BLFT_ATTR_LEAF_BUF);
+ leaf = bp->b_addr;
+ memset(leaf, 0, args->geo->blksize);
+
+ memset(&ichdr, 0, sizeof(ichdr));
+ ichdr.firstused = args->geo->blksize;
+
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_da3_blkinfo *hdr3 = bp->b_addr;
+
+ ichdr.magic = XFS_ATTR3_LEAF_MAGIC;
- hdr->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t));
- hdr->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr->firstused) -
- sizeof(xfs_attr_leaf_hdr_t));
+ hdr3->blkno = cpu_to_be64(bp->b_bn);
+ hdr3->owner = cpu_to_be64(dp->i_ino);
+ uuid_copy(&hdr3->uuid, &mp->m_sb.sb_uuid);
- xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1);
+ ichdr.freemap[0].base = sizeof(struct xfs_attr3_leaf_hdr);
+ } else {
+ ichdr.magic = XFS_ATTR_LEAF_MAGIC;
+ ichdr.freemap[0].base = sizeof(struct xfs_attr_leaf_hdr);
+ }
+ ichdr.freemap[0].size = ichdr.firstused - ichdr.freemap[0].base;
+
+ xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr);
+ xfs_trans_log_buf(args->trans, bp, 0, args->geo->blksize - 1);
*bpp = bp;
- return(0);
+ return 0;
}
/*
* Split the leaf node, rebalance, then add the new entry.
*/
int
-xfs_attr_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
- xfs_da_state_blk_t *newblk)
+xfs_attr3_leaf_split(
+ struct xfs_da_state *state,
+ struct xfs_da_state_blk *oldblk,
+ struct xfs_da_state_blk *newblk)
{
xfs_dablk_t blkno;
int error;
+ trace_xfs_attr_leaf_split(state->args);
+
/*
* Allocate space for a new leaf node.
*/
@@ -997,7 +1018,7 @@ xfs_attr_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
error = xfs_da_grow_inode(state->args, &blkno);
if (error)
return(error);
- error = xfs_attr_leaf_create(state->args, blkno, &newblk->bp);
+ error = xfs_attr3_leaf_create(state->args, blkno, &newblk->bp);
if (error)
return(error);
newblk->blkno = blkno;
@@ -1007,8 +1028,8 @@ xfs_attr_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
* Rebalance the entries across the two leaves.
* NOTE: rebalance() currently depends on the 2nd block being empty.
*/
- xfs_attr_leaf_rebalance(state, oldblk, newblk);
- error = xfs_da_blk_link(state, oldblk, newblk);
+ xfs_attr3_leaf_rebalance(state, oldblk, newblk);
+ error = xfs_da3_blk_link(state, oldblk, newblk);
if (error)
return(error);
@@ -1019,10 +1040,13 @@ xfs_attr_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
*
* Insert the "new" entry in the correct block.
*/
- if (state->inleaf)
- error = xfs_attr_leaf_add(oldblk->bp, state->args);
- else
- error = xfs_attr_leaf_add(newblk->bp, state->args);
+ if (state->inleaf) {
+ trace_xfs_attr_leaf_add_old(state->args);
+ error = xfs_attr3_leaf_add(oldblk->bp, state->args);
+ } else {
+ trace_xfs_attr_leaf_add_new(state->args);
+ error = xfs_attr3_leaf_add(newblk->bp, state->args);
+ }
/*
* Update last hashval in each block since we added the name.
@@ -1036,44 +1060,46 @@ xfs_attr_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
* Add a name to the leaf attribute list structure.
*/
int
-xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args)
+xfs_attr3_leaf_add(
+ struct xfs_buf *bp,
+ struct xfs_da_args *args)
{
- xfs_attr_leafblock_t *leaf;
- xfs_attr_leaf_hdr_t *hdr;
- xfs_attr_leaf_map_t *map;
- int tablesize, entsize, sum, tmp, i;
-
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- ASSERT((args->index >= 0)
- && (args->index <= be16_to_cpu(leaf->hdr.count)));
- hdr = &leaf->hdr;
- entsize = xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
- args->trans->t_mountp->m_sb.sb_blocksize, NULL);
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_attr3_icleaf_hdr ichdr;
+ int tablesize;
+ int entsize;
+ int sum;
+ int tmp;
+ int i;
+
+ trace_xfs_attr_leaf_add(args);
+
+ leaf = bp->b_addr;
+ xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ ASSERT(args->index >= 0 && args->index <= ichdr.count);
+ entsize = xfs_attr_leaf_newentsize(args, NULL);
/*
* Search through freemap for first-fit on new name length.
* (may need to figure in size of entry struct too)
*/
- tablesize = (be16_to_cpu(hdr->count) + 1)
- * sizeof(xfs_attr_leaf_entry_t)
- + sizeof(xfs_attr_leaf_hdr_t);
- map = &hdr->freemap[XFS_ATTR_LEAF_MAPSIZE-1];
- for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE-1; i >= 0; map--, i--) {
- if (tablesize > be16_to_cpu(hdr->firstused)) {
- sum += be16_to_cpu(map->size);
+ tablesize = (ichdr.count + 1) * sizeof(xfs_attr_leaf_entry_t)
+ + xfs_attr3_leaf_hdr_size(leaf);
+ for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE - 1; i >= 0; i--) {
+ if (tablesize > ichdr.firstused) {
+ sum += ichdr.freemap[i].size;
continue;
}
- if (!map->size)
+ if (!ichdr.freemap[i].size)
continue; /* no space in this map */
tmp = entsize;
- if (be16_to_cpu(map->base) < be16_to_cpu(hdr->firstused))
+ if (ichdr.freemap[i].base < ichdr.firstused)
tmp += sizeof(xfs_attr_leaf_entry_t);
- if (be16_to_cpu(map->size) >= tmp) {
- tmp = xfs_attr_leaf_add_work(bp, args, i);
- return(tmp);
+ if (ichdr.freemap[i].size >= tmp) {
+ tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, i);
+ goto out_log_hdr;
}
- sum += be16_to_cpu(map->size);
+ sum += ichdr.freemap[i].size;
}
/*
@@ -1081,97 +1107,104 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args)
* and we don't have enough freespace, then compaction will do us
* no good and we should just give up.
*/
- if (!hdr->holes && (sum < entsize))
- return(XFS_ERROR(ENOSPC));
+ if (!ichdr.holes && sum < entsize)
+ return XFS_ERROR(ENOSPC);
/*
* Compact the entries to coalesce free space.
* This may change the hdr->count via dropping INCOMPLETE entries.
*/
- xfs_attr_leaf_compact(args->trans, bp);
+ xfs_attr3_leaf_compact(args, &ichdr, bp);
/*
* After compaction, the block is guaranteed to have only one
* free region, in freemap[0]. If it is not big enough, give up.
*/
- if (be16_to_cpu(hdr->freemap[0].size)
- < (entsize + sizeof(xfs_attr_leaf_entry_t)))
- return(XFS_ERROR(ENOSPC));
+ if (ichdr.freemap[0].size < (entsize + sizeof(xfs_attr_leaf_entry_t))) {
+ tmp = ENOSPC;
+ goto out_log_hdr;
+ }
- return(xfs_attr_leaf_add_work(bp, args, 0));
+ tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, 0);
+
+out_log_hdr:
+ xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr);
+ xfs_trans_log_buf(args->trans, bp,
+ XFS_DA_LOGRANGE(leaf, &leaf->hdr,
+ xfs_attr3_leaf_hdr_size(leaf)));
+ return tmp;
}
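
xfs_attr3_leaf_add() above walks the in-core freemap from the last slot downward looking for the first region large enough for the new entry, while summing the free space so it can decide whether compaction could ever help. A simplified sketch of that first-fit-plus-sum scan, ignoring the entry-table growth adjustment the real code makes; struct demo_freemap and the -1/-2 return convention are invented for this sketch:

#define DEMO_MAPSIZE    3       /* stand-in for XFS_ATTR_LEAF_MAPSIZE */

/* Illustrative only: a tiny freemap in the style of the in-core leaf header. */
struct demo_freemap {
        int     base;
        int     size;
};

/*
 * First-fit search over the freemap that also sums the free space, so the
 * caller can tell "no single region fits, but compaction might help" apart
 * from "there is simply not enough room".  Returns the map index on a fit,
 * -1 when only compaction could help, -2 when the entry cannot fit at all.
 */
static int demo_find_freemap_slot(const struct demo_freemap map[DEMO_MAPSIZE],
                                  int entsize)
{
        int     i, sum = 0;

        for (i = DEMO_MAPSIZE - 1; i >= 0; i--) {
                if (map[i].size >= entsize)
                        return i;
                sum += map[i].size;
        }
        return sum >= entsize ? -1 : -2;
}
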
/*
* Add a name to a leaf attribute list structure.
*/
STATIC int
-xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
+xfs_attr3_leaf_add_work(
+ struct xfs_buf *bp,
+ struct xfs_attr3_icleaf_hdr *ichdr,
+ struct xfs_da_args *args,
+ int mapindex)
{
- xfs_attr_leafblock_t *leaf;
- xfs_attr_leaf_hdr_t *hdr;
- xfs_attr_leaf_entry_t *entry;
- xfs_attr_leaf_name_local_t *name_loc;
- xfs_attr_leaf_name_remote_t *name_rmt;
- xfs_attr_leaf_map_t *map;
- xfs_mount_t *mp;
- int tmp, i;
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_attr_leaf_entry *entry;
+ struct xfs_attr_leaf_name_local *name_loc;
+ struct xfs_attr_leaf_name_remote *name_rmt;
+ struct xfs_mount *mp;
+ int tmp;
+ int i;
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- hdr = &leaf->hdr;
- ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE));
- ASSERT((args->index >= 0) && (args->index <= be16_to_cpu(hdr->count)));
+ trace_xfs_attr_leaf_add_work(args);
+
+ leaf = bp->b_addr;
+ ASSERT(mapindex >= 0 && mapindex < XFS_ATTR_LEAF_MAPSIZE);
+ ASSERT(args->index >= 0 && args->index <= ichdr->count);
/*
* Force open some space in the entry array and fill it in.
*/
- entry = &leaf->entries[args->index];
- if (args->index < be16_to_cpu(hdr->count)) {
- tmp = be16_to_cpu(hdr->count) - args->index;
+ entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
+ if (args->index < ichdr->count) {
+ tmp = ichdr->count - args->index;
tmp *= sizeof(xfs_attr_leaf_entry_t);
- memmove((char *)(entry+1), (char *)entry, tmp);
- xfs_da_log_buf(args->trans, bp,
+ memmove(entry + 1, entry, tmp);
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
}
- be16_add_cpu(&hdr->count, 1);
+ ichdr->count++;
/*
* Allocate space for the new string (at the end of the run).
*/
- map = &hdr->freemap[mapindex];
mp = args->trans->t_mountp;
- ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp));
- ASSERT((be16_to_cpu(map->base) & 0x3) == 0);
- ASSERT(be16_to_cpu(map->size) >=
- xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
- mp->m_sb.sb_blocksize, NULL));
- ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp));
- ASSERT((be16_to_cpu(map->size) & 0x3) == 0);
- be16_add_cpu(&map->size,
- -xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
- mp->m_sb.sb_blocksize, &tmp));
- entry->nameidx = cpu_to_be16(be16_to_cpu(map->base) +
- be16_to_cpu(map->size));
+ ASSERT(ichdr->freemap[mapindex].base < args->geo->blksize);
+ ASSERT((ichdr->freemap[mapindex].base & 0x3) == 0);
+ ASSERT(ichdr->freemap[mapindex].size >=
+ xfs_attr_leaf_newentsize(args, NULL));
+ ASSERT(ichdr->freemap[mapindex].size < args->geo->blksize);
+ ASSERT((ichdr->freemap[mapindex].size & 0x3) == 0);
+
+ ichdr->freemap[mapindex].size -= xfs_attr_leaf_newentsize(args, &tmp);
+
+ entry->nameidx = cpu_to_be16(ichdr->freemap[mapindex].base +
+ ichdr->freemap[mapindex].size);
entry->hashval = cpu_to_be32(args->hashval);
entry->flags = tmp ? XFS_ATTR_LOCAL : 0;
entry->flags |= XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
- if (args->rename) {
+ if (args->op_flags & XFS_DA_OP_RENAME) {
entry->flags |= XFS_ATTR_INCOMPLETE;
if ((args->blkno2 == args->blkno) &&
(args->index2 <= args->index)) {
args->index2++;
}
}
- xfs_da_log_buf(args->trans, bp,
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
ASSERT((args->index == 0) ||
(be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval)));
- ASSERT((args->index == be16_to_cpu(hdr->count)-1) ||
+ ASSERT((args->index == ichdr->count - 1) ||
(be32_to_cpu(entry->hashval) <= be32_to_cpu((entry+1)->hashval)));
/*
- * Copy the attribute name and value into the new space.
- *
* For "remote" attribute values, simply note that we need to
* allocate space for the "remote" value. We can't actually
* allocate the extents in this transaction, and we can't decide
@@ -1179,14 +1212,14 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
* as part of this transaction (a split operation for example).
*/
if (entry->flags & XFS_ATTR_LOCAL) {
- name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index);
+ name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
name_loc->namelen = args->namelen;
name_loc->valuelen = cpu_to_be16(args->valuelen);
memcpy((char *)name_loc->nameval, args->name, args->namelen);
memcpy((char *)&name_loc->nameval[args->namelen], args->value,
be16_to_cpu(name_loc->valuelen));
} else {
- name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+ name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
name_rmt->namelen = args->namelen;
memcpy((char *)name_rmt->name, args->name, args->namelen);
entry->flags |= XFS_ATTR_INCOMPLETE;
@@ -1194,84 +1227,129 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
name_rmt->valuelen = 0;
name_rmt->valueblk = 0;
args->rmtblkno = 1;
- args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen);
+ args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
+ args->rmtvaluelen = args->valuelen;
}
- xfs_da_log_buf(args->trans, bp,
- XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index),
+ xfs_trans_log_buf(args->trans, bp,
+ XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
xfs_attr_leaf_entsize(leaf, args->index)));
/*
* Update the control info for this leaf node
*/
- if (be16_to_cpu(entry->nameidx) < be16_to_cpu(hdr->firstused)) {
- /* both on-disk, don't endian-flip twice */
- hdr->firstused = entry->nameidx;
- }
- ASSERT(be16_to_cpu(hdr->firstused) >=
- ((be16_to_cpu(hdr->count) * sizeof(*entry)) + sizeof(*hdr)));
- tmp = (be16_to_cpu(hdr->count)-1) * sizeof(xfs_attr_leaf_entry_t)
- + sizeof(xfs_attr_leaf_hdr_t);
- map = &hdr->freemap[0];
- for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) {
- if (be16_to_cpu(map->base) == tmp) {
- be16_add_cpu(&map->base, sizeof(xfs_attr_leaf_entry_t));
- be16_add_cpu(&map->size,
- -((int)sizeof(xfs_attr_leaf_entry_t)));
+ if (be16_to_cpu(entry->nameidx) < ichdr->firstused)
+ ichdr->firstused = be16_to_cpu(entry->nameidx);
+
+ ASSERT(ichdr->firstused >= ichdr->count * sizeof(xfs_attr_leaf_entry_t)
+ + xfs_attr3_leaf_hdr_size(leaf));
+ tmp = (ichdr->count - 1) * sizeof(xfs_attr_leaf_entry_t)
+ + xfs_attr3_leaf_hdr_size(leaf);
+
+ for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
+ if (ichdr->freemap[i].base == tmp) {
+ ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t);
+ ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t);
}
}
- be16_add_cpu(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index));
- xfs_da_log_buf(args->trans, bp,
- XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
- return(0);
+ ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index);
+ return 0;
}
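
For readers following the freemap arithmetic above: the new name/value pair is carved off the end of a free region by shrinking freemap[mapindex].size and placing nameidx at base + size. A minimal standalone sketch of that bookkeeping follows; the struct and function names are illustrative, not the XFS ones.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct freemap_region {
	uint16_t base;	/* offset of the free region within the block */
	uint16_t size;	/* bytes still available in the region */
};

/*
 * Carve 'entsize' bytes off the end of a free region and return the
 * offset where the new name/value pair starts (base + remaining size).
 */
static uint16_t carve_entry(struct freemap_region *map, uint16_t entsize)
{
	assert(map->size >= entsize);
	map->size -= entsize;
	return map->base + map->size;
}

int main(void)
{
	struct freemap_region map = { .base = 64, .size = 3968 };
	uint16_t nameidx = carve_entry(&map, 24);

	/* prints "nameidx 4008, remaining 3944" */
	printf("nameidx %u, remaining %u\n", nameidx, map.size);
	return 0;
}
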
/*
* Garbage collect a leaf attribute list block by copying it to a new buffer.
*/
STATIC void
-xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp)
+xfs_attr3_leaf_compact(
+ struct xfs_da_args *args,
+ struct xfs_attr3_icleaf_hdr *ichdr_dst,
+ struct xfs_buf *bp)
{
- xfs_attr_leafblock_t *leaf_s, *leaf_d;
- xfs_attr_leaf_hdr_t *hdr_s, *hdr_d;
- xfs_mount_t *mp;
- char *tmpbuffer;
+ struct xfs_attr_leafblock *leaf_src;
+ struct xfs_attr_leafblock *leaf_dst;
+ struct xfs_attr3_icleaf_hdr ichdr_src;
+ struct xfs_trans *trans = args->trans;
+ char *tmpbuffer;
- mp = trans->t_mountp;
- tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP);
- ASSERT(tmpbuffer != NULL);
- memcpy(tmpbuffer, bp->data, XFS_LBSIZE(mp));
- memset(bp->data, 0, XFS_LBSIZE(mp));
+ trace_xfs_attr_leaf_compact(args);
+
+ tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP);
+ memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
+ memset(bp->b_addr, 0, args->geo->blksize);
+ leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
+ leaf_dst = bp->b_addr;
/*
- * Copy basic information
+ * Copy the on-disk header back into the destination buffer to ensure
+ * all the information in the header that is not part of the incore
+ * header structure is preserved.
*/
- leaf_s = (xfs_attr_leafblock_t *)tmpbuffer;
- leaf_d = bp->data;
- hdr_s = &leaf_s->hdr;
- hdr_d = &leaf_d->hdr;
- hdr_d->info = hdr_s->info; /* struct copy */
- hdr_d->firstused = cpu_to_be16(XFS_LBSIZE(mp));
- /* handle truncation gracefully */
- if (!hdr_d->firstused) {
- hdr_d->firstused = cpu_to_be16(
- XFS_LBSIZE(mp) - XFS_ATTR_LEAF_NAME_ALIGN);
- }
- hdr_d->usedbytes = 0;
- hdr_d->count = 0;
- hdr_d->holes = 0;
- hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t));
- hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused) -
- sizeof(xfs_attr_leaf_hdr_t));
+ memcpy(bp->b_addr, tmpbuffer, xfs_attr3_leaf_hdr_size(leaf_src));
+
+ /* Initialise the incore headers */
+ ichdr_src = *ichdr_dst; /* struct copy */
+ ichdr_dst->firstused = args->geo->blksize;
+ ichdr_dst->usedbytes = 0;
+ ichdr_dst->count = 0;
+ ichdr_dst->holes = 0;
+ ichdr_dst->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_src);
+ ichdr_dst->freemap[0].size = ichdr_dst->firstused -
+ ichdr_dst->freemap[0].base;
+
+ /* write the header back to initialise the underlying buffer */
+ xfs_attr3_leaf_hdr_to_disk(leaf_dst, ichdr_dst);
/*
 * Copy all entries in the same (sorted) order,
* but allocate name/value pairs packed and in sequence.
*/
- xfs_attr_leaf_moveents(leaf_s, 0, leaf_d, 0,
- be16_to_cpu(hdr_s->count), mp);
- xfs_da_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1);
+ xfs_attr3_leaf_moveents(args, leaf_src, &ichdr_src, 0,
+ leaf_dst, ichdr_dst, 0, ichdr_src.count);
+ /*
+ * this logs the entire buffer, but the caller must write the header
+ * back to the buffer when it is finished modifying it.
+ */
+ xfs_trans_log_buf(trans, bp, 0, args->geo->blksize - 1);
+
+ kmem_free(tmpbuffer);
+}
+
+/*
+ * Compare two leaf blocks' "order".
+ * Return 0 unless leaf2 should go before leaf1.
+ */
+static int
+xfs_attr3_leaf_order(
+ struct xfs_buf *leaf1_bp,
+ struct xfs_attr3_icleaf_hdr *leaf1hdr,
+ struct xfs_buf *leaf2_bp,
+ struct xfs_attr3_icleaf_hdr *leaf2hdr)
+{
+ struct xfs_attr_leaf_entry *entries1;
+ struct xfs_attr_leaf_entry *entries2;
+
+ entries1 = xfs_attr3_leaf_entryp(leaf1_bp->b_addr);
+ entries2 = xfs_attr3_leaf_entryp(leaf2_bp->b_addr);
+ if (leaf1hdr->count > 0 && leaf2hdr->count > 0 &&
+ ((be32_to_cpu(entries2[0].hashval) <
+ be32_to_cpu(entries1[0].hashval)) ||
+ (be32_to_cpu(entries2[leaf2hdr->count - 1].hashval) <
+ be32_to_cpu(entries1[leaf1hdr->count - 1].hashval)))) {
+ return 1;
+ }
+ return 0;
+}
+
+int
+xfs_attr_leaf_order(
+ struct xfs_buf *leaf1_bp,
+ struct xfs_buf *leaf2_bp)
+{
+ struct xfs_attr3_icleaf_hdr ichdr1;
+ struct xfs_attr3_icleaf_hdr ichdr2;
- kmem_free(tmpbuffer, XFS_LBSIZE(mp));
+ xfs_attr3_leaf_hdr_from_disk(&ichdr1, leaf1_bp->b_addr);
+ xfs_attr3_leaf_hdr_from_disk(&ichdr2, leaf2_bp->b_addr);
+ return xfs_attr3_leaf_order(leaf1_bp, &ichdr1, leaf2_bp, &ichdr2);
}
/*
@@ -1287,26 +1365,38 @@ xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp)
* the "new" and "old" values can end up in different blocks.
*/
STATIC void
-xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
- xfs_da_state_blk_t *blk2)
+xfs_attr3_leaf_rebalance(
+ struct xfs_da_state *state,
+ struct xfs_da_state_blk *blk1,
+ struct xfs_da_state_blk *blk2)
{
- xfs_da_args_t *args;
- xfs_da_state_blk_t *tmp_blk;
- xfs_attr_leafblock_t *leaf1, *leaf2;
- xfs_attr_leaf_hdr_t *hdr1, *hdr2;
- int count, totallen, max, space, swap;
+ struct xfs_da_args *args;
+ struct xfs_attr_leafblock *leaf1;
+ struct xfs_attr_leafblock *leaf2;
+ struct xfs_attr3_icleaf_hdr ichdr1;
+ struct xfs_attr3_icleaf_hdr ichdr2;
+ struct xfs_attr_leaf_entry *entries1;
+ struct xfs_attr_leaf_entry *entries2;
+ int count;
+ int totallen;
+ int max;
+ int space;
+ int swap;
/*
* Set up environment.
*/
ASSERT(blk1->magic == XFS_ATTR_LEAF_MAGIC);
ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC);
- leaf1 = blk1->bp->data;
- leaf2 = blk2->bp->data;
- ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ leaf1 = blk1->bp->b_addr;
+ leaf2 = blk2->bp->b_addr;
+ xfs_attr3_leaf_hdr_from_disk(&ichdr1, leaf1);
+ xfs_attr3_leaf_hdr_from_disk(&ichdr2, leaf2);
+ ASSERT(ichdr2.count == 0);
args = state->args;
+ trace_xfs_attr_leaf_rebalance(args);
+
/*
* Check ordering of blocks, reverse if it makes things simpler.
*
@@ -1314,16 +1404,23 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
* second block, this code should never set "swap".
*/
swap = 0;
- if (xfs_attr_leaf_order(blk1->bp, blk2->bp)) {
+ if (xfs_attr3_leaf_order(blk1->bp, &ichdr1, blk2->bp, &ichdr2)) {
+ struct xfs_da_state_blk *tmp_blk;
+ struct xfs_attr3_icleaf_hdr tmp_ichdr;
+
tmp_blk = blk1;
blk1 = blk2;
blk2 = tmp_blk;
- leaf1 = blk1->bp->data;
- leaf2 = blk2->bp->data;
+
+ /* struct copies to swap them rather than reconverting */
+ tmp_ichdr = ichdr1;
+ ichdr1 = ichdr2;
+ ichdr2 = tmp_ichdr;
+
+ leaf1 = blk1->bp->b_addr;
+ leaf2 = blk2->bp->b_addr;
swap = 1;
}
- hdr1 = &leaf1->hdr;
- hdr2 = &leaf2->hdr;
/*
* Examine entries until we reduce the absolute difference in
@@ -1333,82 +1430,80 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
* "inleaf" is true if the new entry should be inserted into blk1.
* If "swap" is also true, then reverse the sense of "inleaf".
*/
- state->inleaf = xfs_attr_leaf_figure_balance(state, blk1, blk2,
- &count, &totallen);
+ state->inleaf = xfs_attr3_leaf_figure_balance(state, blk1, &ichdr1,
+ blk2, &ichdr2,
+ &count, &totallen);
if (swap)
state->inleaf = !state->inleaf;
/*
* Move any entries required from leaf to leaf:
*/
- if (count < be16_to_cpu(hdr1->count)) {
+ if (count < ichdr1.count) {
/*
* Figure the total bytes to be added to the destination leaf.
*/
 /* number of entries being moved */
- count = be16_to_cpu(hdr1->count) - count;
- space = be16_to_cpu(hdr1->usedbytes) - totallen;
+ count = ichdr1.count - count;
+ space = ichdr1.usedbytes - totallen;
space += count * sizeof(xfs_attr_leaf_entry_t);
/*
* leaf2 is the destination, compact it if it looks tight.
*/
- max = be16_to_cpu(hdr2->firstused)
- - sizeof(xfs_attr_leaf_hdr_t);
- max -= be16_to_cpu(hdr2->count) * sizeof(xfs_attr_leaf_entry_t);
- if (space > max) {
- xfs_attr_leaf_compact(args->trans, blk2->bp);
- }
+ max = ichdr2.firstused - xfs_attr3_leaf_hdr_size(leaf1);
+ max -= ichdr2.count * sizeof(xfs_attr_leaf_entry_t);
+ if (space > max)
+ xfs_attr3_leaf_compact(args, &ichdr2, blk2->bp);
/*
* Move high entries from leaf1 to low end of leaf2.
*/
- xfs_attr_leaf_moveents(leaf1, be16_to_cpu(hdr1->count) - count,
- leaf2, 0, count, state->mp);
+ xfs_attr3_leaf_moveents(args, leaf1, &ichdr1,
+ ichdr1.count - count, leaf2, &ichdr2, 0, count);
- xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
- xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
- } else if (count > be16_to_cpu(hdr1->count)) {
+ } else if (count > ichdr1.count) {
/*
* I assert that since all callers pass in an empty
* second buffer, this code should never execute.
*/
+ ASSERT(0);
/*
* Figure the total bytes to be added to the destination leaf.
*/
 /* number of entries being moved */
- count -= be16_to_cpu(hdr1->count);
- space = totallen - be16_to_cpu(hdr1->usedbytes);
+ count -= ichdr1.count;
+ space = totallen - ichdr1.usedbytes;
space += count * sizeof(xfs_attr_leaf_entry_t);
/*
* leaf1 is the destination, compact it if it looks tight.
*/
- max = be16_to_cpu(hdr1->firstused)
- - sizeof(xfs_attr_leaf_hdr_t);
- max -= be16_to_cpu(hdr1->count) * sizeof(xfs_attr_leaf_entry_t);
- if (space > max) {
- xfs_attr_leaf_compact(args->trans, blk1->bp);
- }
+ max = ichdr1.firstused - xfs_attr3_leaf_hdr_size(leaf1);
+ max -= ichdr1.count * sizeof(xfs_attr_leaf_entry_t);
+ if (space > max)
+ xfs_attr3_leaf_compact(args, &ichdr1, blk1->bp);
/*
* Move low entries from leaf2 to high end of leaf1.
*/
- xfs_attr_leaf_moveents(leaf2, 0, leaf1,
- be16_to_cpu(hdr1->count), count, state->mp);
-
- xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
- xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
+ xfs_attr3_leaf_moveents(args, leaf2, &ichdr2, 0, leaf1, &ichdr1,
+ ichdr1.count, count);
}
+ xfs_attr3_leaf_hdr_to_disk(leaf1, &ichdr1);
+ xfs_attr3_leaf_hdr_to_disk(leaf2, &ichdr2);
+ xfs_trans_log_buf(args->trans, blk1->bp, 0, args->geo->blksize - 1);
+ xfs_trans_log_buf(args->trans, blk2->bp, 0, args->geo->blksize - 1);
+
/*
* Copy out last hashval in each block for B-tree code.
*/
- blk1->hashval = be32_to_cpu(
- leaf1->entries[be16_to_cpu(leaf1->hdr.count)-1].hashval);
- blk2->hashval = be32_to_cpu(
- leaf2->entries[be16_to_cpu(leaf2->hdr.count)-1].hashval);
+ entries1 = xfs_attr3_leaf_entryp(leaf1);
+ entries2 = xfs_attr3_leaf_entryp(leaf2);
+ blk1->hashval = be32_to_cpu(entries1[ichdr1.count - 1].hashval);
+ blk2->hashval = be32_to_cpu(entries2[ichdr2.count - 1].hashval);
/*
* Adjust the expected index for insertion.
@@ -1422,22 +1517,35 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
* inserting. The index/blkno fields refer to the "old" entry,
* while the index2/blkno2 fields refer to the "new" entry.
*/
- if (blk1->index > be16_to_cpu(leaf1->hdr.count)) {
+ if (blk1->index > ichdr1.count) {
ASSERT(state->inleaf == 0);
- blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count);
+ blk2->index = blk1->index - ichdr1.count;
args->index = args->index2 = blk2->index;
args->blkno = args->blkno2 = blk2->blkno;
- } else if (blk1->index == be16_to_cpu(leaf1->hdr.count)) {
+ } else if (blk1->index == ichdr1.count) {
if (state->inleaf) {
args->index = blk1->index;
args->blkno = blk1->blkno;
args->index2 = 0;
args->blkno2 = blk2->blkno;
} else {
- blk2->index = blk1->index
- - be16_to_cpu(leaf1->hdr.count);
- args->index = args->index2 = blk2->index;
- args->blkno = args->blkno2 = blk2->blkno;
+ /*
+ * On a double leaf split, the original attr location
+ * is already stored in blkno2/index2, so don't
+ * overwrite it, otherwise we corrupt the tree.
+ */
+ blk2->index = blk1->index - ichdr1.count;
+ args->index = blk2->index;
+ args->blkno = blk2->blkno;
+ if (!state->extravalid) {
+ /*
+ * set the new attr location to match the old
+ * one and let the higher level split code
+ * decide where in the leaf to place it.
+ */
+ args->index2 = blk2->index;
+ args->blkno2 = blk2->blkno;
+ }
}
} else {
ASSERT(state->inleaf == 1);
@@ -1454,42 +1562,38 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
* GROT: Do a double-split for this case?
*/
STATIC int
-xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
- xfs_da_state_blk_t *blk1,
- xfs_da_state_blk_t *blk2,
- int *countarg, int *usedbytesarg)
+xfs_attr3_leaf_figure_balance(
+ struct xfs_da_state *state,
+ struct xfs_da_state_blk *blk1,
+ struct xfs_attr3_icleaf_hdr *ichdr1,
+ struct xfs_da_state_blk *blk2,
+ struct xfs_attr3_icleaf_hdr *ichdr2,
+ int *countarg,
+ int *usedbytesarg)
{
- xfs_attr_leafblock_t *leaf1, *leaf2;
- xfs_attr_leaf_hdr_t *hdr1, *hdr2;
- xfs_attr_leaf_entry_t *entry;
- int count, max, index, totallen, half;
- int lastdelta, foundit, tmp;
-
- /*
- * Set up environment.
- */
- leaf1 = blk1->bp->data;
- leaf2 = blk2->bp->data;
- hdr1 = &leaf1->hdr;
- hdr2 = &leaf2->hdr;
- foundit = 0;
- totallen = 0;
+ struct xfs_attr_leafblock *leaf1 = blk1->bp->b_addr;
+ struct xfs_attr_leafblock *leaf2 = blk2->bp->b_addr;
+ struct xfs_attr_leaf_entry *entry;
+ int count;
+ int max;
+ int index;
+ int totallen = 0;
+ int half;
+ int lastdelta;
+ int foundit = 0;
+ int tmp;
/*
* Examine entries until we reduce the absolute difference in
* byte usage between the two blocks to a minimum.
*/
- max = be16_to_cpu(hdr1->count) + be16_to_cpu(hdr2->count);
- half = (max+1) * sizeof(*entry);
- half += be16_to_cpu(hdr1->usedbytes) +
- be16_to_cpu(hdr2->usedbytes) +
- xfs_attr_leaf_newentsize(
- state->args->namelen,
- state->args->valuelen,
- state->blocksize, NULL);
+ max = ichdr1->count + ichdr2->count;
+ half = (max + 1) * sizeof(*entry);
+ half += ichdr1->usedbytes + ichdr2->usedbytes +
+ xfs_attr_leaf_newentsize(state->args, NULL);
half /= 2;
- lastdelta = state->blocksize;
- entry = &leaf1->entries[0];
+ lastdelta = state->args->geo->blksize;
+ entry = xfs_attr3_leaf_entryp(leaf1);
for (count = index = 0; count < max; entry++, index++, count++) {
#define XFS_ATTR_ABS(A) (((A) < 0) ? -(A) : (A))
@@ -1498,10 +1602,7 @@ xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
*/
if (count == blk1->index) {
tmp = totallen + sizeof(*entry) +
- xfs_attr_leaf_newentsize(
- state->args->namelen,
- state->args->valuelen,
- state->blocksize, NULL);
+ xfs_attr_leaf_newentsize(state->args, NULL);
if (XFS_ATTR_ABS(half - tmp) > lastdelta)
break;
lastdelta = XFS_ATTR_ABS(half - tmp);
@@ -1512,9 +1613,9 @@ xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
/*
* Wrap around into the second block if necessary.
*/
- if (count == be16_to_cpu(hdr1->count)) {
+ if (count == ichdr1->count) {
leaf1 = leaf2;
- entry = &leaf1->entries[0];
+ entry = xfs_attr3_leaf_entryp(leaf1);
index = 0;
}
@@ -1537,15 +1638,12 @@ xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
totallen -= count * sizeof(*entry);
if (foundit) {
totallen -= sizeof(*entry) +
- xfs_attr_leaf_newentsize(
- state->args->namelen,
- state->args->valuelen,
- state->blocksize, NULL);
+ xfs_attr_leaf_newentsize(state->args, NULL);
}
*countarg = count;
*usedbytesarg = totallen;
- return(foundit);
+ return foundit;
}
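
The balance point computed above walks entries in hash order and stops where the running byte count gets as close as possible to half of the combined usage, tracked as |half - tmp|. A simplified standalone sketch of that search follows; it ignores the real function's handling of the entry being inserted and its per-entry overheads, and the names are illustrative.

#include <stdio.h>
#include <stdlib.h>

/*
 * Walk entries in sorted order and stop where the running byte count
 * gets closest to half of the total, mirroring the |half - tmp|
 * minimisation above.  Returns how many entries stay in the first block.
 */
static int find_split(const int *entsize, int nentries, int *bytes_kept)
{
	int total = 0, half, running = 0, lastdelta, i;

	for (i = 0; i < nentries; i++)
		total += entsize[i];
	half = total / 2;
	lastdelta = total;	/* worst possible delta */

	for (i = 0; i < nentries; i++) {
		int tmp = running + entsize[i];

		if (abs(half - tmp) > lastdelta)
			break;	/* adding this entry moves us away from half */
		lastdelta = abs(half - tmp);
		running = tmp;
	}
	*bytes_kept = running;
	return i;
}

int main(void)
{
	int sizes[] = { 40, 24, 96, 32, 60, 28 };
	int used;
	int count = find_split(sizes, 6, &used);

	/* prints "keep 3 entries (160 bytes) in block 1" */
	printf("keep %d entries (%d bytes) in block 1\n", count, used);
	return 0;
}
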
/*========================================================================
@@ -1564,14 +1662,22 @@ xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
* GROT: allow for INCOMPLETE entries in calculation.
*/
int
-xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
+xfs_attr3_leaf_toosmall(
+ struct xfs_da_state *state,
+ int *action)
{
- xfs_attr_leafblock_t *leaf;
- xfs_da_state_blk_t *blk;
- xfs_da_blkinfo_t *info;
- int count, bytes, forward, error, retval, i;
- xfs_dablk_t blkno;
- xfs_dabuf_t *bp;
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_da_state_blk *blk;
+ struct xfs_attr3_icleaf_hdr ichdr;
+ struct xfs_buf *bp;
+ xfs_dablk_t blkno;
+ int bytes;
+ int forward;
+ int error;
+ int retval;
+ int i;
+
+ trace_xfs_attr_leaf_toosmall(state->args);
/*
* Check for the degenerate case of the block being over 50% full.
@@ -1579,14 +1685,12 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
* to coalesce with a sibling.
*/
blk = &state->path.blk[ state->path.active-1 ];
- info = blk->bp->data;
- ASSERT(be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC);
- leaf = (xfs_attr_leafblock_t *)info;
- count = be16_to_cpu(leaf->hdr.count);
- bytes = sizeof(xfs_attr_leaf_hdr_t) +
- count * sizeof(xfs_attr_leaf_entry_t) +
- be16_to_cpu(leaf->hdr.usedbytes);
- if (bytes > (state->blocksize >> 1)) {
+ leaf = blk->bp->b_addr;
+ xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ bytes = xfs_attr3_leaf_hdr_size(leaf) +
+ ichdr.count * sizeof(xfs_attr_leaf_entry_t) +
+ ichdr.usedbytes;
+ if (bytes > (state->args->geo->blksize >> 1)) {
*action = 0; /* blk over 50%, don't try to join */
return(0);
}
@@ -1597,14 +1701,14 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
* coalesce it with a sibling block. We choose (arbitrarily)
* to merge with the forward block unless it is NULL.
*/
- if (count == 0) {
+ if (ichdr.count == 0) {
/*
* Make altpath point to the block we want to keep and
* path point to the block we want to drop (this one).
*/
- forward = (info->forw != 0);
+ forward = (ichdr.forw != 0);
memcpy(&state->altpath, &state->path, sizeof(state->path));
- error = xfs_da_path_shift(state, &state->altpath, forward,
+ error = xfs_da3_path_shift(state, &state->altpath, forward,
0, &retval);
if (error)
return(error);
@@ -1613,7 +1717,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
} else {
*action = 2;
}
- return(0);
+ return 0;
}
/*
@@ -1624,31 +1728,30 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
* to shrink an attribute list over time.
*/
/* start with smaller blk num */
- forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
+ forward = ichdr.forw < ichdr.back;
for (i = 0; i < 2; forward = !forward, i++) {
+ struct xfs_attr3_icleaf_hdr ichdr2;
if (forward)
- blkno = be32_to_cpu(info->forw);
+ blkno = ichdr.forw;
else
- blkno = be32_to_cpu(info->back);
+ blkno = ichdr.back;
if (blkno == 0)
continue;
- error = xfs_da_read_buf(state->args->trans, state->args->dp,
- blkno, -1, &bp, XFS_ATTR_FORK);
+ error = xfs_attr3_leaf_read(state->args->trans, state->args->dp,
+ blkno, -1, &bp);
if (error)
return(error);
- ASSERT(bp != NULL);
-
- leaf = (xfs_attr_leafblock_t *)info;
- count = be16_to_cpu(leaf->hdr.count);
- bytes = state->blocksize - (state->blocksize>>2);
- bytes -= be16_to_cpu(leaf->hdr.usedbytes);
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- count += be16_to_cpu(leaf->hdr.count);
- bytes -= be16_to_cpu(leaf->hdr.usedbytes);
- bytes -= count * sizeof(xfs_attr_leaf_entry_t);
- bytes -= sizeof(xfs_attr_leaf_hdr_t);
- xfs_da_brelse(state->args->trans, bp);
+
+ xfs_attr3_leaf_hdr_from_disk(&ichdr2, bp->b_addr);
+
+ bytes = state->args->geo->blksize -
+ (state->args->geo->blksize >> 2) -
+ ichdr.usedbytes - ichdr2.usedbytes -
+ ((ichdr.count + ichdr2.count) *
+ sizeof(xfs_attr_leaf_entry_t)) -
+ xfs_attr3_leaf_hdr_size(leaf);
+
+ xfs_trans_brelse(state->args->trans, bp);
if (bytes >= 0)
break; /* fits with at least 25% to spare */
}
@@ -1663,10 +1766,10 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
*/
memcpy(&state->altpath, &state->path, sizeof(state->path));
if (blkno < blk->blkno) {
- error = xfs_da_path_shift(state, &state->altpath, forward,
+ error = xfs_da3_path_shift(state, &state->altpath, forward,
0, &retval);
} else {
- error = xfs_da_path_shift(state, &state->path, forward,
+ error = xfs_da3_path_shift(state, &state->path, forward,
0, &retval);
}
if (error)
@@ -1686,29 +1789,35 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
* If two leaves are 37% full, when combined they will leave 25% free.
*/
int
-xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
+xfs_attr3_leaf_remove(
+ struct xfs_buf *bp,
+ struct xfs_da_args *args)
{
- xfs_attr_leafblock_t *leaf;
- xfs_attr_leaf_hdr_t *hdr;
- xfs_attr_leaf_map_t *map;
- xfs_attr_leaf_entry_t *entry;
- int before, after, smallest, entsize;
- int tablesize, tmp, i;
- xfs_mount_t *mp;
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_attr3_icleaf_hdr ichdr;
+ struct xfs_attr_leaf_entry *entry;
+ int before;
+ int after;
+ int smallest;
+ int entsize;
+ int tablesize;
+ int tmp;
+ int i;
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- hdr = &leaf->hdr;
- mp = args->trans->t_mountp;
- ASSERT((be16_to_cpu(hdr->count) > 0)
- && (be16_to_cpu(hdr->count) < (XFS_LBSIZE(mp)/8)));
- ASSERT((args->index >= 0)
- && (args->index < be16_to_cpu(hdr->count)));
- ASSERT(be16_to_cpu(hdr->firstused) >=
- ((be16_to_cpu(hdr->count) * sizeof(*entry)) + sizeof(*hdr)));
- entry = &leaf->entries[args->index];
- ASSERT(be16_to_cpu(entry->nameidx) >= be16_to_cpu(hdr->firstused));
- ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp));
+ trace_xfs_attr_leaf_remove(args);
+
+ leaf = bp->b_addr;
+ xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+
+ ASSERT(ichdr.count > 0 && ichdr.count < args->geo->blksize / 8);
+ ASSERT(args->index >= 0 && args->index < ichdr.count);
+ ASSERT(ichdr.firstused >= ichdr.count * sizeof(*entry) +
+ xfs_attr3_leaf_hdr_size(leaf));
+
+ entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
+
+ ASSERT(be16_to_cpu(entry->nameidx) >= ichdr.firstused);
+ ASSERT(be16_to_cpu(entry->nameidx) < args->geo->blksize);
/*
* Scan through free region table:
@@ -1716,30 +1825,28 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
* find smallest free region in case we need to replace it,
* adjust any map that borders the entry table,
*/
- tablesize = be16_to_cpu(hdr->count) * sizeof(xfs_attr_leaf_entry_t)
- + sizeof(xfs_attr_leaf_hdr_t);
- map = &hdr->freemap[0];
- tmp = be16_to_cpu(map->size);
+ tablesize = ichdr.count * sizeof(xfs_attr_leaf_entry_t)
+ + xfs_attr3_leaf_hdr_size(leaf);
+ tmp = ichdr.freemap[0].size;
before = after = -1;
smallest = XFS_ATTR_LEAF_MAPSIZE - 1;
entsize = xfs_attr_leaf_entsize(leaf, args->index);
- for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) {
- ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp));
- ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp));
- if (be16_to_cpu(map->base) == tablesize) {
- be16_add_cpu(&map->base,
- -((int)sizeof(xfs_attr_leaf_entry_t)));
- be16_add_cpu(&map->size, sizeof(xfs_attr_leaf_entry_t));
+ for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
+ ASSERT(ichdr.freemap[i].base < args->geo->blksize);
+ ASSERT(ichdr.freemap[i].size < args->geo->blksize);
+ if (ichdr.freemap[i].base == tablesize) {
+ ichdr.freemap[i].base -= sizeof(xfs_attr_leaf_entry_t);
+ ichdr.freemap[i].size += sizeof(xfs_attr_leaf_entry_t);
}
- if ((be16_to_cpu(map->base) + be16_to_cpu(map->size))
- == be16_to_cpu(entry->nameidx)) {
+ if (ichdr.freemap[i].base + ichdr.freemap[i].size ==
+ be16_to_cpu(entry->nameidx)) {
before = i;
- } else if (be16_to_cpu(map->base)
- == (be16_to_cpu(entry->nameidx) + entsize)) {
+ } else if (ichdr.freemap[i].base ==
+ (be16_to_cpu(entry->nameidx) + entsize)) {
after = i;
- } else if (be16_to_cpu(map->size) < tmp) {
- tmp = be16_to_cpu(map->size);
+ } else if (ichdr.freemap[i].size < tmp) {
+ tmp = ichdr.freemap[i].size;
smallest = i;
}
}
@@ -1750,36 +1857,30 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
*/
if ((before >= 0) || (after >= 0)) {
if ((before >= 0) && (after >= 0)) {
- map = &hdr->freemap[before];
- be16_add_cpu(&map->size, entsize);
- be16_add_cpu(&map->size,
- be16_to_cpu(hdr->freemap[after].size));
- hdr->freemap[after].base = 0;
- hdr->freemap[after].size = 0;
+ ichdr.freemap[before].size += entsize;
+ ichdr.freemap[before].size += ichdr.freemap[after].size;
+ ichdr.freemap[after].base = 0;
+ ichdr.freemap[after].size = 0;
} else if (before >= 0) {
- map = &hdr->freemap[before];
- be16_add_cpu(&map->size, entsize);
+ ichdr.freemap[before].size += entsize;
} else {
- map = &hdr->freemap[after];
- /* both on-disk, don't endian flip twice */
- map->base = entry->nameidx;
- be16_add_cpu(&map->size, entsize);
+ ichdr.freemap[after].base = be16_to_cpu(entry->nameidx);
+ ichdr.freemap[after].size += entsize;
}
} else {
/*
* Replace smallest region (if it is smaller than free'd entry)
*/
- map = &hdr->freemap[smallest];
- if (be16_to_cpu(map->size) < entsize) {
- map->base = cpu_to_be16(be16_to_cpu(entry->nameidx));
- map->size = cpu_to_be16(entsize);
+ if (ichdr.freemap[smallest].size < entsize) {
+ ichdr.freemap[smallest].base = be16_to_cpu(entry->nameidx);
+ ichdr.freemap[smallest].size = entsize;
}
}
/*
* Did we remove the first entry?
*/
- if (be16_to_cpu(entry->nameidx) == be16_to_cpu(hdr->firstused))
+ if (be16_to_cpu(entry->nameidx) == ichdr.firstused)
smallest = 1;
else
smallest = 0;
@@ -1787,20 +1888,20 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
/*
* Compress the remaining entries and zero out the removed stuff.
*/
- memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize);
- be16_add_cpu(&hdr->usedbytes, -entsize);
- xfs_da_log_buf(args->trans, bp,
- XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index),
+ memset(xfs_attr3_leaf_name(leaf, args->index), 0, entsize);
+ ichdr.usedbytes -= entsize;
+ xfs_trans_log_buf(args->trans, bp,
+ XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
entsize));
- tmp = (be16_to_cpu(hdr->count) - args->index)
- * sizeof(xfs_attr_leaf_entry_t);
- memmove((char *)entry, (char *)(entry+1), tmp);
- be16_add_cpu(&hdr->count, -1);
- xfs_da_log_buf(args->trans, bp,
- XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
- entry = &leaf->entries[be16_to_cpu(hdr->count)];
- memset((char *)entry, 0, sizeof(xfs_attr_leaf_entry_t));
+ tmp = (ichdr.count - args->index) * sizeof(xfs_attr_leaf_entry_t);
+ memmove(entry, entry + 1, tmp);
+ ichdr.count--;
+ xfs_trans_log_buf(args->trans, bp,
+ XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(xfs_attr_leaf_entry_t)));
+
+ entry = &xfs_attr3_leaf_entryp(leaf)[ichdr.count];
+ memset(entry, 0, sizeof(xfs_attr_leaf_entry_t));
/*
* If we removed the first entry, re-find the first used byte
@@ -1809,129 +1910,146 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
* removing the name.
*/
if (smallest) {
- tmp = XFS_LBSIZE(mp);
- entry = &leaf->entries[0];
- for (i = be16_to_cpu(hdr->count)-1; i >= 0; entry++, i--) {
- ASSERT(be16_to_cpu(entry->nameidx) >=
- be16_to_cpu(hdr->firstused));
- ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp));
+ tmp = args->geo->blksize;
+ entry = xfs_attr3_leaf_entryp(leaf);
+ for (i = ichdr.count - 1; i >= 0; entry++, i--) {
+ ASSERT(be16_to_cpu(entry->nameidx) >= ichdr.firstused);
+ ASSERT(be16_to_cpu(entry->nameidx) < args->geo->blksize);
if (be16_to_cpu(entry->nameidx) < tmp)
tmp = be16_to_cpu(entry->nameidx);
}
- hdr->firstused = cpu_to_be16(tmp);
- if (!hdr->firstused) {
- hdr->firstused = cpu_to_be16(
- tmp - XFS_ATTR_LEAF_NAME_ALIGN);
- }
+ ichdr.firstused = tmp;
+ if (!ichdr.firstused)
+ ichdr.firstused = tmp - XFS_ATTR_LEAF_NAME_ALIGN;
} else {
- hdr->holes = 1; /* mark as needing compaction */
+ ichdr.holes = 1; /* mark as needing compaction */
}
- xfs_da_log_buf(args->trans, bp,
- XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
+ xfs_attr3_leaf_hdr_to_disk(leaf, &ichdr);
+ xfs_trans_log_buf(args->trans, bp,
+ XFS_DA_LOGRANGE(leaf, &leaf->hdr,
+ xfs_attr3_leaf_hdr_size(leaf)));
/*
* Check if leaf is less than 50% full, caller may want to
* "join" the leaf with a sibling if so.
*/
- tmp = sizeof(xfs_attr_leaf_hdr_t);
- tmp += be16_to_cpu(leaf->hdr.count) * sizeof(xfs_attr_leaf_entry_t);
- tmp += be16_to_cpu(leaf->hdr.usedbytes);
- return(tmp < mp->m_attr_magicpct); /* leaf is < 37% full */
+ tmp = ichdr.usedbytes + xfs_attr3_leaf_hdr_size(leaf) +
+ ichdr.count * sizeof(xfs_attr_leaf_entry_t);
+
+ return tmp < args->geo->magicpct; /* leaf is < 37% full */
}
/*
* Move all the attribute list entries from drop_leaf into save_leaf.
*/
void
-xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
- xfs_da_state_blk_t *save_blk)
+xfs_attr3_leaf_unbalance(
+ struct xfs_da_state *state,
+ struct xfs_da_state_blk *drop_blk,
+ struct xfs_da_state_blk *save_blk)
{
- xfs_attr_leafblock_t *drop_leaf, *save_leaf, *tmp_leaf;
- xfs_attr_leaf_hdr_t *drop_hdr, *save_hdr, *tmp_hdr;
- xfs_mount_t *mp;
- char *tmpbuffer;
+ struct xfs_attr_leafblock *drop_leaf = drop_blk->bp->b_addr;
+ struct xfs_attr_leafblock *save_leaf = save_blk->bp->b_addr;
+ struct xfs_attr3_icleaf_hdr drophdr;
+ struct xfs_attr3_icleaf_hdr savehdr;
+ struct xfs_attr_leaf_entry *entry;
- /*
- * Set up environment.
- */
- mp = state->mp;
- ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC);
- ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC);
- drop_leaf = drop_blk->bp->data;
- save_leaf = save_blk->bp->data;
- ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- drop_hdr = &drop_leaf->hdr;
- save_hdr = &save_leaf->hdr;
+ trace_xfs_attr_leaf_unbalance(state->args);
+
+ drop_leaf = drop_blk->bp->b_addr;
+ save_leaf = save_blk->bp->b_addr;
+ xfs_attr3_leaf_hdr_from_disk(&drophdr, drop_leaf);
+ xfs_attr3_leaf_hdr_from_disk(&savehdr, save_leaf);
+ entry = xfs_attr3_leaf_entryp(drop_leaf);
/*
* Save last hashval from dying block for later Btree fixup.
*/
- drop_blk->hashval = be32_to_cpu(
- drop_leaf->entries[be16_to_cpu(drop_leaf->hdr.count)-1].hashval);
+ drop_blk->hashval = be32_to_cpu(entry[drophdr.count - 1].hashval);
/*
* Check if we need a temp buffer, or can we do it in place.
* Note that we don't check "leaf" for holes because we will
* always be dropping it, toosmall() decided that for us already.
*/
- if (save_hdr->holes == 0) {
+ if (savehdr.holes == 0) {
/*
* dest leaf has no holes, so we add there. May need
* to make some room in the entry array.
*/
- if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) {
- xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf, 0,
- be16_to_cpu(drop_hdr->count), mp);
+ if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
+ drop_blk->bp, &drophdr)) {
+ xfs_attr3_leaf_moveents(state->args,
+ drop_leaf, &drophdr, 0,
+ save_leaf, &savehdr, 0,
+ drophdr.count);
} else {
- xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf,
- be16_to_cpu(save_hdr->count),
- be16_to_cpu(drop_hdr->count), mp);
+ xfs_attr3_leaf_moveents(state->args,
+ drop_leaf, &drophdr, 0,
+ save_leaf, &savehdr,
+ savehdr.count, drophdr.count);
}
} else {
/*
* Destination has holes, so we make a temporary copy
* of the leaf and add them both to that.
*/
- tmpbuffer = kmem_alloc(state->blocksize, KM_SLEEP);
- ASSERT(tmpbuffer != NULL);
- memset(tmpbuffer, 0, state->blocksize);
- tmp_leaf = (xfs_attr_leafblock_t *)tmpbuffer;
- tmp_hdr = &tmp_leaf->hdr;
- tmp_hdr->info = save_hdr->info; /* struct copy */
- tmp_hdr->count = 0;
- tmp_hdr->firstused = cpu_to_be16(state->blocksize);
- if (!tmp_hdr->firstused) {
- tmp_hdr->firstused = cpu_to_be16(
- state->blocksize - XFS_ATTR_LEAF_NAME_ALIGN);
- }
- tmp_hdr->usedbytes = 0;
- if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) {
- xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf, 0,
- be16_to_cpu(drop_hdr->count), mp);
- xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf,
- be16_to_cpu(tmp_leaf->hdr.count),
- be16_to_cpu(save_hdr->count), mp);
+ struct xfs_attr_leafblock *tmp_leaf;
+ struct xfs_attr3_icleaf_hdr tmphdr;
+
+ tmp_leaf = kmem_zalloc(state->args->geo->blksize, KM_SLEEP);
+
+ /*
+ * Copy the header into the temp leaf so that all the stuff
+ * not in the incore header is present and gets copied back in
+ * once we've moved all the entries.
+ */
+ memcpy(tmp_leaf, save_leaf, xfs_attr3_leaf_hdr_size(save_leaf));
+
+ memset(&tmphdr, 0, sizeof(tmphdr));
+ tmphdr.magic = savehdr.magic;
+ tmphdr.forw = savehdr.forw;
+ tmphdr.back = savehdr.back;
+ tmphdr.firstused = state->args->geo->blksize;
+
+ /* write the header to the temp buffer to initialise it */
+ xfs_attr3_leaf_hdr_to_disk(tmp_leaf, &tmphdr);
+
+ if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
+ drop_blk->bp, &drophdr)) {
+ xfs_attr3_leaf_moveents(state->args,
+ drop_leaf, &drophdr, 0,
+ tmp_leaf, &tmphdr, 0,
+ drophdr.count);
+ xfs_attr3_leaf_moveents(state->args,
+ save_leaf, &savehdr, 0,
+ tmp_leaf, &tmphdr, tmphdr.count,
+ savehdr.count);
} else {
- xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf, 0,
- be16_to_cpu(save_hdr->count), mp);
- xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf,
- be16_to_cpu(tmp_leaf->hdr.count),
- be16_to_cpu(drop_hdr->count), mp);
+ xfs_attr3_leaf_moveents(state->args,
+ save_leaf, &savehdr, 0,
+ tmp_leaf, &tmphdr, 0,
+ savehdr.count);
+ xfs_attr3_leaf_moveents(state->args,
+ drop_leaf, &drophdr, 0,
+ tmp_leaf, &tmphdr, tmphdr.count,
+ drophdr.count);
}
- memcpy((char *)save_leaf, (char *)tmp_leaf, state->blocksize);
- kmem_free(tmpbuffer, state->blocksize);
+ memcpy(save_leaf, tmp_leaf, state->args->geo->blksize);
+ savehdr = tmphdr; /* struct copy */
+ kmem_free(tmp_leaf);
}
- xfs_da_log_buf(state->args->trans, save_blk->bp, 0,
- state->blocksize - 1);
+ xfs_attr3_leaf_hdr_to_disk(save_leaf, &savehdr);
+ xfs_trans_log_buf(state->args->trans, save_blk->bp, 0,
+ state->args->geo->blksize - 1);
/*
* Copy out last hashval in each block for B-tree code.
*/
- save_blk->hashval = be32_to_cpu(
- save_leaf->entries[be16_to_cpu(save_leaf->hdr.count)-1].hashval);
+ entry = xfs_attr3_leaf_entryp(save_leaf);
+ save_blk->hashval = be32_to_cpu(entry[savehdr.count - 1].hashval);
}
/*========================================================================
@@ -1952,27 +2070,33 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
* Don't change the args->value unless we find the attribute.
*/
int
-xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
+xfs_attr3_leaf_lookup_int(
+ struct xfs_buf *bp,
+ struct xfs_da_args *args)
{
- xfs_attr_leafblock_t *leaf;
- xfs_attr_leaf_entry_t *entry;
- xfs_attr_leaf_name_local_t *name_loc;
- xfs_attr_leaf_name_remote_t *name_rmt;
- int probe, span;
- xfs_dahash_t hashval;
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_attr3_icleaf_hdr ichdr;
+ struct xfs_attr_leaf_entry *entry;
+ struct xfs_attr_leaf_entry *entries;
+ struct xfs_attr_leaf_name_local *name_loc;
+ struct xfs_attr_leaf_name_remote *name_rmt;
+ xfs_dahash_t hashval;
+ int probe;
+ int span;
+
+ trace_xfs_attr_leaf_lookup(args);
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- ASSERT(be16_to_cpu(leaf->hdr.count)
- < (XFS_LBSIZE(args->dp->i_mount)/8));
+ leaf = bp->b_addr;
+ xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ entries = xfs_attr3_leaf_entryp(leaf);
+ ASSERT(ichdr.count < args->geo->blksize / 8);
/*
* Binary search. (note: small blocks will skip this loop)
*/
hashval = args->hashval;
- probe = span = be16_to_cpu(leaf->hdr.count) / 2;
- for (entry = &leaf->entries[probe]; span > 4;
- entry = &leaf->entries[probe]) {
+ probe = span = ichdr.count / 2;
+ for (entry = &entries[probe]; span > 4; entry = &entries[probe]) {
span /= 2;
if (be32_to_cpu(entry->hashval) < hashval)
probe += span;
@@ -1981,35 +2105,31 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
else
break;
}
- ASSERT((probe >= 0) &&
- (!leaf->hdr.count
- || (probe < be16_to_cpu(leaf->hdr.count))));
- ASSERT((span <= 4) || (be32_to_cpu(entry->hashval) == hashval));
+ ASSERT(probe >= 0 && (!ichdr.count || probe < ichdr.count));
+ ASSERT(span <= 4 || be32_to_cpu(entry->hashval) == hashval);
/*
* Since we may have duplicate hashval's, find the first matching
* hashval in the leaf.
*/
- while ((probe > 0) && (be32_to_cpu(entry->hashval) >= hashval)) {
+ while (probe > 0 && be32_to_cpu(entry->hashval) >= hashval) {
entry--;
probe--;
}
- while ((probe < be16_to_cpu(leaf->hdr.count)) &&
- (be32_to_cpu(entry->hashval) < hashval)) {
+ while (probe < ichdr.count &&
+ be32_to_cpu(entry->hashval) < hashval) {
entry++;
probe++;
}
- if ((probe == be16_to_cpu(leaf->hdr.count)) ||
- (be32_to_cpu(entry->hashval) != hashval)) {
+ if (probe == ichdr.count || be32_to_cpu(entry->hashval) != hashval) {
args->index = probe;
- return(XFS_ERROR(ENOATTR));
+ return XFS_ERROR(ENOATTR);
}
/*
* Duplicate keys may be present, so search all of them for a match.
*/
- for ( ; (probe < be16_to_cpu(leaf->hdr.count)) &&
- (be32_to_cpu(entry->hashval) == hashval);
+ for (; probe < ichdr.count && (be32_to_cpu(entry->hashval) == hashval);
entry++, probe++) {
/*
* GROT: Add code to remove incomplete entries.
@@ -2023,33 +2143,36 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
continue;
}
if (entry->flags & XFS_ATTR_LOCAL) {
- name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, probe);
+ name_loc = xfs_attr3_leaf_name_local(leaf, probe);
if (name_loc->namelen != args->namelen)
continue;
- if (memcmp(args->name, (char *)name_loc->nameval, args->namelen) != 0)
+ if (memcmp(args->name, name_loc->nameval,
+ args->namelen) != 0)
continue;
if (!xfs_attr_namesp_match(args->flags, entry->flags))
continue;
args->index = probe;
- return(XFS_ERROR(EEXIST));
+ return XFS_ERROR(EEXIST);
} else {
- name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, probe);
+ name_rmt = xfs_attr3_leaf_name_remote(leaf, probe);
if (name_rmt->namelen != args->namelen)
continue;
- if (memcmp(args->name, (char *)name_rmt->name,
- args->namelen) != 0)
+ if (memcmp(args->name, name_rmt->name,
+ args->namelen) != 0)
continue;
if (!xfs_attr_namesp_match(args->flags, entry->flags))
continue;
args->index = probe;
+ args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
- args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount,
- be32_to_cpu(name_rmt->valuelen));
- return(XFS_ERROR(EEXIST));
+ args->rmtblkcnt = xfs_attr3_rmt_blocks(
+ args->dp->i_mount,
+ args->rmtvaluelen);
+ return XFS_ERROR(EEXIST);
}
}
args->index = probe;
- return(XFS_ERROR(ENOATTR));
+ return XFS_ERROR(ENOATTR);
}
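
The lookup above does a coarse binary search on the entry hash values and then walks backwards to the first entry with a matching hash, because duplicate hashvals are allowed in a leaf. A standalone sketch of the same pattern over a plain sorted array follows; it is illustrative only and leaves out the per-entry name comparison.

#include <stdint.h>
#include <stdio.h>

/*
 * Return the index of the first entry whose hash equals 'want', or -1.
 * Coarse binary search first, then a linear walk back to the first
 * duplicate, then forward past any smaller hashes.
 */
static int first_hash_match(const uint32_t *hash, int count, uint32_t want)
{
	int probe, span;

	if (!count)
		return -1;

	probe = span = count / 2;
	while (span > 4) {
		span /= 2;
		if (hash[probe] < want)
			probe += span;
		else if (hash[probe] > want)
			probe -= span;
		else
			break;
	}
	while (probe > 0 && hash[probe] >= want)
		probe--;
	while (probe < count && hash[probe] < want)
		probe++;
	if (probe == count || hash[probe] != want)
		return -1;
	return probe;
}

int main(void)
{
	uint32_t h[] = { 3, 9, 9, 9, 17, 40, 52, 77 };

	/* prints "first match at 1" */
	printf("first match at %d\n", first_hash_match(h, 8, 9));
	return 0;
}
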
/*
@@ -2057,54 +2180,57 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
* list structure.
*/
int
-xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args)
+xfs_attr3_leaf_getvalue(
+ struct xfs_buf *bp,
+ struct xfs_da_args *args)
{
- int valuelen;
- xfs_attr_leafblock_t *leaf;
- xfs_attr_leaf_entry_t *entry;
- xfs_attr_leaf_name_local_t *name_loc;
- xfs_attr_leaf_name_remote_t *name_rmt;
-
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- ASSERT(be16_to_cpu(leaf->hdr.count)
- < (XFS_LBSIZE(args->dp->i_mount)/8));
- ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
-
- entry = &leaf->entries[args->index];
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_attr3_icleaf_hdr ichdr;
+ struct xfs_attr_leaf_entry *entry;
+ struct xfs_attr_leaf_name_local *name_loc;
+ struct xfs_attr_leaf_name_remote *name_rmt;
+ int valuelen;
+
+ leaf = bp->b_addr;
+ xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ ASSERT(ichdr.count < args->geo->blksize / 8);
+ ASSERT(args->index < ichdr.count);
+
+ entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
if (entry->flags & XFS_ATTR_LOCAL) {
- name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index);
+ name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
ASSERT(name_loc->namelen == args->namelen);
ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0);
valuelen = be16_to_cpu(name_loc->valuelen);
if (args->flags & ATTR_KERNOVAL) {
args->valuelen = valuelen;
- return(0);
+ return 0;
}
if (args->valuelen < valuelen) {
args->valuelen = valuelen;
- return(XFS_ERROR(ERANGE));
+ return XFS_ERROR(ERANGE);
}
args->valuelen = valuelen;
memcpy(args->value, &name_loc->nameval[args->namelen], valuelen);
} else {
- name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+ name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
ASSERT(name_rmt->namelen == args->namelen);
ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
- valuelen = be32_to_cpu(name_rmt->valuelen);
+ args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
- args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen);
+ args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount,
+ args->rmtvaluelen);
if (args->flags & ATTR_KERNOVAL) {
- args->valuelen = valuelen;
- return(0);
+ args->valuelen = args->rmtvaluelen;
+ return 0;
}
- if (args->valuelen < valuelen) {
- args->valuelen = valuelen;
- return(XFS_ERROR(ERANGE));
+ if (args->valuelen < args->rmtvaluelen) {
+ args->valuelen = args->rmtvaluelen;
+ return XFS_ERROR(ERANGE);
}
- args->valuelen = valuelen;
+ args->valuelen = args->rmtvaluelen;
}
- return(0);
+ return 0;
}
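
Both the local and remote branches above follow the same buffer convention: if the caller's buffer is too small, the required length is reported back in valuelen and ERANGE is returned; otherwise the value is copied and valuelen is set to the actual length. A minimal sketch of that convention follows; the function name is invented for illustration.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/*
 * Copy a stored value into a caller-supplied buffer.  On a too-small
 * buffer, report the needed size in *buflen and fail with ERANGE;
 * on success, *buflen holds the actual value length.
 */
static int copy_value(const char *value, int valuelen, char *buf, int *buflen)
{
	if (*buflen < valuelen) {
		*buflen = valuelen;
		return ERANGE;
	}
	*buflen = valuelen;
	memcpy(buf, value, valuelen);
	return 0;
}

int main(void)
{
	char small[4], big[16];
	int len;

	len = sizeof(small);
	if (copy_value("hello world", 11, small, &len) == ERANGE)
		printf("need %d bytes\n", len);		/* need 11 bytes */

	len = sizeof(big);
	if (copy_value("hello world", 11, big, &len) == 0)
		printf("copied %d bytes\n", len);	/* copied 11 bytes */
	return 0;
}
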
/*========================================================================
@@ -2117,13 +2243,21 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args)
*/
/*ARGSUSED*/
STATIC void
-xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
- xfs_attr_leafblock_t *leaf_d, int start_d,
- int count, xfs_mount_t *mp)
+xfs_attr3_leaf_moveents(
+ struct xfs_da_args *args,
+ struct xfs_attr_leafblock *leaf_s,
+ struct xfs_attr3_icleaf_hdr *ichdr_s,
+ int start_s,
+ struct xfs_attr_leafblock *leaf_d,
+ struct xfs_attr3_icleaf_hdr *ichdr_d,
+ int start_d,
+ int count)
{
- xfs_attr_leaf_hdr_t *hdr_s, *hdr_d;
- xfs_attr_leaf_entry_t *entry_s, *entry_d;
- int desti, tmp, i;
+ struct xfs_attr_leaf_entry *entry_s;
+ struct xfs_attr_leaf_entry *entry_d;
+ int desti;
+ int tmp;
+ int i;
/*
* Check for nothing to do.
@@ -2134,45 +2268,41 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
/*
* Set up environment.
*/
- ASSERT(be16_to_cpu(leaf_s->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- ASSERT(be16_to_cpu(leaf_d->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- hdr_s = &leaf_s->hdr;
- hdr_d = &leaf_d->hdr;
- ASSERT((be16_to_cpu(hdr_s->count) > 0) &&
- (be16_to_cpu(hdr_s->count) < (XFS_LBSIZE(mp)/8)));
- ASSERT(be16_to_cpu(hdr_s->firstused) >=
- ((be16_to_cpu(hdr_s->count)
- * sizeof(*entry_s))+sizeof(*hdr_s)));
- ASSERT(be16_to_cpu(hdr_d->count) < (XFS_LBSIZE(mp)/8));
- ASSERT(be16_to_cpu(hdr_d->firstused) >=
- ((be16_to_cpu(hdr_d->count)
- * sizeof(*entry_d))+sizeof(*hdr_d)));
-
- ASSERT(start_s < be16_to_cpu(hdr_s->count));
- ASSERT(start_d <= be16_to_cpu(hdr_d->count));
- ASSERT(count <= be16_to_cpu(hdr_s->count));
+ ASSERT(ichdr_s->magic == XFS_ATTR_LEAF_MAGIC ||
+ ichdr_s->magic == XFS_ATTR3_LEAF_MAGIC);
+ ASSERT(ichdr_s->magic == ichdr_d->magic);
+ ASSERT(ichdr_s->count > 0 && ichdr_s->count < args->geo->blksize / 8);
+ ASSERT(ichdr_s->firstused >= (ichdr_s->count * sizeof(*entry_s))
+ + xfs_attr3_leaf_hdr_size(leaf_s));
+ ASSERT(ichdr_d->count < args->geo->blksize / 8);
+ ASSERT(ichdr_d->firstused >= (ichdr_d->count * sizeof(*entry_d))
+ + xfs_attr3_leaf_hdr_size(leaf_d));
+
+ ASSERT(start_s < ichdr_s->count);
+ ASSERT(start_d <= ichdr_d->count);
+ ASSERT(count <= ichdr_s->count);
+
/*
* Move the entries in the destination leaf up to make a hole?
*/
- if (start_d < be16_to_cpu(hdr_d->count)) {
- tmp = be16_to_cpu(hdr_d->count) - start_d;
+ if (start_d < ichdr_d->count) {
+ tmp = ichdr_d->count - start_d;
tmp *= sizeof(xfs_attr_leaf_entry_t);
- entry_s = &leaf_d->entries[start_d];
- entry_d = &leaf_d->entries[start_d + count];
- memmove((char *)entry_d, (char *)entry_s, tmp);
+ entry_s = &xfs_attr3_leaf_entryp(leaf_d)[start_d];
+ entry_d = &xfs_attr3_leaf_entryp(leaf_d)[start_d + count];
+ memmove(entry_d, entry_s, tmp);
}
/*
 * Copy all entries in the same (sorted) order,
* but allocate attribute info packed and in sequence.
*/
- entry_s = &leaf_s->entries[start_s];
- entry_d = &leaf_d->entries[start_d];
+ entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
+ entry_d = &xfs_attr3_leaf_entryp(leaf_d)[start_d];
desti = start_d;
for (i = 0; i < count; entry_s++, entry_d++, desti++, i++) {
- ASSERT(be16_to_cpu(entry_s->nameidx)
- >= be16_to_cpu(hdr_s->firstused));
+ ASSERT(be16_to_cpu(entry_s->nameidx) >= ichdr_s->firstused);
tmp = xfs_attr_leaf_entsize(leaf_s, start_s + i);
#ifdef GROT
/*
@@ -2181,36 +2311,34 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
* off for 6.2, should be revisited later.
*/
if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */
- memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
- be16_add_cpu(&hdr_s->usedbytes, -tmp);
- be16_add_cpu(&hdr_s->count, -1);
+ memset(xfs_attr3_leaf_name(leaf_s, start_s + i), 0, tmp);
+ ichdr_s->usedbytes -= tmp;
+ ichdr_s->count -= 1;
entry_d--; /* to compensate for ++ in loop hdr */
desti--;
if ((start_s + i) < offset)
result++; /* insertion index adjustment */
} else {
#endif /* GROT */
- be16_add_cpu(&hdr_d->firstused, -tmp);
+ ichdr_d->firstused -= tmp;
/* both on-disk, don't endian flip twice */
entry_d->hashval = entry_s->hashval;
- /* both on-disk, don't endian flip twice */
- entry_d->nameidx = hdr_d->firstused;
+ entry_d->nameidx = cpu_to_be16(ichdr_d->firstused);
entry_d->flags = entry_s->flags;
ASSERT(be16_to_cpu(entry_d->nameidx) + tmp
- <= XFS_LBSIZE(mp));
- memmove(XFS_ATTR_LEAF_NAME(leaf_d, desti),
- XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), tmp);
+ <= args->geo->blksize);
+ memmove(xfs_attr3_leaf_name(leaf_d, desti),
+ xfs_attr3_leaf_name(leaf_s, start_s + i), tmp);
ASSERT(be16_to_cpu(entry_s->nameidx) + tmp
- <= XFS_LBSIZE(mp));
- memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
- be16_add_cpu(&hdr_s->usedbytes, -tmp);
- be16_add_cpu(&hdr_d->usedbytes, tmp);
- be16_add_cpu(&hdr_s->count, -1);
- be16_add_cpu(&hdr_d->count, 1);
- tmp = be16_to_cpu(hdr_d->count)
- * sizeof(xfs_attr_leaf_entry_t)
- + sizeof(xfs_attr_leaf_hdr_t);
- ASSERT(be16_to_cpu(hdr_d->firstused) >= tmp);
+ <= args->geo->blksize);
+ memset(xfs_attr3_leaf_name(leaf_s, start_s + i), 0, tmp);
+ ichdr_s->usedbytes -= tmp;
+ ichdr_d->usedbytes += tmp;
+ ichdr_s->count -= 1;
+ ichdr_d->count += 1;
+ tmp = ichdr_d->count * sizeof(xfs_attr_leaf_entry_t)
+ + xfs_attr3_leaf_hdr_size(leaf_d);
+ ASSERT(ichdr_d->firstused >= tmp);
#ifdef GROT
}
#endif /* GROT */
@@ -2219,86 +2347,60 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
/*
* Zero out the entries we just copied.
*/
- if (start_s == be16_to_cpu(hdr_s->count)) {
+ if (start_s == ichdr_s->count) {
tmp = count * sizeof(xfs_attr_leaf_entry_t);
- entry_s = &leaf_s->entries[start_s];
+ entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
ASSERT(((char *)entry_s + tmp) <=
- ((char *)leaf_s + XFS_LBSIZE(mp)));
- memset((char *)entry_s, 0, tmp);
+ ((char *)leaf_s + args->geo->blksize));
+ memset(entry_s, 0, tmp);
} else {
/*
* Move the remaining entries down to fill the hole,
* then zero the entries at the top.
*/
- tmp = be16_to_cpu(hdr_s->count) - count;
- tmp *= sizeof(xfs_attr_leaf_entry_t);
- entry_s = &leaf_s->entries[start_s + count];
- entry_d = &leaf_s->entries[start_s];
- memmove((char *)entry_d, (char *)entry_s, tmp);
+ tmp = (ichdr_s->count - count) * sizeof(xfs_attr_leaf_entry_t);
+ entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s + count];
+ entry_d = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
+ memmove(entry_d, entry_s, tmp);
tmp = count * sizeof(xfs_attr_leaf_entry_t);
- entry_s = &leaf_s->entries[be16_to_cpu(hdr_s->count)];
+ entry_s = &xfs_attr3_leaf_entryp(leaf_s)[ichdr_s->count];
ASSERT(((char *)entry_s + tmp) <=
- ((char *)leaf_s + XFS_LBSIZE(mp)));
- memset((char *)entry_s, 0, tmp);
+ ((char *)leaf_s + args->geo->blksize));
+ memset(entry_s, 0, tmp);
}
/*
* Fill in the freemap information
*/
- hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t));
- be16_add_cpu(&hdr_d->freemap[0].base, be16_to_cpu(hdr_d->count) *
- sizeof(xfs_attr_leaf_entry_t));
- hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused)
- - be16_to_cpu(hdr_d->freemap[0].base));
- hdr_d->freemap[1].base = 0;
- hdr_d->freemap[2].base = 0;
- hdr_d->freemap[1].size = 0;
- hdr_d->freemap[2].size = 0;
- hdr_s->holes = 1; /* leaf may not be compact */
-}
-
-/*
- * Compare two leaf blocks "order".
- * Return 0 unless leaf2 should go before leaf1.
- */
-int
-xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp)
-{
- xfs_attr_leafblock_t *leaf1, *leaf2;
-
- leaf1 = leaf1_bp->data;
- leaf2 = leaf2_bp->data;
- ASSERT((be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC) &&
- (be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC));
- if ((be16_to_cpu(leaf1->hdr.count) > 0) &&
- (be16_to_cpu(leaf2->hdr.count) > 0) &&
- ((be32_to_cpu(leaf2->entries[0].hashval) <
- be32_to_cpu(leaf1->entries[0].hashval)) ||
- (be32_to_cpu(leaf2->entries[
- be16_to_cpu(leaf2->hdr.count)-1].hashval) <
- be32_to_cpu(leaf1->entries[
- be16_to_cpu(leaf1->hdr.count)-1].hashval)))) {
- return(1);
- }
- return(0);
+ ichdr_d->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_d);
+ ichdr_d->freemap[0].base += ichdr_d->count * sizeof(xfs_attr_leaf_entry_t);
+ ichdr_d->freemap[0].size = ichdr_d->firstused - ichdr_d->freemap[0].base;
+ ichdr_d->freemap[1].base = 0;
+ ichdr_d->freemap[2].base = 0;
+ ichdr_d->freemap[1].size = 0;
+ ichdr_d->freemap[2].size = 0;
+ ichdr_s->holes = 1; /* leaf may not be compact */
}
/*
* Pick up the last hashvalue from a leaf block.
*/
xfs_dahash_t
-xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count)
+xfs_attr_leaf_lasthash(
+ struct xfs_buf *bp,
+ int *count)
{
- xfs_attr_leafblock_t *leaf;
+ struct xfs_attr3_icleaf_hdr ichdr;
+ struct xfs_attr_leaf_entry *entries;
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ xfs_attr3_leaf_hdr_from_disk(&ichdr, bp->b_addr);
+ entries = xfs_attr3_leaf_entryp(bp->b_addr);
if (count)
- *count = be16_to_cpu(leaf->hdr.count);
- if (!leaf->hdr.count)
- return(0);
- return be32_to_cpu(leaf->entries[be16_to_cpu(leaf->hdr.count)-1].hashval);
+ *count = ichdr.count;
+ if (!ichdr.count)
+ return 0;
+ return be32_to_cpu(entries[ichdr.count - 1].hashval);
}
/*
@@ -2308,20 +2410,21 @@ xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count)
STATIC int
xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index)
{
+ struct xfs_attr_leaf_entry *entries;
xfs_attr_leaf_name_local_t *name_loc;
xfs_attr_leaf_name_remote_t *name_rmt;
int size;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- if (leaf->entries[index].flags & XFS_ATTR_LOCAL) {
- name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, index);
- size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(name_loc->namelen,
+ entries = xfs_attr3_leaf_entryp(leaf);
+ if (entries[index].flags & XFS_ATTR_LOCAL) {
+ name_loc = xfs_attr3_leaf_name_local(leaf, index);
+ size = xfs_attr_leaf_entsize_local(name_loc->namelen,
be16_to_cpu(name_loc->valuelen));
} else {
- name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, index);
- size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(name_rmt->namelen);
+ name_rmt = xfs_attr3_leaf_name_remote(leaf, index);
+ size = xfs_attr_leaf_entsize_remote(name_rmt->namelen);
}
- return(size);
+ return size;
}
/*
@@ -2331,145 +2434,21 @@ xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index)
* a "local" or a "remote" attribute.
*/
int
-xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize, int *local)
+xfs_attr_leaf_newentsize(
+ struct xfs_da_args *args,
+ int *local)
{
- int size;
+ int size;
- size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(namelen, valuelen);
- if (size < XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(blocksize)) {
- if (local) {
+ size = xfs_attr_leaf_entsize_local(args->namelen, args->valuelen);
+ if (size < xfs_attr_leaf_entsize_local_max(args->geo->blksize)) {
+ if (local)
*local = 1;
- }
- } else {
- size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(namelen);
- if (local) {
- *local = 0;
- }
- }
- return(size);
-}
-
-/*
- * Copy out attribute list entries for attr_list(), for leaf attribute lists.
- */
-int
-xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
-{
- attrlist_cursor_kern_t *cursor;
- xfs_attr_leafblock_t *leaf;
- xfs_attr_leaf_entry_t *entry;
- int retval, i;
-
- ASSERT(bp != NULL);
- leaf = bp->data;
- cursor = context->cursor;
- cursor->initted = 1;
-
- xfs_attr_trace_l_cl("blk start", context, leaf);
-
- /*
- * Re-find our place in the leaf block if this is a new syscall.
- */
- if (context->resynch) {
- entry = &leaf->entries[0];
- for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
- if (be32_to_cpu(entry->hashval) == cursor->hashval) {
- if (cursor->offset == context->dupcnt) {
- context->dupcnt = 0;
- break;
- }
- context->dupcnt++;
- } else if (be32_to_cpu(entry->hashval) >
- cursor->hashval) {
- context->dupcnt = 0;
- break;
- }
- }
- if (i == be16_to_cpu(leaf->hdr.count)) {
- xfs_attr_trace_l_c("not found", context);
- return(0);
- }
- } else {
- entry = &leaf->entries[0];
- i = 0;
- }
- context->resynch = 0;
-
- /*
- * We have found our place, start copying out the new attributes.
- */
- retval = 0;
- for ( ; (i < be16_to_cpu(leaf->hdr.count)); entry++, i++) {
- attrnames_t *namesp;
-
- if (be32_to_cpu(entry->hashval) != cursor->hashval) {
- cursor->hashval = be32_to_cpu(entry->hashval);
- cursor->offset = 0;
- }
-
- if (entry->flags & XFS_ATTR_INCOMPLETE)
- continue; /* skip incomplete entries */
- if (!xfs_attr_namesp_match_overrides(context->flags, entry->flags))
- continue;
-
- namesp = xfs_attr_flags_namesp(entry->flags);
-
- if (entry->flags & XFS_ATTR_LOCAL) {
- xfs_attr_leaf_name_local_t *name_loc =
- XFS_ATTR_LEAF_NAME_LOCAL(leaf, i);
-
- retval = context->put_listent(context,
- namesp,
- (char *)name_loc->nameval,
- (int)name_loc->namelen,
- be16_to_cpu(name_loc->valuelen),
- (char *)&name_loc->nameval[name_loc->namelen]);
- if (retval)
- return retval;
- } else {
- xfs_attr_leaf_name_remote_t *name_rmt =
- XFS_ATTR_LEAF_NAME_REMOTE(leaf, i);
-
- int valuelen = be32_to_cpu(name_rmt->valuelen);
-
- if (context->put_value) {
- xfs_da_args_t args;
-
- memset((char *)&args, 0, sizeof(args));
- args.dp = context->dp;
- args.whichfork = XFS_ATTR_FORK;
- args.valuelen = valuelen;
- args.value = kmem_alloc(valuelen, KM_SLEEP);
- args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
- args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen);
- retval = xfs_attr_rmtval_get(&args);
- if (retval)
- return retval;
- retval = context->put_listent(context,
- namesp,
- (char *)name_rmt->name,
- (int)name_rmt->namelen,
- valuelen,
- (char*)args.value);
- kmem_free(args.value, valuelen);
- }
- else {
- retval = context->put_listent(context,
- namesp,
- (char *)name_rmt->name,
- (int)name_rmt->namelen,
- valuelen,
- NULL);
- }
- if (retval)
- return retval;
- }
- if (context->seen_enough)
- break;
- cursor->offset++;
+ return size;
}
- xfs_attr_trace_l_cl("blk end", context, leaf);
- return(retval);
+ if (local)
+ *local = 0;
+ return xfs_attr_leaf_entsize_remote(args->namelen);
}
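
A minimal sketch of the new calling convention, assuming a populated struct xfs_da_args with args->geo, args->namelen and args->valuelen set (hypothetical caller, not part of this patch):

	int	local;
	int	size;

	size = xfs_attr_leaf_newentsize(args, &local);
	if (local) {
		/* name and value fit inline in the leaf block */
	} else {
		/* value is stored remotely; size covers only the name entry */
	}
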
@@ -2481,43 +2460,44 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
* Clear the INCOMPLETE flag on an entry in a leaf block.
*/
int
-xfs_attr_leaf_clearflag(xfs_da_args_t *args)
+xfs_attr3_leaf_clearflag(
+ struct xfs_da_args *args)
{
- xfs_attr_leafblock_t *leaf;
- xfs_attr_leaf_entry_t *entry;
- xfs_attr_leaf_name_remote_t *name_rmt;
- xfs_dabuf_t *bp;
- int error;
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_attr_leaf_entry *entry;
+ struct xfs_attr_leaf_name_remote *name_rmt;
+ struct xfs_buf *bp;
+ int error;
#ifdef DEBUG
+ struct xfs_attr3_icleaf_hdr ichdr;
xfs_attr_leaf_name_local_t *name_loc;
int namelen;
char *name;
#endif /* DEBUG */
+ trace_xfs_attr_leaf_clearflag(args);
/*
* Set up the operation.
*/
- error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
- XFS_ATTR_FORK);
- if (error) {
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
+ if (error)
return(error);
- }
- ASSERT(bp != NULL);
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
- ASSERT(args->index >= 0);
- entry = &leaf->entries[ args->index ];
+ leaf = bp->b_addr;
+ entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
ASSERT(entry->flags & XFS_ATTR_INCOMPLETE);
#ifdef DEBUG
+ xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ ASSERT(args->index < ichdr.count);
+ ASSERT(args->index >= 0);
+
if (entry->flags & XFS_ATTR_LOCAL) {
- name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index);
+ name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
namelen = name_loc->namelen;
name = (char *)name_loc->nameval;
} else {
- name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+ name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
namelen = name_rmt->namelen;
name = (char *)name_rmt->name;
}
@@ -2527,74 +2507,73 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
#endif /* DEBUG */
entry->flags &= ~XFS_ATTR_INCOMPLETE;
- xfs_da_log_buf(args->trans, bp,
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
if (args->rmtblkno) {
ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0);
- name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+ name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
- name_rmt->valuelen = cpu_to_be32(args->valuelen);
- xfs_da_log_buf(args->trans, bp,
+ name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
}
- xfs_da_buf_done(bp);
/*
* Commit the flag value change and start the next trans in series.
*/
- error = xfs_attr_rolltrans(&args->trans, args->dp);
-
- return(error);
+ return xfs_trans_roll(&args->trans, args->dp);
}
/*
* Set the INCOMPLETE flag on an entry in a leaf block.
*/
int
-xfs_attr_leaf_setflag(xfs_da_args_t *args)
+xfs_attr3_leaf_setflag(
+ struct xfs_da_args *args)
{
- xfs_attr_leafblock_t *leaf;
- xfs_attr_leaf_entry_t *entry;
- xfs_attr_leaf_name_remote_t *name_rmt;
- xfs_dabuf_t *bp;
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_attr_leaf_entry *entry;
+ struct xfs_attr_leaf_name_remote *name_rmt;
+ struct xfs_buf *bp;
int error;
+#ifdef DEBUG
+ struct xfs_attr3_icleaf_hdr ichdr;
+#endif
+
+ trace_xfs_attr_leaf_setflag(args);
/*
* Set up the operation.
*/
- error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
- XFS_ATTR_FORK);
- if (error) {
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
+ if (error)
return(error);
- }
- ASSERT(bp != NULL);
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
+ leaf = bp->b_addr;
+#ifdef DEBUG
+ xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ ASSERT(args->index < ichdr.count);
ASSERT(args->index >= 0);
- entry = &leaf->entries[ args->index ];
+#endif
+ entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
ASSERT((entry->flags & XFS_ATTR_INCOMPLETE) == 0);
entry->flags |= XFS_ATTR_INCOMPLETE;
- xfs_da_log_buf(args->trans, bp,
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
if ((entry->flags & XFS_ATTR_LOCAL) == 0) {
- name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+ name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
name_rmt->valueblk = 0;
name_rmt->valuelen = 0;
- xfs_da_log_buf(args->trans, bp,
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
}
- xfs_da_buf_done(bp);
/*
* Commit the flag value change and start the next trans in series.
*/
- error = xfs_attr_rolltrans(&args->trans, args->dp);
-
- return(error);
+ return xfs_trans_roll(&args->trans, args->dp);
}
/*
@@ -2605,71 +2584,76 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args)
* Note that they could be in different blocks, or in the same block.
*/
int
-xfs_attr_leaf_flipflags(xfs_da_args_t *args)
+xfs_attr3_leaf_flipflags(
+ struct xfs_da_args *args)
{
- xfs_attr_leafblock_t *leaf1, *leaf2;
- xfs_attr_leaf_entry_t *entry1, *entry2;
- xfs_attr_leaf_name_remote_t *name_rmt;
- xfs_dabuf_t *bp1, *bp2;
+ struct xfs_attr_leafblock *leaf1;
+ struct xfs_attr_leafblock *leaf2;
+ struct xfs_attr_leaf_entry *entry1;
+ struct xfs_attr_leaf_entry *entry2;
+ struct xfs_attr_leaf_name_remote *name_rmt;
+ struct xfs_buf *bp1;
+ struct xfs_buf *bp2;
int error;
#ifdef DEBUG
+ struct xfs_attr3_icleaf_hdr ichdr1;
+ struct xfs_attr3_icleaf_hdr ichdr2;
xfs_attr_leaf_name_local_t *name_loc;
int namelen1, namelen2;
char *name1, *name2;
#endif /* DEBUG */
+ trace_xfs_attr_leaf_flipflags(args);
+
/*
* Read the block containing the "old" attr
*/
- error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp1,
- XFS_ATTR_FORK);
- if (error) {
- return(error);
- }
- ASSERT(bp1 != NULL);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp1);
+ if (error)
+ return error;
/*
* Read the block containing the "new" attr, if it is different
*/
if (args->blkno2 != args->blkno) {
- error = xfs_da_read_buf(args->trans, args->dp, args->blkno2,
- -1, &bp2, XFS_ATTR_FORK);
- if (error) {
- return(error);
- }
- ASSERT(bp2 != NULL);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno2,
+ -1, &bp2);
+ if (error)
+ return error;
} else {
bp2 = bp1;
}
- leaf1 = bp1->data;
- ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- ASSERT(args->index < be16_to_cpu(leaf1->hdr.count));
+ leaf1 = bp1->b_addr;
+ entry1 = &xfs_attr3_leaf_entryp(leaf1)[args->index];
+
+ leaf2 = bp2->b_addr;
+ entry2 = &xfs_attr3_leaf_entryp(leaf2)[args->index2];
+
+#ifdef DEBUG
+ xfs_attr3_leaf_hdr_from_disk(&ichdr1, leaf1);
+ ASSERT(args->index < ichdr1.count);
ASSERT(args->index >= 0);
- entry1 = &leaf1->entries[ args->index ];
- leaf2 = bp2->data;
- ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- ASSERT(args->index2 < be16_to_cpu(leaf2->hdr.count));
+ xfs_attr3_leaf_hdr_from_disk(&ichdr2, leaf2);
+ ASSERT(args->index2 < ichdr2.count);
ASSERT(args->index2 >= 0);
- entry2 = &leaf2->entries[ args->index2 ];
-#ifdef DEBUG
if (entry1->flags & XFS_ATTR_LOCAL) {
- name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf1, args->index);
+ name_loc = xfs_attr3_leaf_name_local(leaf1, args->index);
namelen1 = name_loc->namelen;
name1 = (char *)name_loc->nameval;
} else {
- name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index);
+ name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
namelen1 = name_rmt->namelen;
name1 = (char *)name_rmt->name;
}
if (entry2->flags & XFS_ATTR_LOCAL) {
- name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf2, args->index2);
+ name_loc = xfs_attr3_leaf_name_local(leaf2, args->index2);
namelen2 = name_loc->namelen;
name2 = (char *)name_loc->nameval;
} else {
- name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf2, args->index2);
+ name_rmt = xfs_attr3_leaf_name_remote(leaf2, args->index2);
namelen2 = name_rmt->namelen;
name2 = (char *)name_rmt->name;
}
@@ -2682,397 +2666,32 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args)
ASSERT((entry2->flags & XFS_ATTR_INCOMPLETE) == 0);
entry1->flags &= ~XFS_ATTR_INCOMPLETE;
- xfs_da_log_buf(args->trans, bp1,
+ xfs_trans_log_buf(args->trans, bp1,
XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1)));
if (args->rmtblkno) {
ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
- name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index);
+ name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
- name_rmt->valuelen = cpu_to_be32(args->valuelen);
- xfs_da_log_buf(args->trans, bp1,
+ name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
+ xfs_trans_log_buf(args->trans, bp1,
XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
}
entry2->flags |= XFS_ATTR_INCOMPLETE;
- xfs_da_log_buf(args->trans, bp2,
+ xfs_trans_log_buf(args->trans, bp2,
XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2)));
if ((entry2->flags & XFS_ATTR_LOCAL) == 0) {
- name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf2, args->index2);
+ name_rmt = xfs_attr3_leaf_name_remote(leaf2, args->index2);
name_rmt->valueblk = 0;
name_rmt->valuelen = 0;
- xfs_da_log_buf(args->trans, bp2,
+ xfs_trans_log_buf(args->trans, bp2,
XFS_DA_LOGRANGE(leaf2, name_rmt, sizeof(*name_rmt)));
}
- xfs_da_buf_done(bp1);
- if (bp1 != bp2)
- xfs_da_buf_done(bp2);
/*
* Commit the flag value change and start the next trans in series.
*/
- error = xfs_attr_rolltrans(&args->trans, args->dp);
-
- return(error);
-}
-
-/*========================================================================
- * Indiscriminately delete the entire attribute fork
- *========================================================================*/
-
-/*
- * Recurse (gasp!) through the attribute nodes until we find leaves.
- * We're doing a depth-first traversal in order to invalidate everything.
- */
-int
-xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
-{
- xfs_da_blkinfo_t *info;
- xfs_daddr_t blkno;
- xfs_dabuf_t *bp;
- int error;
-
- /*
- * Read block 0 to see what we have to work with.
- * We only get here if we have extents, since we remove
- * the extents in reverse order the extent containing
- * block 0 must still be there.
- */
- error = xfs_da_read_buf(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
- if (error)
- return(error);
- blkno = xfs_da_blkno(bp);
-
- /*
- * Invalidate the tree, even if the "tree" is only a single leaf block.
- * This is a depth-first traversal!
- */
- info = bp->data;
- if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) {
- error = xfs_attr_node_inactive(trans, dp, bp, 1);
- } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) {
- error = xfs_attr_leaf_inactive(trans, dp, bp);
- } else {
- error = XFS_ERROR(EIO);
- xfs_da_brelse(*trans, bp);
- }
- if (error)
- return(error);
-
- /*
- * Invalidate the incore copy of the root block.
- */
- error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
- if (error)
- return(error);
- xfs_da_binval(*trans, bp); /* remove from cache */
- /*
- * Commit the invalidate and start the next transaction.
- */
- error = xfs_attr_rolltrans(trans, dp);
-
- return (error);
-}
-
-/*
- * Recurse (gasp!) through the attribute nodes until we find leaves.
- * We're doing a depth-first traversal in order to invalidate everything.
- */
-STATIC int
-xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
- int level)
-{
- xfs_da_blkinfo_t *info;
- xfs_da_intnode_t *node;
- xfs_dablk_t child_fsb;
- xfs_daddr_t parent_blkno, child_blkno;
- int error, count, i;
- xfs_dabuf_t *child_bp;
-
- /*
- * Since this code is recursive (gasp!) we must protect ourselves.
- */
- if (level > XFS_DA_NODE_MAXDEPTH) {
- xfs_da_brelse(*trans, bp); /* no locks for later trans */
- return(XFS_ERROR(EIO));
- }
-
- node = bp->data;
- ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
- parent_blkno = xfs_da_blkno(bp); /* save for re-read later */
- count = be16_to_cpu(node->hdr.count);
- if (!count) {
- xfs_da_brelse(*trans, bp);
- return(0);
- }
- child_fsb = be32_to_cpu(node->btree[0].before);
- xfs_da_brelse(*trans, bp); /* no locks for later trans */
-
- /*
- * If this is the node level just above the leaves, simply loop
- * over the leaves removing all of them. If this is higher up
- * in the tree, recurse downward.
- */
- for (i = 0; i < count; i++) {
- /*
- * Read the subsidiary block to see what we have to work with.
- * Don't do this in a transaction. This is a depth-first
- * traversal of the tree so we may deal with many blocks
- * before we come back to this one.
- */
- error = xfs_da_read_buf(*trans, dp, child_fsb, -2, &child_bp,
- XFS_ATTR_FORK);
- if (error)
- return(error);
- if (child_bp) {
- /* save for re-read later */
- child_blkno = xfs_da_blkno(child_bp);
-
- /*
- * Invalidate the subtree, however we have to.
- */
- info = child_bp->data;
- if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) {
- error = xfs_attr_node_inactive(trans, dp,
- child_bp, level+1);
- } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) {
- error = xfs_attr_leaf_inactive(trans, dp,
- child_bp);
- } else {
- error = XFS_ERROR(EIO);
- xfs_da_brelse(*trans, child_bp);
- }
- if (error)
- return(error);
-
- /*
- * Remove the subsidiary block from the cache
- * and from the log.
- */
- error = xfs_da_get_buf(*trans, dp, 0, child_blkno,
- &child_bp, XFS_ATTR_FORK);
- if (error)
- return(error);
- xfs_da_binval(*trans, child_bp);
- }
-
- /*
- * If we're not done, re-read the parent to get the next
- * child block number.
- */
- if ((i+1) < count) {
- error = xfs_da_read_buf(*trans, dp, 0, parent_blkno,
- &bp, XFS_ATTR_FORK);
- if (error)
- return(error);
- child_fsb = be32_to_cpu(node->btree[i+1].before);
- xfs_da_brelse(*trans, bp);
- }
- /*
- * Atomically commit the whole invalidate stuff.
- */
- if ((error = xfs_attr_rolltrans(trans, dp)))
- return (error);
- }
-
- return(0);
-}
-
-/*
- * Invalidate all of the "remote" value regions pointed to by a particular
- * leaf block.
- * Note that we must release the lock on the buffer so that we are not
- * caught holding something that the logging code wants to flush to disk.
- */
-STATIC int
-xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
-{
- xfs_attr_leafblock_t *leaf;
- xfs_attr_leaf_entry_t *entry;
- xfs_attr_leaf_name_remote_t *name_rmt;
- xfs_attr_inactive_list_t *list, *lp;
- int error, count, size, tmp, i;
-
- leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
-
- /*
- * Count the number of "remote" value extents.
- */
- count = 0;
- entry = &leaf->entries[0];
- for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
- if (be16_to_cpu(entry->nameidx) &&
- ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
- name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i);
- if (name_rmt->valueblk)
- count++;
- }
- }
-
- /*
- * If there are no "remote" values, we're done.
- */
- if (count == 0) {
- xfs_da_brelse(*trans, bp);
- return(0);
- }
-
- /*
- * Allocate storage for a list of all the "remote" value extents.
- */
- size = count * sizeof(xfs_attr_inactive_list_t);
- list = (xfs_attr_inactive_list_t *)kmem_alloc(size, KM_SLEEP);
-
- /*
- * Identify each of the "remote" value extents.
- */
- lp = list;
- entry = &leaf->entries[0];
- for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
- if (be16_to_cpu(entry->nameidx) &&
- ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
- name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i);
- if (name_rmt->valueblk) {
- lp->valueblk = be32_to_cpu(name_rmt->valueblk);
- lp->valuelen = XFS_B_TO_FSB(dp->i_mount,
- be32_to_cpu(name_rmt->valuelen));
- lp++;
- }
- }
- }
- xfs_da_brelse(*trans, bp); /* unlock for trans. in freextent() */
-
- /*
- * Invalidate each of the "remote" value extents.
- */
- error = 0;
- for (lp = list, i = 0; i < count; i++, lp++) {
- tmp = xfs_attr_leaf_freextent(trans, dp,
- lp->valueblk, lp->valuelen);
-
- if (error == 0)
- error = tmp; /* save only the 1st errno */
- }
-
- kmem_free((xfs_caddr_t)list, size);
- return(error);
-}
-
-/*
- * Look at all the extents for this logical region,
- * invalidate any buffers that are incore/in transactions.
- */
-STATIC int
-xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
- xfs_dablk_t blkno, int blkcnt)
-{
- xfs_bmbt_irec_t map;
- xfs_dablk_t tblkno;
- int tblkcnt, dblkcnt, nmap, error;
- xfs_daddr_t dblkno;
- xfs_buf_t *bp;
-
- /*
- * Roll through the "value", invalidating the attribute value's
- * blocks.
- */
- tblkno = blkno;
- tblkcnt = blkcnt;
- while (tblkcnt > 0) {
- /*
- * Try to remember where we decided to put the value.
- */
- nmap = 1;
- error = xfs_bmapi(*trans, dp, (xfs_fileoff_t)tblkno, tblkcnt,
- XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
- NULL, 0, &map, &nmap, NULL, NULL);
- if (error) {
- return(error);
- }
- ASSERT(nmap == 1);
- ASSERT(map.br_startblock != DELAYSTARTBLOCK);
-
- /*
- * If it's a hole, these are already unmapped
- * so there's nothing to invalidate.
- */
- if (map.br_startblock != HOLESTARTBLOCK) {
-
- dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
- map.br_startblock);
- dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
- map.br_blockcount);
- bp = xfs_trans_get_buf(*trans,
- dp->i_mount->m_ddev_targp,
- dblkno, dblkcnt, XFS_BUF_LOCK);
- xfs_trans_binval(*trans, bp);
- /*
- * Roll to next transaction.
- */
- if ((error = xfs_attr_rolltrans(trans, dp)))
- return (error);
- }
-
- tblkno += map.br_blockcount;
- tblkcnt -= map.br_blockcount;
- }
-
- return(0);
-}
-
-
-/*
- * Roll from one trans in the sequence of PERMANENT transactions to the next.
- */
-int
-xfs_attr_rolltrans(xfs_trans_t **transp, xfs_inode_t *dp)
-{
- xfs_trans_t *trans;
- unsigned int logres, count;
- int error;
-
- /*
- * Ensure that the inode is always logged.
- */
- trans = *transp;
- xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
-
- /*
- * Copy the critical parameters from one trans to the next.
- */
- logres = trans->t_log_res;
- count = trans->t_log_count;
- *transp = xfs_trans_dup(trans);
-
- /*
- * Commit the current transaction.
- * If this commit failed, then it'd just unlock those items that
- * are not marked ihold. That also means that a filesystem shutdown
- * is in progress. The caller takes the responsibility to cancel
- * the duplicate transaction that gets returned.
- */
- if ((error = xfs_trans_commit(trans, 0)))
- return (error);
-
- trans = *transp;
-
- /*
- * Reserve space in the log for the next transaction.
- * This also pushes items in the "AIL", the list of logged items,
- * out to disk if they are taking up space at the tail of the log
- * that we want to use. This requires that either nothing be locked
- * across this call, or that anything that is locked be logged in
- * the prior and the next transactions.
- */
- error = xfs_trans_reserve(trans, 0, logres, 0,
- XFS_TRANS_PERM_LOG_RES, count);
- /*
- * Ensure that the inode is in the new transaction and locked.
- */
- if (!error) {
- xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(trans, dp);
- }
- return (error);
+ error = xfs_trans_roll(&args->trans, args->dp);
+ return error;
}
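
The open-coded xfs_attr_rolltrans() removed above is replaced by the generic xfs_trans_roll(); a minimal sketch of the pattern callers in this file now follow (illustrative only, not part of the patch):

	/*
	 * Commit the current permanent transaction and start the next one,
	 * keeping the inode logged, joined and locked across the roll.
	 */
	error = xfs_trans_roll(&args->trans, args->dp);
	if (error)
		return error;
	/* keep logging into the fresh args->trans */
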
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index 040f732ce1e..e2929da7c3b 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2000,2002-2003,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -18,219 +19,15 @@
#ifndef __XFS_ATTR_LEAF_H__
#define __XFS_ATTR_LEAF_H__
-/*
- * Attribute storage layout, internal structure, access macros, etc.
- *
- * Attribute lists are structured around Btrees where all the data
- * elements are in the leaf nodes. Attribute names are hashed into an int,
- * then that int is used as the index into the Btree. Since the hashval
- * of an attribute name may not be unique, we may have duplicate keys. The
- * internal links in the Btree are logical block offsets into the file.
- */
-
struct attrlist;
struct attrlist_cursor_kern;
-struct attrnames;
-struct xfs_dabuf;
+struct xfs_attr_list_context;
struct xfs_da_args;
struct xfs_da_state;
struct xfs_da_state_blk;
struct xfs_inode;
struct xfs_trans;
-/*========================================================================
- * Attribute structure when equal to XFS_LBSIZE(mp) bytes.
- *========================================================================*/
-
-/*
- * This is the structure of the leaf nodes in the Btree.
- *
- * Struct leaf_entry's are packed from the top. Name/values grow from the
- * bottom but are not packed. The freemap contains run-length-encoded entries
- * for the free bytes after the leaf_entry's, but only the N largest such,
- * smaller runs are dropped. When the freemap doesn't show enough space
- * for an allocation, we compact the name/value area and try again. If we
- * still don't have enough space, then we have to split the block. The
- * name/value structs (both local and remote versions) must be 32bit aligned.
- *
- * Since we have duplicate hash keys, for each key that matches, compare
- * the actual name string. The root and intermediate node search always
- * takes the first-in-the-block key match found, so we should only have
- * to work "forw"ard. If none matches, continue with the "forw"ard leaf
- * nodes until the hash key changes or the attribute name is found.
- *
- * We store the fact that an attribute is a ROOT/USER/SECURE attribute in
- * the leaf_entry. The namespaces are independent only because we also look
- * at the namespace bit when we are looking for a matching attribute name.
- *
- * We also store an "incomplete" bit in the leaf_entry. It shows that an
- * attribute is in the middle of being created and should not be shown to
- * the user if we crash during the time that the bit is set. We clear the
- * bit when we have finished setting up the attribute. We do this because
- * we cannot create some large attributes inside a single transaction, and we
- * need some indication that we weren't finished if we crash in the middle.
- */
-#define XFS_ATTR_LEAF_MAPSIZE 3 /* how many freespace slots */
-
-typedef struct xfs_attr_leaf_map { /* RLE map of free bytes */
- __be16 base; /* base of free region */
- __be16 size; /* length of free region */
-} xfs_attr_leaf_map_t;
-
-typedef struct xfs_attr_leaf_hdr { /* constant-structure header block */
- xfs_da_blkinfo_t info; /* block type, links, etc. */
- __be16 count; /* count of active leaf_entry's */
- __be16 usedbytes; /* num bytes of names/values stored */
- __be16 firstused; /* first used byte in name area */
- __u8 holes; /* != 0 if blk needs compaction */
- __u8 pad1;
- xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
- /* N largest free regions */
-} xfs_attr_leaf_hdr_t;
-
-typedef struct xfs_attr_leaf_entry { /* sorted on key, not name */
- __be32 hashval; /* hash value of name */
- __be16 nameidx; /* index into buffer of name/value */
- __u8 flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
- __u8 pad2; /* unused pad byte */
-} xfs_attr_leaf_entry_t;
-
-typedef struct xfs_attr_leaf_name_local {
- __be16 valuelen; /* number of bytes in value */
- __u8 namelen; /* length of name bytes */
- __u8 nameval[1]; /* name/value bytes */
-} xfs_attr_leaf_name_local_t;
-
-typedef struct xfs_attr_leaf_name_remote {
- __be32 valueblk; /* block number of value bytes */
- __be32 valuelen; /* number of bytes in value */
- __u8 namelen; /* length of name bytes */
- __u8 name[1]; /* name bytes */
-} xfs_attr_leaf_name_remote_t;
-
-typedef struct xfs_attr_leafblock {
- xfs_attr_leaf_hdr_t hdr; /* constant-structure header block */
- xfs_attr_leaf_entry_t entries[1]; /* sorted on key, not name */
- xfs_attr_leaf_name_local_t namelist; /* grows from bottom of buf */
- xfs_attr_leaf_name_remote_t valuelist; /* grows from bottom of buf */
-} xfs_attr_leafblock_t;
-
-/*
- * Flags used in the leaf_entry[i].flags field.
- * NOTE: the INCOMPLETE bit must not collide with the flags bits specified
- * on the system call, they are "or"ed together for various operations.
- */
-#define XFS_ATTR_LOCAL_BIT 0 /* attr is stored locally */
-#define XFS_ATTR_ROOT_BIT 1 /* limit access to trusted attrs */
-#define XFS_ATTR_SECURE_BIT 2 /* limit access to secure attrs */
-#define XFS_ATTR_INCOMPLETE_BIT 7 /* attr in middle of create/delete */
-#define XFS_ATTR_LOCAL (1 << XFS_ATTR_LOCAL_BIT)
-#define XFS_ATTR_ROOT (1 << XFS_ATTR_ROOT_BIT)
-#define XFS_ATTR_SECURE (1 << XFS_ATTR_SECURE_BIT)
-#define XFS_ATTR_INCOMPLETE (1 << XFS_ATTR_INCOMPLETE_BIT)
-
-/*
- * Conversion macros for converting namespace bits from argument flags
- * to ondisk flags.
- */
-#define XFS_ATTR_NSP_ARGS_MASK (ATTR_ROOT | ATTR_SECURE)
-#define XFS_ATTR_NSP_ONDISK_MASK (XFS_ATTR_ROOT | XFS_ATTR_SECURE)
-#define XFS_ATTR_NSP_ONDISK(flags) ((flags) & XFS_ATTR_NSP_ONDISK_MASK)
-#define XFS_ATTR_NSP_ARGS(flags) ((flags) & XFS_ATTR_NSP_ARGS_MASK)
-#define XFS_ATTR_NSP_ARGS_TO_ONDISK(x) (((x) & ATTR_ROOT ? XFS_ATTR_ROOT : 0) |\
- ((x) & ATTR_SECURE ? XFS_ATTR_SECURE : 0))
-#define XFS_ATTR_NSP_ONDISK_TO_ARGS(x) (((x) & XFS_ATTR_ROOT ? ATTR_ROOT : 0) |\
- ((x) & XFS_ATTR_SECURE ? ATTR_SECURE : 0))
-
-/*
- * Alignment for namelist and valuelist entries (since they are mixed
- * there can be only one alignment value)
- */
-#define XFS_ATTR_LEAF_NAME_ALIGN ((uint)sizeof(xfs_dablk_t))
-
-/*
- * Cast typed pointers for "local" and "remote" name/value structs.
- */
-#define XFS_ATTR_LEAF_NAME_REMOTE(leafp,idx) \
- xfs_attr_leaf_name_remote(leafp,idx)
-static inline xfs_attr_leaf_name_remote_t *
-xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
-{
- return (xfs_attr_leaf_name_remote_t *)
- &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)];
-}
-
-#define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx) \
- xfs_attr_leaf_name_local(leafp,idx)
-static inline xfs_attr_leaf_name_local_t *
-xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
-{
- return (xfs_attr_leaf_name_local_t *)
- &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)];
-}
-
-#define XFS_ATTR_LEAF_NAME(leafp,idx) \
- xfs_attr_leaf_name(leafp,idx)
-static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
-{
- return &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)];
-}
-
-/*
- * Calculate total bytes used (including trailing pad for alignment) for
- * a "local" name/value structure, a "remote" name/value structure, and
- * a pointer which might be either.
- */
-#define XFS_ATTR_LEAF_ENTSIZE_REMOTE(nlen) \
- xfs_attr_leaf_entsize_remote(nlen)
-static inline int xfs_attr_leaf_entsize_remote(int nlen)
-{
- return ((uint)sizeof(xfs_attr_leaf_name_remote_t) - 1 + (nlen) + \
- XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
-}
-
-#define XFS_ATTR_LEAF_ENTSIZE_LOCAL(nlen,vlen) \
- xfs_attr_leaf_entsize_local(nlen,vlen)
-static inline int xfs_attr_leaf_entsize_local(int nlen, int vlen)
-{
- return ((uint)sizeof(xfs_attr_leaf_name_local_t) - 1 + (nlen) + (vlen) +
- XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
-}
-
-#define XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(bsize) \
- xfs_attr_leaf_entsize_local_max(bsize)
-static inline int xfs_attr_leaf_entsize_local_max(int bsize)
-{
- return (((bsize) >> 1) + ((bsize) >> 2));
-}
-
-
-/*========================================================================
- * Structure used to pass context around among the routines.
- *========================================================================*/
-
-
-struct xfs_attr_list_context;
-
-typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, struct attrnames *,
- char *, int, int, char *);
-
-typedef struct xfs_attr_list_context {
- struct xfs_inode *dp; /* inode */
- struct attrlist_cursor_kern *cursor; /* position in list */
- struct attrlist *alist; /* output buffer */
- int seen_enough; /* T/F: seen enough of list? */
- int count; /* num used entries */
- int dupcnt; /* count dup hashvals seen */
- int bufsize; /* total buffer size */
- int firstu; /* first used byte in buffer */
- int flags; /* from VOP call */
- int resynch; /* T/F: resynch with cursor */
- int put_value; /* T/F: need value for listent */
- put_listent_func_t put_listent; /* list output fmt function */
- int index; /* index into output buffer */
-} xfs_attr_list_context_t;
-
/*
* Used to keep a list of "remote value" extents when unlinking an inode.
*/
@@ -254,53 +51,58 @@ int xfs_attr_shortform_getvalue(struct xfs_da_args *args);
int xfs_attr_shortform_to_leaf(struct xfs_da_args *args);
int xfs_attr_shortform_remove(struct xfs_da_args *args);
int xfs_attr_shortform_list(struct xfs_attr_list_context *context);
-int xfs_attr_shortform_allfit(struct xfs_dabuf *bp, struct xfs_inode *dp);
+int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
/*
* Internal routines when attribute fork size == XFS_LBSIZE(mp).
*/
-int xfs_attr_leaf_to_node(struct xfs_da_args *args);
-int xfs_attr_leaf_to_shortform(struct xfs_dabuf *bp,
+int xfs_attr3_leaf_to_node(struct xfs_da_args *args);
+int xfs_attr3_leaf_to_shortform(struct xfs_buf *bp,
struct xfs_da_args *args, int forkoff);
-int xfs_attr_leaf_clearflag(struct xfs_da_args *args);
-int xfs_attr_leaf_setflag(struct xfs_da_args *args);
-int xfs_attr_leaf_flipflags(xfs_da_args_t *args);
+int xfs_attr3_leaf_clearflag(struct xfs_da_args *args);
+int xfs_attr3_leaf_setflag(struct xfs_da_args *args);
+int xfs_attr3_leaf_flipflags(struct xfs_da_args *args);
/*
* Routines used for growing the Btree.
*/
-int xfs_attr_leaf_split(struct xfs_da_state *state,
+int xfs_attr3_leaf_split(struct xfs_da_state *state,
struct xfs_da_state_blk *oldblk,
struct xfs_da_state_blk *newblk);
-int xfs_attr_leaf_lookup_int(struct xfs_dabuf *leaf,
+int xfs_attr3_leaf_lookup_int(struct xfs_buf *leaf,
struct xfs_da_args *args);
-int xfs_attr_leaf_getvalue(struct xfs_dabuf *bp, struct xfs_da_args *args);
-int xfs_attr_leaf_add(struct xfs_dabuf *leaf_buffer,
+int xfs_attr3_leaf_getvalue(struct xfs_buf *bp, struct xfs_da_args *args);
+int xfs_attr3_leaf_add(struct xfs_buf *leaf_buffer,
struct xfs_da_args *args);
-int xfs_attr_leaf_remove(struct xfs_dabuf *leaf_buffer,
+int xfs_attr3_leaf_remove(struct xfs_buf *leaf_buffer,
struct xfs_da_args *args);
-int xfs_attr_leaf_list_int(struct xfs_dabuf *bp,
+int xfs_attr3_leaf_list_int(struct xfs_buf *bp,
struct xfs_attr_list_context *context);
/*
* Routines used for shrinking the Btree.
*/
-int xfs_attr_leaf_toosmall(struct xfs_da_state *state, int *retval);
-void xfs_attr_leaf_unbalance(struct xfs_da_state *state,
+int xfs_attr3_leaf_toosmall(struct xfs_da_state *state, int *retval);
+void xfs_attr3_leaf_unbalance(struct xfs_da_state *state,
struct xfs_da_state_blk *drop_blk,
struct xfs_da_state_blk *save_blk);
-int xfs_attr_root_inactive(struct xfs_trans **trans, struct xfs_inode *dp);
+int xfs_attr3_root_inactive(struct xfs_trans **trans, struct xfs_inode *dp);
/*
* Utility routines.
*/
-xfs_dahash_t xfs_attr_leaf_lasthash(struct xfs_dabuf *bp, int *count);
-int xfs_attr_leaf_order(struct xfs_dabuf *leaf1_bp,
- struct xfs_dabuf *leaf2_bp);
-int xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize,
- int *local);
-int xfs_attr_rolltrans(struct xfs_trans **transp, struct xfs_inode *dp);
+xfs_dahash_t xfs_attr_leaf_lasthash(struct xfs_buf *bp, int *count);
+int xfs_attr_leaf_order(struct xfs_buf *leaf1_bp,
+ struct xfs_buf *leaf2_bp);
+int xfs_attr_leaf_newentsize(struct xfs_da_args *args, int *local);
+int xfs_attr3_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp,
+ xfs_dablk_t bno, xfs_daddr_t mappedbno,
+ struct xfs_buf **bpp);
+void xfs_attr3_leaf_hdr_from_disk(struct xfs_attr3_icleaf_hdr *to,
+ struct xfs_attr_leafblock *from);
+void xfs_attr3_leaf_hdr_to_disk(struct xfs_attr_leafblock *to,
+ struct xfs_attr3_icleaf_hdr *from);
#endif /* __XFS_ATTR_LEAF_H__ */
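
For reference, the entsize helpers removed above cap a "local" entry at three quarters of the block size; a worked example assuming a 4096-byte attribute block (numbers are illustrative, not taken from this patch):

	int	bsize = 4096;
	int	local_max = (bsize >> 1) + (bsize >> 2);	/* 2048 + 1024 = 3072 */
	/* an aligned name/value entry of 3072 bytes or more is stored remotely */
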
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
new file mode 100644
index 00000000000..90e2eeb2120
--- /dev/null
+++ b/fs/xfs/xfs_attr_list.c
@@ -0,0 +1,653 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_inode_item.h"
+#include "xfs_bmap.h"
+#include "xfs_attr.h"
+#include "xfs_attr_sf.h"
+#include "xfs_attr_remote.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_error.h"
+#include "xfs_trace.h"
+#include "xfs_buf_item.h"
+#include "xfs_cksum.h"
+#include "xfs_dinode.h"
+#include "xfs_dir2.h"
+
+STATIC int
+xfs_attr_shortform_compare(const void *a, const void *b)
+{
+ xfs_attr_sf_sort_t *sa, *sb;
+
+ sa = (xfs_attr_sf_sort_t *)a;
+ sb = (xfs_attr_sf_sort_t *)b;
+ if (sa->hash < sb->hash) {
+ return(-1);
+ } else if (sa->hash > sb->hash) {
+ return(1);
+ } else {
+ return(sa->entno - sb->entno);
+ }
+}
+
+#define XFS_ISRESET_CURSOR(cursor) \
+ (!((cursor)->initted) && !((cursor)->hashval) && \
+ !((cursor)->blkno) && !((cursor)->offset))
+/*
+ * Copy out entries of shortform attribute lists for attr_list().
+ * Shortform attribute lists are not stored in hashval sorted order.
+ * If the output buffer is not large enough to hold them all, then
+ * we have to calculate each entry's hashvalue and sort them before
+ * we can begin returning them to the user.
+ */
+int
+xfs_attr_shortform_list(xfs_attr_list_context_t *context)
+{
+ attrlist_cursor_kern_t *cursor;
+ xfs_attr_sf_sort_t *sbuf, *sbp;
+ xfs_attr_shortform_t *sf;
+ xfs_attr_sf_entry_t *sfe;
+ xfs_inode_t *dp;
+ int sbsize, nsbuf, count, i;
+ int error;
+
+ ASSERT(context != NULL);
+ dp = context->dp;
+ ASSERT(dp != NULL);
+ ASSERT(dp->i_afp != NULL);
+ sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
+ ASSERT(sf != NULL);
+ if (!sf->hdr.count)
+ return(0);
+ cursor = context->cursor;
+ ASSERT(cursor != NULL);
+
+ trace_xfs_attr_list_sf(context);
+
+ /*
+ * If the buffer is large enough and the cursor is at the start,
+ * do not bother with sorting since we will return everything in
+ * one buffer and another call using the cursor won't need to be
+ * made.
+ * Note the generous fudge factor of 16 overhead bytes per entry.
+ * If bufsize is zero then put_listent must be a search function
+ * and can just scan through what we have.
+ */
+ if (context->bufsize == 0 ||
+ (XFS_ISRESET_CURSOR(cursor) &&
+ (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
+ for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
+ error = context->put_listent(context,
+ sfe->flags,
+ sfe->nameval,
+ (int)sfe->namelen,
+ (int)sfe->valuelen,
+ &sfe->nameval[sfe->namelen]);
+
+ /*
+ * Either search callback finished early or
+ * didn't fit it all in the buffer after all.
+ */
+ if (context->seen_enough)
+ break;
+
+ if (error)
+ return error;
+ sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+ }
+ trace_xfs_attr_list_sf_all(context);
+ return(0);
+ }
+
+ /* do no more for a search callback */
+ if (context->bufsize == 0)
+ return 0;
+
+ /*
+ * It didn't all fit, so we have to sort everything on hashval.
+ */
+ sbsize = sf->hdr.count * sizeof(*sbuf);
+ sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
+
+ /*
+ * Scan the attribute list for the rest of the entries, storing
+ * the relevant info from only those that match into a buffer.
+ */
+ nsbuf = 0;
+ for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
+ if (unlikely(
+ ((char *)sfe < (char *)sf) ||
+ ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
+ XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
+ XFS_ERRLEVEL_LOW,
+ context->dp->i_mount, sfe);
+ kmem_free(sbuf);
+ return XFS_ERROR(EFSCORRUPTED);
+ }
+
+ sbp->entno = i;
+ sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
+ sbp->name = sfe->nameval;
+ sbp->namelen = sfe->namelen;
+ /* These are bytes, and both on-disk, don't endian-flip */
+ sbp->valuelen = sfe->valuelen;
+ sbp->flags = sfe->flags;
+ sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+ sbp++;
+ nsbuf++;
+ }
+
+ /*
+ * Sort the entries on hash then entno.
+ */
+ xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);
+
+ /*
+ * Re-find our place IN THE SORTED LIST.
+ */
+ count = 0;
+ cursor->initted = 1;
+ cursor->blkno = 0;
+ for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
+ if (sbp->hash == cursor->hashval) {
+ if (cursor->offset == count) {
+ break;
+ }
+ count++;
+ } else if (sbp->hash > cursor->hashval) {
+ break;
+ }
+ }
+ if (i == nsbuf) {
+ kmem_free(sbuf);
+ return(0);
+ }
+
+ /*
+ * Loop putting entries into the user buffer.
+ */
+ for ( ; i < nsbuf; i++, sbp++) {
+ if (cursor->hashval != sbp->hash) {
+ cursor->hashval = sbp->hash;
+ cursor->offset = 0;
+ }
+ error = context->put_listent(context,
+ sbp->flags,
+ sbp->name,
+ sbp->namelen,
+ sbp->valuelen,
+ &sbp->name[sbp->namelen]);
+ if (error)
+ return error;
+ if (context->seen_enough)
+ break;
+ cursor->offset++;
+ }
+
+ kmem_free(sbuf);
+ return(0);
+}
+
+STATIC int
+xfs_attr_node_list(xfs_attr_list_context_t *context)
+{
+ attrlist_cursor_kern_t *cursor;
+ xfs_attr_leafblock_t *leaf;
+ xfs_da_intnode_t *node;
+ struct xfs_attr3_icleaf_hdr leafhdr;
+ struct xfs_da3_icnode_hdr nodehdr;
+ struct xfs_da_node_entry *btree;
+ int error, i;
+ struct xfs_buf *bp;
+ struct xfs_inode *dp = context->dp;
+
+ trace_xfs_attr_node_list(context);
+
+ cursor = context->cursor;
+ cursor->initted = 1;
+
+ /*
+ * Do all sorts of validation on the passed-in cursor structure.
+ * If anything is amiss, ignore the cursor and look up the hashval
+ * starting from the btree root.
+ */
+ bp = NULL;
+ if (cursor->blkno > 0) {
+ error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1,
+ &bp, XFS_ATTR_FORK);
+ if ((error != 0) && (error != EFSCORRUPTED))
+ return(error);
+ if (bp) {
+ struct xfs_attr_leaf_entry *entries;
+
+ node = bp->b_addr;
+ switch (be16_to_cpu(node->hdr.info.magic)) {
+ case XFS_DA_NODE_MAGIC:
+ case XFS_DA3_NODE_MAGIC:
+ trace_xfs_attr_list_wrong_blk(context);
+ xfs_trans_brelse(NULL, bp);
+ bp = NULL;
+ break;
+ case XFS_ATTR_LEAF_MAGIC:
+ case XFS_ATTR3_LEAF_MAGIC:
+ leaf = bp->b_addr;
+ xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
+ entries = xfs_attr3_leaf_entryp(leaf);
+ if (cursor->hashval > be32_to_cpu(
+ entries[leafhdr.count - 1].hashval)) {
+ trace_xfs_attr_list_wrong_blk(context);
+ xfs_trans_brelse(NULL, bp);
+ bp = NULL;
+ } else if (cursor->hashval <= be32_to_cpu(
+ entries[0].hashval)) {
+ trace_xfs_attr_list_wrong_blk(context);
+ xfs_trans_brelse(NULL, bp);
+ bp = NULL;
+ }
+ break;
+ default:
+ trace_xfs_attr_list_wrong_blk(context);
+ xfs_trans_brelse(NULL, bp);
+ bp = NULL;
+ }
+ }
+ }
+
+ /*
+ * We did not find what we expected given the cursor's contents,
+ * so we start from the top and work down based on the hash value.
+ * Note that start of node block is same as start of leaf block.
+ */
+ if (bp == NULL) {
+ cursor->blkno = 0;
+ for (;;) {
+ __uint16_t magic;
+
+ error = xfs_da3_node_read(NULL, dp,
+ cursor->blkno, -1, &bp,
+ XFS_ATTR_FORK);
+ if (error)
+ return(error);
+ node = bp->b_addr;
+ magic = be16_to_cpu(node->hdr.info.magic);
+ if (magic == XFS_ATTR_LEAF_MAGIC ||
+ magic == XFS_ATTR3_LEAF_MAGIC)
+ break;
+ if (magic != XFS_DA_NODE_MAGIC &&
+ magic != XFS_DA3_NODE_MAGIC) {
+ XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
+ XFS_ERRLEVEL_LOW,
+ context->dp->i_mount,
+ node);
+ xfs_trans_brelse(NULL, bp);
+ return XFS_ERROR(EFSCORRUPTED);
+ }
+
+ dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ btree = dp->d_ops->node_tree_p(node);
+ for (i = 0; i < nodehdr.count; btree++, i++) {
+ if (cursor->hashval
+ <= be32_to_cpu(btree->hashval)) {
+ cursor->blkno = be32_to_cpu(btree->before);
+ trace_xfs_attr_list_node_descend(context,
+ btree);
+ break;
+ }
+ }
+ if (i == nodehdr.count) {
+ xfs_trans_brelse(NULL, bp);
+ return 0;
+ }
+ xfs_trans_brelse(NULL, bp);
+ }
+ }
+ ASSERT(bp != NULL);
+
+ /*
+ * Roll upward through the blocks, processing each leaf block in
+ * order. As long as there is space in the result buffer, keep
+ * adding the information.
+ */
+ for (;;) {
+ leaf = bp->b_addr;
+ error = xfs_attr3_leaf_list_int(bp, context);
+ if (error) {
+ xfs_trans_brelse(NULL, bp);
+ return error;
+ }
+ xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
+ if (context->seen_enough || leafhdr.forw == 0)
+ break;
+ cursor->blkno = leafhdr.forw;
+ xfs_trans_brelse(NULL, bp);
+ error = xfs_attr3_leaf_read(NULL, dp, cursor->blkno, -1, &bp);
+ if (error)
+ return error;
+ }
+ xfs_trans_brelse(NULL, bp);
+ return 0;
+}
+
+/*
+ * Copy out attribute list entries for attr_list(), for leaf attribute lists.
+ */
+int
+xfs_attr3_leaf_list_int(
+ struct xfs_buf *bp,
+ struct xfs_attr_list_context *context)
+{
+ struct attrlist_cursor_kern *cursor;
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_attr3_icleaf_hdr ichdr;
+ struct xfs_attr_leaf_entry *entries;
+ struct xfs_attr_leaf_entry *entry;
+ int retval;
+ int i;
+
+ trace_xfs_attr_list_leaf(context);
+
+ leaf = bp->b_addr;
+ xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+ entries = xfs_attr3_leaf_entryp(leaf);
+
+ cursor = context->cursor;
+ cursor->initted = 1;
+
+ /*
+ * Re-find our place in the leaf block if this is a new syscall.
+ */
+ if (context->resynch) {
+ entry = &entries[0];
+ for (i = 0; i < ichdr.count; entry++, i++) {
+ if (be32_to_cpu(entry->hashval) == cursor->hashval) {
+ if (cursor->offset == context->dupcnt) {
+ context->dupcnt = 0;
+ break;
+ }
+ context->dupcnt++;
+ } else if (be32_to_cpu(entry->hashval) >
+ cursor->hashval) {
+ context->dupcnt = 0;
+ break;
+ }
+ }
+ if (i == ichdr.count) {
+ trace_xfs_attr_list_notfound(context);
+ return 0;
+ }
+ } else {
+ entry = &entries[0];
+ i = 0;
+ }
+ context->resynch = 0;
+
+ /*
+ * We have found our place, start copying out the new attributes.
+ */
+ retval = 0;
+ for (; i < ichdr.count; entry++, i++) {
+ if (be32_to_cpu(entry->hashval) != cursor->hashval) {
+ cursor->hashval = be32_to_cpu(entry->hashval);
+ cursor->offset = 0;
+ }
+
+ if (entry->flags & XFS_ATTR_INCOMPLETE)
+ continue; /* skip incomplete entries */
+
+ if (entry->flags & XFS_ATTR_LOCAL) {
+ xfs_attr_leaf_name_local_t *name_loc =
+ xfs_attr3_leaf_name_local(leaf, i);
+
+ retval = context->put_listent(context,
+ entry->flags,
+ name_loc->nameval,
+ (int)name_loc->namelen,
+ be16_to_cpu(name_loc->valuelen),
+ &name_loc->nameval[name_loc->namelen]);
+ if (retval)
+ return retval;
+ } else {
+ xfs_attr_leaf_name_remote_t *name_rmt =
+ xfs_attr3_leaf_name_remote(leaf, i);
+
+ int valuelen = be32_to_cpu(name_rmt->valuelen);
+
+ if (context->put_value) {
+ xfs_da_args_t args;
+
+ memset((char *)&args, 0, sizeof(args));
+ args.geo = context->dp->i_mount->m_attr_geo;
+ args.dp = context->dp;
+ args.whichfork = XFS_ATTR_FORK;
+ args.valuelen = valuelen;
+ args.rmtvaluelen = valuelen;
+ args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
+ args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
+ args.rmtblkcnt = xfs_attr3_rmt_blocks(
+ args.dp->i_mount, valuelen);
+ retval = xfs_attr_rmtval_get(&args);
+ if (retval)
+ return retval;
+ retval = context->put_listent(context,
+ entry->flags,
+ name_rmt->name,
+ (int)name_rmt->namelen,
+ valuelen,
+ args.value);
+ kmem_free(args.value);
+ } else {
+ retval = context->put_listent(context,
+ entry->flags,
+ name_rmt->name,
+ (int)name_rmt->namelen,
+ valuelen,
+ NULL);
+ }
+ if (retval)
+ return retval;
+ }
+ if (context->seen_enough)
+ break;
+ cursor->offset++;
+ }
+ trace_xfs_attr_list_leaf_end(context);
+ return retval;
+}
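
The put_listent hook now receives the raw on-disk entry flags instead of an attrnames pointer; a minimal hypothetical callback with the same shape as xfs_attr_put_listent() below, shown only to illustrate the signature:

STATIC int
xfs_attr_count_listent(
	struct xfs_attr_list_context	*context,
	int				flags,
	unsigned char			*name,
	int				namelen,
	int				valuelen,
	unsigned char			*value)
{
	context->count++;		/* simply tally every entry seen */
	return 0;
}
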
+
+/*
+ * Copy out attribute entries for attr_list(), for leaf attribute lists.
+ */
+STATIC int
+xfs_attr_leaf_list(xfs_attr_list_context_t *context)
+{
+ int error;
+ struct xfs_buf *bp;
+
+ trace_xfs_attr_leaf_list(context);
+
+ context->cursor->blkno = 0;
+ error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp);
+ if (error)
+ return XFS_ERROR(error);
+
+ error = xfs_attr3_leaf_list_int(bp, context);
+ xfs_trans_brelse(NULL, bp);
+ return XFS_ERROR(error);
+}
+
+int
+xfs_attr_list_int(
+ xfs_attr_list_context_t *context)
+{
+ int error;
+ xfs_inode_t *dp = context->dp;
+ uint lock_mode;
+
+ XFS_STATS_INC(xs_attr_list);
+
+ if (XFS_FORCED_SHUTDOWN(dp->i_mount))
+ return EIO;
+
+ /*
+ * Decide on what work routines to call based on the inode size.
+ */
+ lock_mode = xfs_ilock_attr_map_shared(dp);
+ if (!xfs_inode_hasattr(dp)) {
+ error = 0;
+ } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+ error = xfs_attr_shortform_list(context);
+ } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+ error = xfs_attr_leaf_list(context);
+ } else {
+ error = xfs_attr_node_list(context);
+ }
+ xfs_iunlock(dp, lock_mode);
+ return error;
+}
+
+#define ATTR_ENTBASESIZE /* minimum bytes used by an attr */ \
+ (((struct attrlist_ent *) 0)->a_name - (char *) 0)
+#define ATTR_ENTSIZE(namelen) /* actual bytes used by an attr */ \
+ ((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \
+ & ~(sizeof(u_int32_t)-1))
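
A worked example of the rounding, assuming attrlist_ent begins with a single 32-bit a_valuelen so that ATTR_ENTBASESIZE is 4 (an assumption, not spelled out in this patch):

	int	namelen = 5;
	int	entsize = ATTR_ENTSIZE(namelen);	/* (4 + 5 + 1 + 3) & ~3 == 12 */
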
+
+/*
+ * Format an attribute and copy it out to the user's buffer.
+ * Take care to check values and protect against them changing later;
+ * we may be reading them directly out of a user buffer.
+ */
+STATIC int
+xfs_attr_put_listent(
+ xfs_attr_list_context_t *context,
+ int flags,
+ unsigned char *name,
+ int namelen,
+ int valuelen,
+ unsigned char *value)
+{
+ struct attrlist *alist = (struct attrlist *)context->alist;
+ attrlist_ent_t *aep;
+ int arraytop;
+
+ ASSERT(!(context->flags & ATTR_KERNOVAL));
+ ASSERT(context->count >= 0);
+ ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
+ ASSERT(context->firstu >= sizeof(*alist));
+ ASSERT(context->firstu <= context->bufsize);
+
+ /*
+ * Only list entries in the right namespace.
+ */
+ if (((context->flags & ATTR_SECURE) == 0) !=
+ ((flags & XFS_ATTR_SECURE) == 0))
+ return 0;
+ if (((context->flags & ATTR_ROOT) == 0) !=
+ ((flags & XFS_ATTR_ROOT) == 0))
+ return 0;
+
+ arraytop = sizeof(*alist) +
+ context->count * sizeof(alist->al_offset[0]);
+ context->firstu -= ATTR_ENTSIZE(namelen);
+ if (context->firstu < arraytop) {
+ trace_xfs_attr_list_full(context);
+ alist->al_more = 1;
+ context->seen_enough = 1;
+ return 1;
+ }
+
+ aep = (attrlist_ent_t *)&context->alist[context->firstu];
+ aep->a_valuelen = valuelen;
+ memcpy(aep->a_name, name, namelen);
+ aep->a_name[namelen] = 0;
+ alist->al_offset[context->count++] = context->firstu;
+ alist->al_count = context->count;
+ trace_xfs_attr_list_add(context);
+ return 0;
+}
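
A sketch of the output buffer layout that xfs_attr_put_listent() maintains, inferred from the code above: the offset array grows up from the attrlist header while entries grow down from the end of the buffer, and the list is declared full (al_more = 1) when the two would cross:

	/*
	 * low addresses                                          high addresses
	 * [ attrlist hdr | al_offset[0..count-1] | ...free... | entN | ... | ent0 ]
	 *                                ^ arraytop            ^ firstu
	 */
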
+
+/*
+ * Generate a list of extended attribute names and optionally
+ * also value lengths. A positive return value follows the XFS
+ * convention of being an error; zero or a negative return code
+ * is the (negated) length of the buffer returned, indicating
+ * success.
+ */
+int
+xfs_attr_list(
+ xfs_inode_t *dp,
+ char *buffer,
+ int bufsize,
+ int flags,
+ attrlist_cursor_kern_t *cursor)
+{
+ xfs_attr_list_context_t context;
+ struct attrlist *alist;
+ int error;
+
+ /*
+ * Validate the cursor.
+ */
+ if (cursor->pad1 || cursor->pad2)
+ return(XFS_ERROR(EINVAL));
+ if ((cursor->initted == 0) &&
+ (cursor->hashval || cursor->blkno || cursor->offset))
+ return XFS_ERROR(EINVAL);
+
+ /*
+ * Check for a properly aligned buffer.
+ */
+ if (((long)buffer) & (sizeof(int)-1))
+ return XFS_ERROR(EFAULT);
+ if (flags & ATTR_KERNOVAL)
+ bufsize = 0;
+
+ /*
+ * Initialize the output buffer.
+ */
+ memset(&context, 0, sizeof(context));
+ context.dp = dp;
+ context.cursor = cursor;
+ context.resynch = 1;
+ context.flags = flags;
+ context.alist = buffer;
+ context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */
+ context.firstu = context.bufsize;
+ context.put_listent = xfs_attr_put_listent;
+
+ alist = (struct attrlist *)context.alist;
+ alist->al_count = 0;
+ alist->al_more = 0;
+ alist->al_offset[0] = context.bufsize;
+
+ error = xfs_attr_list_int(&context);
+ ASSERT(error >= 0);
+ return error;
+}
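
A hypothetical caller sketch showing how the cursor and al_more cooperate to walk a large attribute list in bufsize-sized chunks (buffer handling is illustrative only):

	struct attrlist_cursor_kern	cursor = { 0 };
	struct attrlist			*alist = (struct attrlist *)buffer;
	int				error;

	do {
		error = xfs_attr_list(dp, buffer, bufsize, flags, &cursor);
		if (error)
			break;
		/* consume alist->al_count entries via alist->al_offset[] */
	} while (alist->al_more);
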
diff --git a/fs/xfs/xfs_attr_remote.c b/fs/xfs/xfs_attr_remote.c
new file mode 100644
index 00000000000..b5adfecbb8e
--- /dev/null
+++ b/fs/xfs/xfs_attr_remote.c
@@ -0,0 +1,628 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_inode.h"
+#include "xfs_alloc.h"
+#include "xfs_trans.h"
+#include "xfs_inode_item.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_attr.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_attr_remote.h"
+#include "xfs_trans_space.h"
+#include "xfs_trace.h"
+#include "xfs_cksum.h"
+#include "xfs_buf_item.h"
+#include "xfs_error.h"
+
+#define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */
+
+/*
+ * Each contiguous block has a header, so it is not just a simple attribute
+ * length to FSB conversion.
+ */
+int
+xfs_attr3_rmt_blocks(
+ struct xfs_mount *mp,
+ int attrlen)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
+ return (attrlen + buflen - 1) / buflen;
+ }
+ return XFS_B_TO_FSB(mp, attrlen);
+}
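
A worked example, assuming 4096-byte filesystem blocks and a 56-byte struct xfs_attr3_rmt_hdr (the header size is an assumption, not stated in this patch):

	int	buflen = 4096 - 56;				/* usable bytes per CRC block */
	int	crc_blocks   = (8100 + buflen - 1) / buflen;	/* 3 blocks */
	int	nocrc_blocks = (8100 + 4096 - 1) / 4096;	/* 2 blocks via XFS_B_TO_FSB */
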
+
+/*
+ * Checking of the remote attribute header is split into two parts. The verifier
+ * does CRC, location and bounds checking; the unpacking function checks the
+ * attribute parameters and owner.
+ */
+static bool
+xfs_attr3_rmt_hdr_ok(
+ void *ptr,
+ xfs_ino_t ino,
+ uint32_t offset,
+ uint32_t size,
+ xfs_daddr_t bno)
+{
+ struct xfs_attr3_rmt_hdr *rmt = ptr;
+
+ if (bno != be64_to_cpu(rmt->rm_blkno))
+ return false;
+ if (offset != be32_to_cpu(rmt->rm_offset))
+ return false;
+ if (size != be32_to_cpu(rmt->rm_bytes))
+ return false;
+ if (ino != be64_to_cpu(rmt->rm_owner))
+ return false;
+
+ /* ok */
+ return true;
+}
+
+static bool
+xfs_attr3_rmt_verify(
+ struct xfs_mount *mp,
+ void *ptr,
+ int fsbsize,
+ xfs_daddr_t bno)
+{
+ struct xfs_attr3_rmt_hdr *rmt = ptr;
+
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return false;
+ if (rmt->rm_magic != cpu_to_be32(XFS_ATTR3_RMT_MAGIC))
+ return false;
+ if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_uuid))
+ return false;
+ if (be64_to_cpu(rmt->rm_blkno) != bno)
+ return false;
+ if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
+ return false;
+ if (be32_to_cpu(rmt->rm_offset) +
+ be32_to_cpu(rmt->rm_bytes) > XATTR_SIZE_MAX)
+ return false;
+ if (rmt->rm_owner == 0)
+ return false;
+
+ return true;
+}
+
+static void
+xfs_attr3_rmt_read_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ char *ptr;
+ int len;
+ xfs_daddr_t bno;
+ int blksize = mp->m_attr_geo->blksize;
+
+ /* no verification of non-crc buffers */
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return;
+
+ ptr = bp->b_addr;
+ bno = bp->b_bn;
+ len = BBTOB(bp->b_length);
+ ASSERT(len >= blksize);
+
+ while (len > 0) {
+ if (!xfs_verify_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF)) {
+ xfs_buf_ioerror(bp, EFSBADCRC);
+ break;
+ }
+ if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
+ break;
+ }
+ len -= blksize;
+ ptr += blksize;
+ bno += BTOBB(blksize);
+ }
+
+ if (bp->b_error)
+ xfs_verifier_error(bp);
+ else
+ ASSERT(len == 0);
+}
+
+static void
+xfs_attr3_rmt_write_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_buf_log_item *bip = bp->b_fspriv;
+ char *ptr;
+ int len;
+ xfs_daddr_t bno;
+ int blksize = mp->m_attr_geo->blksize;
+
+ /* no verification of non-crc buffers */
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return;
+
+ ptr = bp->b_addr;
+ bno = bp->b_bn;
+ len = BBTOB(bp->b_length);
+ ASSERT(len >= blksize);
+
+ while (len > 0) {
+ if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
+ xfs_verifier_error(bp);
+ return;
+ }
+ if (bip) {
+ struct xfs_attr3_rmt_hdr *rmt;
+
+ rmt = (struct xfs_attr3_rmt_hdr *)ptr;
+ rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+ }
+ xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
+
+ len -= blksize;
+ ptr += blksize;
+ bno += BTOBB(blksize);
+ }
+ ASSERT(len == 0);
+}
+
+const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
+ .verify_read = xfs_attr3_rmt_read_verify,
+ .verify_write = xfs_attr3_rmt_write_verify,
+};
+
+STATIC int
+xfs_attr3_rmt_hdr_set(
+ struct xfs_mount *mp,
+ void *ptr,
+ xfs_ino_t ino,
+ uint32_t offset,
+ uint32_t size,
+ xfs_daddr_t bno)
+{
+ struct xfs_attr3_rmt_hdr *rmt = ptr;
+
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return 0;
+
+ rmt->rm_magic = cpu_to_be32(XFS_ATTR3_RMT_MAGIC);
+ rmt->rm_offset = cpu_to_be32(offset);
+ rmt->rm_bytes = cpu_to_be32(size);
+ uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_uuid);
+ rmt->rm_owner = cpu_to_be64(ino);
+ rmt->rm_blkno = cpu_to_be64(bno);
+
+ return sizeof(struct xfs_attr3_rmt_hdr);
+}
+
+/*
+ * Helper functions to copy attribute data in and out of the on-disk remote extents
+ */
+STATIC int
+xfs_attr_rmtval_copyout(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp,
+ xfs_ino_t ino,
+ int *offset,
+ int *valuelen,
+ __uint8_t **dst)
+{
+ char *src = bp->b_addr;
+ xfs_daddr_t bno = bp->b_bn;
+ int len = BBTOB(bp->b_length);
+ int blksize = mp->m_attr_geo->blksize;
+
+ ASSERT(len >= blksize);
+
+ while (len > 0 && *valuelen > 0) {
+ int hdr_size = 0;
+ int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, blksize);
+
+ byte_cnt = min(*valuelen, byte_cnt);
+
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (!xfs_attr3_rmt_hdr_ok(src, ino, *offset,
+ byte_cnt, bno)) {
+ xfs_alert(mp,
+"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)",
+ bno, *offset, byte_cnt, ino);
+ return EFSCORRUPTED;
+ }
+ hdr_size = sizeof(struct xfs_attr3_rmt_hdr);
+ }
+
+ memcpy(*dst, src + hdr_size, byte_cnt);
+
+ /* roll buffer forwards */
+ len -= blksize;
+ src += blksize;
+ bno += BTOBB(blksize);
+
+ /* roll attribute data forwards */
+ *valuelen -= byte_cnt;
+ *dst += byte_cnt;
+ *offset += byte_cnt;
+ }
+ return 0;
+}
+
+STATIC void
+xfs_attr_rmtval_copyin(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp,
+ xfs_ino_t ino,
+ int *offset,
+ int *valuelen,
+ __uint8_t **src)
+{
+ char *dst = bp->b_addr;
+ xfs_daddr_t bno = bp->b_bn;
+ int len = BBTOB(bp->b_length);
+ int blksize = mp->m_attr_geo->blksize;
+
+ ASSERT(len >= blksize);
+
+ while (len > 0 && *valuelen > 0) {
+ int hdr_size;
+ int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, blksize);
+
+ byte_cnt = min(*valuelen, byte_cnt);
+ hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset,
+ byte_cnt, bno);
+
+ memcpy(dst + hdr_size, *src, byte_cnt);
+
+ /*
+ * If this is the last block, zero the remainder of it.
+ * Check that we are actually the last block, too.
+ */
+ if (byte_cnt + hdr_size < blksize) {
+ ASSERT(*valuelen - byte_cnt == 0);
+ ASSERT(len == blksize);
+ memset(dst + hdr_size + byte_cnt, 0,
+ blksize - hdr_size - byte_cnt);
+ }
+
+ /* roll buffer forwards */
+ len -= blksize;
+ dst += blksize;
+ bno += BTOBB(blksize);
+
+ /* roll attribute data forwards */
+ *valuelen -= byte_cnt;
+ *src += byte_cnt;
+ *offset += byte_cnt;
+ }
+}
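
The resulting layout of each remote value block on a CRC-enabled filesystem, as written by the copyin helper above (sketch; on non-CRC filesystems the header is omitted and blocks carry raw value bytes):

	/*
	 * [ xfs_attr3_rmt_hdr | up to XFS_ATTR3_RMT_BUF_SPACE() value bytes ]
	 * the final block is zero-padded after the last value byte
	 */
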
+
+/*
+ * Read the value associated with an attribute from the out-of-line buffer
+ * that we stored it in.
+ */
+int
+xfs_attr_rmtval_get(
+ struct xfs_da_args *args)
+{
+ struct xfs_bmbt_irec map[ATTR_RMTVALUE_MAPSIZE];
+ struct xfs_mount *mp = args->dp->i_mount;
+ struct xfs_buf *bp;
+ xfs_dablk_t lblkno = args->rmtblkno;
+ __uint8_t *dst = args->value;
+ int valuelen;
+ int nmap;
+ int error;
+ int blkcnt = args->rmtblkcnt;
+ int i;
+ int offset = 0;
+
+ trace_xfs_attr_rmtval_get(args);
+
+ ASSERT(!(args->flags & ATTR_KERNOVAL));
+ ASSERT(args->rmtvaluelen == args->valuelen);
+
+ valuelen = args->rmtvaluelen;
+ while (valuelen > 0) {
+ nmap = ATTR_RMTVALUE_MAPSIZE;
+ error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
+ blkcnt, map, &nmap,
+ XFS_BMAPI_ATTRFORK);
+ if (error)
+ return error;
+ ASSERT(nmap >= 1);
+
+ for (i = 0; (i < nmap) && (valuelen > 0); i++) {
+ xfs_daddr_t dblkno;
+ int dblkcnt;
+
+ ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
+ (map[i].br_startblock != HOLESTARTBLOCK));
+ dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
+ dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
+ error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
+ dblkno, dblkcnt, 0, &bp,
+ &xfs_attr3_rmt_buf_ops);
+ if (error)
+ return error;
+
+ error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino,
+ &offset, &valuelen,
+ &dst);
+ xfs_buf_relse(bp);
+ if (error)
+ return error;
+
+ /* roll attribute extent map forwards */
+ lblkno += map[i].br_blockcount;
+ blkcnt -= map[i].br_blockcount;
+ }
+ }
+ ASSERT(valuelen == 0);
+ return 0;
+}
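+
+/*
+ * Note: the reads above attach &xfs_attr3_rmt_buf_ops, so each remote
+ * block is checked by the read verifier on the way in, and
+ * xfs_attr_rmtval_copyout() then re-checks the per-block owner, offset
+ * and length against the expected values.
+ */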
+
+/*
+ * Write the value associated with an attribute into the out-of-line buffer
+ * that we have defined for it.
+ */
+int
+xfs_attr_rmtval_set(
+ struct xfs_da_args *args)
+{
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
+ struct xfs_bmbt_irec map;
+ xfs_dablk_t lblkno;
+ xfs_fileoff_t lfileoff = 0;
+ __uint8_t *src = args->value;
+ int blkcnt;
+ int valuelen;
+ int nmap;
+ int error;
+ int offset = 0;
+
+ trace_xfs_attr_rmtval_set(args);
+
+ /*
+ * Find a "hole" in the attribute address space large enough for
+	 * us to drop the new attribute's value into. Because CRC enabled
+ * attributes have headers, we can't just do a straight byte to FSB
+ * conversion and have to take the header space into account.
+ */
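+	/*
+	 * For example (illustrative numbers only): with 4096 byte blocks and
+	 * a 56 byte remote header, an 8100 byte value would fit in two blocks
+	 * by a plain byte-to-FSB conversion, but needs three once the
+	 * per-block header space is subtracted, which is what
+	 * xfs_attr3_rmt_blocks() accounts for on a v5 filesystem.
+	 */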
+ blkcnt = xfs_attr3_rmt_blocks(mp, args->rmtvaluelen);
+ error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
+ XFS_ATTR_FORK);
+ if (error)
+ return error;
+
+ args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff;
+ args->rmtblkcnt = blkcnt;
+
+ /*
+ * Roll through the "value", allocating blocks on disk as required.
+ */
+ while (blkcnt > 0) {
+ int committed;
+
+ /*
+ * Allocate a single extent, up to the size of the value.
+ */
+ xfs_bmap_init(args->flist, args->firstblock);
+ nmap = 1;
+ error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
+ blkcnt,
+ XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
+ args->firstblock, args->total, &map, &nmap,
+ args->flist);
+ if (!error) {
+ error = xfs_bmap_finish(&args->trans, args->flist,
+ &committed);
+ }
+ if (error) {
+ ASSERT(committed);
+ args->trans = NULL;
+ xfs_bmap_cancel(args->flist);
+			return error;
+ }
+
+ /*
+ * bmap_finish() may have committed the last trans and started
+ * a new one. We need the inode to be in all transactions.
+ */
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp, 0);
+
+ ASSERT(nmap == 1);
+ ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
+ (map.br_startblock != HOLESTARTBLOCK));
+ lblkno += map.br_blockcount;
+ blkcnt -= map.br_blockcount;
+
+ /*
+ * Start the next trans in the chain.
+ */
+ error = xfs_trans_roll(&args->trans, dp);
+ if (error)
+			return error;
+ }
+
+ /*
+ * Roll through the "value", copying the attribute value to the
+ * already-allocated blocks. Blocks are written synchronously
+ * so that we can know they are all on disk before we turn off
+ * the INCOMPLETE flag.
+ */
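+	/*
+	 * Note that this second pass writes the data through xfs_bwrite()
+	 * rather than a transaction: the blocks were already allocated by the
+	 * loop above, so all that is left is to fill in their contents.
+	 */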
+ lblkno = args->rmtblkno;
+ blkcnt = args->rmtblkcnt;
+ valuelen = args->rmtvaluelen;
+ while (valuelen > 0) {
+ struct xfs_buf *bp;
+ xfs_daddr_t dblkno;
+ int dblkcnt;
+
+ ASSERT(blkcnt > 0);
+
+ xfs_bmap_init(args->flist, args->firstblock);
+ nmap = 1;
+ error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
+ blkcnt, &map, &nmap,
+ XFS_BMAPI_ATTRFORK);
+ if (error)
+			return error;
+ ASSERT(nmap == 1);
+ ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
+ (map.br_startblock != HOLESTARTBLOCK));
+
+		dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
+ dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
+
+ bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0);
+ if (!bp)
+ return ENOMEM;
+ bp->b_ops = &xfs_attr3_rmt_buf_ops;
+
+ xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset,
+ &valuelen, &src);
+
+ error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */
+ xfs_buf_relse(bp);
+ if (error)
+ return error;
+
+ /* roll attribute extent map forwards */
+ lblkno += map.br_blockcount;
+ blkcnt -= map.br_blockcount;
+ }
+ ASSERT(valuelen == 0);
+ return 0;
+}
+
+/*
+ * Remove the value associated with an attribute by deleting the
+ * out-of-line buffer that it is stored on.
+ */
+int
+xfs_attr_rmtval_remove(
+ struct xfs_da_args *args)
+{
+ struct xfs_mount *mp = args->dp->i_mount;
+ xfs_dablk_t lblkno;
+ int blkcnt;
+ int error;
+ int done;
+
+ trace_xfs_attr_rmtval_remove(args);
+
+ /*
+ * Roll through the "value", invalidating the attribute value's blocks.
+ */
+ lblkno = args->rmtblkno;
+ blkcnt = args->rmtblkcnt;
+ while (blkcnt > 0) {
+ struct xfs_bmbt_irec map;
+ struct xfs_buf *bp;
+ xfs_daddr_t dblkno;
+ int dblkcnt;
+ int nmap;
+
+ /*
+ * Try to remember where we decided to put the value.
+ */
+ nmap = 1;
+ error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
+ blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
+ if (error)
+			return error;
+ ASSERT(nmap == 1);
+ ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
+ (map.br_startblock != HOLESTARTBLOCK));
+
+		dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
+ dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
+
+ /*
+ * If the "remote" value is in the cache, remove it.
+ */
+ bp = xfs_incore(mp->m_ddev_targp, dblkno, dblkcnt, XBF_TRYLOCK);
+ if (bp) {
+ xfs_buf_stale(bp);
+ xfs_buf_relse(bp);
+ bp = NULL;
+ }
+
+ lblkno += map.br_blockcount;
+ blkcnt -= map.br_blockcount;
+ }
+
+ /*
+ * Keep de-allocating extents until the remote-value region is gone.
+ */
+ lblkno = args->rmtblkno;
+ blkcnt = args->rmtblkcnt;
+ done = 0;
+ while (!done) {
+ int committed;
+
+ xfs_bmap_init(args->flist, args->firstblock);
+ error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
+ XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
+ 1, args->firstblock, args->flist,
+ &done);
+ if (!error) {
+ error = xfs_bmap_finish(&args->trans, args->flist,
+ &committed);
+ }
+ if (error) {
+ ASSERT(committed);
+ args->trans = NULL;
+ xfs_bmap_cancel(args->flist);
+ return error;
+ }
+
+ /*
+ * bmap_finish() may have committed the last trans and started
+ * a new one. We need the inode to be in all transactions.
+ */
+ if (committed)
+ xfs_trans_ijoin(args->trans, args->dp, 0);
+
+ /*
+ * Close out trans and start the next one in the chain.
+ */
+ error = xfs_trans_roll(&args->trans, args->dp);
+ if (error)
+			return error;
+ }
+	return 0;
+}
diff --git a/fs/xfs/linux-2.6/xfs_version.h b/fs/xfs/xfs_attr_remote.h
index f8d279d7563..5a9acfa156d 100644
--- a/fs/xfs/linux-2.6/xfs_version.h
+++ b/fs/xfs/xfs_attr_remote.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001-2002,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -15,15 +15,13 @@
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#ifndef __XFS_VERSION_H__
-#define __XFS_VERSION_H__
+#ifndef __XFS_ATTR_REMOTE_H__
+#define __XFS_ATTR_REMOTE_H__
-/*
- * Dummy file that can contain a timestamp to put into the
- * XFS init string, to help users keep track of what they're
- * running
- */
+int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
-#define XFS_VERSION_STRING "SGI XFS"
+int xfs_attr_rmtval_get(struct xfs_da_args *args);
+int xfs_attr_rmtval_set(struct xfs_da_args *args);
+int xfs_attr_rmtval_remove(struct xfs_da_args *args);
-#endif /* __XFS_VERSION_H__ */
+#endif /* __XFS_ATTR_REMOTE_H__ */
diff --git a/fs/xfs/xfs_attr_sf.h b/fs/xfs/xfs_attr_sf.h
index f67f917803b..919756e3ba5 100644
--- a/fs/xfs/xfs_attr_sf.h
+++ b/fs/xfs/xfs_attr_sf.h
@@ -25,8 +25,6 @@
* to fit into the literal area of the inode.
*/
-struct xfs_inode;
-
/*
* Entries are packed toward the top as tight as possible.
*/
@@ -54,7 +52,7 @@ typedef struct xfs_attr_sf_sort {
__uint8_t valuelen; /* length of value */
__uint8_t flags; /* flags bits (see xfs_attr_leaf.h) */
xfs_dahash_t hash; /* this entry's hash value */
- char *name; /* name value, pointer into buffer */
+ unsigned char *name; /* name value, pointer into buffer */
} xfs_attr_sf_sort_t;
#define XFS_ATTR_SF_ENTSIZE_BYNAME(nlen,vlen) /* space name/value uses */ \
@@ -69,46 +67,4 @@ typedef struct xfs_attr_sf_sort {
(be16_to_cpu(((xfs_attr_shortform_t *) \
((dp)->i_afp->if_u1.if_data))->hdr.totsize))
-#if defined(XFS_ATTR_TRACE)
-/*
- * Kernel tracing support for attribute lists
- */
-struct xfs_attr_list_context;
-struct xfs_da_intnode;
-struct xfs_da_node_entry;
-struct xfs_attr_leafblock;
-
-#define XFS_ATTR_TRACE_SIZE 4096 /* size of global trace buffer */
-extern ktrace_t *xfs_attr_trace_buf;
-
-/*
- * Trace record types.
- */
-#define XFS_ATTR_KTRACE_L_C 1 /* context */
-#define XFS_ATTR_KTRACE_L_CN 2 /* context, node */
-#define XFS_ATTR_KTRACE_L_CB 3 /* context, btree */
-#define XFS_ATTR_KTRACE_L_CL 4 /* context, leaf */
-
-void xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context);
-void xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
- struct xfs_da_intnode *node);
-void xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
- struct xfs_da_node_entry *btree);
-void xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
- struct xfs_attr_leafblock *leaf);
-void xfs_attr_trace_enter(int type, char *where,
- __psunsigned_t a2, __psunsigned_t a3,
- __psunsigned_t a4, __psunsigned_t a5,
- __psunsigned_t a6, __psunsigned_t a7,
- __psunsigned_t a8, __psunsigned_t a9,
- __psunsigned_t a10, __psunsigned_t a11,
- __psunsigned_t a12, __psunsigned_t a13,
- __psunsigned_t a14, __psunsigned_t a15);
-#else
-#define xfs_attr_trace_l_c(w,c)
-#define xfs_attr_trace_l_cn(w,c,n)
-#define xfs_attr_trace_l_cb(w,c,b)
-#define xfs_attr_trace_l_cl(w,c,l)
-#endif /* XFS_ATTR_TRACE */
-
#endif /* __XFS_ATTR_SF_H__ */
diff --git a/fs/xfs/xfs_bit.c b/fs/xfs/xfs_bit.c
index fab0b6d5a41..0e8885a5964 100644
--- a/fs/xfs/xfs_bit.c
+++ b/fs/xfs/xfs_bit.c
@@ -16,118 +16,13 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
+#include "xfs_log_format.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_buf_item.h"
/*
* XFS bit manipulation routines, used in non-realtime code.
*/
-#ifndef HAVE_ARCH_HIGHBIT
-/*
- * Index of high bit number in byte, -1 for none set, 0..7 otherwise.
- */
-static const char xfs_highbit[256] = {
- -1, 0, 1, 1, 2, 2, 2, 2, /* 00 .. 07 */
- 3, 3, 3, 3, 3, 3, 3, 3, /* 08 .. 0f */
- 4, 4, 4, 4, 4, 4, 4, 4, /* 10 .. 17 */
- 4, 4, 4, 4, 4, 4, 4, 4, /* 18 .. 1f */
- 5, 5, 5, 5, 5, 5, 5, 5, /* 20 .. 27 */
- 5, 5, 5, 5, 5, 5, 5, 5, /* 28 .. 2f */
- 5, 5, 5, 5, 5, 5, 5, 5, /* 30 .. 37 */
- 5, 5, 5, 5, 5, 5, 5, 5, /* 38 .. 3f */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 40 .. 47 */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 48 .. 4f */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 50 .. 57 */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 58 .. 5f */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 60 .. 67 */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 68 .. 6f */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 70 .. 77 */
- 6, 6, 6, 6, 6, 6, 6, 6, /* 78 .. 7f */
- 7, 7, 7, 7, 7, 7, 7, 7, /* 80 .. 87 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* 88 .. 8f */
- 7, 7, 7, 7, 7, 7, 7, 7, /* 90 .. 97 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* 98 .. 9f */
- 7, 7, 7, 7, 7, 7, 7, 7, /* a0 .. a7 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* a8 .. af */
- 7, 7, 7, 7, 7, 7, 7, 7, /* b0 .. b7 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* b8 .. bf */
- 7, 7, 7, 7, 7, 7, 7, 7, /* c0 .. c7 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* c8 .. cf */
- 7, 7, 7, 7, 7, 7, 7, 7, /* d0 .. d7 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* d8 .. df */
- 7, 7, 7, 7, 7, 7, 7, 7, /* e0 .. e7 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* e8 .. ef */
- 7, 7, 7, 7, 7, 7, 7, 7, /* f0 .. f7 */
- 7, 7, 7, 7, 7, 7, 7, 7, /* f8 .. ff */
-};
-#endif
-
-/*
- * xfs_highbit32: get high bit set out of 32-bit argument, -1 if none set.
- */
-inline int
-xfs_highbit32(
- __uint32_t v)
-{
-#ifdef HAVE_ARCH_HIGHBIT
- return highbit32(v);
-#else
- int i;
-
- if (v & 0xffff0000)
- if (v & 0xff000000)
- i = 24;
- else
- i = 16;
- else if (v & 0x0000ffff)
- if (v & 0x0000ff00)
- i = 8;
- else
- i = 0;
- else
- return -1;
- return i + xfs_highbit[(v >> i) & 0xff];
-#endif
-}
-
-/*
- * xfs_lowbit64: get low bit set out of 64-bit argument, -1 if none set.
- */
-int
-xfs_lowbit64(
- __uint64_t v)
-{
- __uint32_t w = (__uint32_t)v;
- int n = 0;
-
- if (w) { /* lower bits */
- n = ffs(w);
- } else { /* upper bits */
- w = (__uint32_t)(v >> 32);
- if (w && (n = ffs(w)))
- n += 32;
- }
- return n - 1;
-}
-
-/*
- * xfs_highbit64: get high bit set out of 64-bit argument, -1 if none set.
- */
-int
-xfs_highbit64(
- __uint64_t v)
-{
- __uint32_t h = (__uint32_t)(v >> 32);
-
- if (h)
- return xfs_highbit32(h) + 32;
- return xfs_highbit32((__uint32_t)v);
-}
-
-
/*
* Return whether bitmap is empty.
* Size is number of words in the bitmap, which is padded to word boundary
diff --git a/fs/xfs/xfs_bit.h b/fs/xfs/xfs_bit.h
index 082641a9782..e1649c0d3e0 100644
--- a/fs/xfs/xfs_bit.h
+++ b/fs/xfs/xfs_bit.h
@@ -23,37 +23,57 @@
*/
/*
- * masks with n high/low bits set, 32-bit values & 64-bit values
+ * masks with n high/low bits set, 64-bit values
*/
-#define XFS_MASK32HI(n) xfs_mask32hi(n)
-static inline __uint32_t xfs_mask32hi(int n)
-{
- return (__uint32_t)-1 << (32 - (n));
-}
-#define XFS_MASK64HI(n) xfs_mask64hi(n)
static inline __uint64_t xfs_mask64hi(int n)
{
return (__uint64_t)-1 << (64 - (n));
}
-#define XFS_MASK32LO(n) xfs_mask32lo(n)
static inline __uint32_t xfs_mask32lo(int n)
{
return ((__uint32_t)1 << (n)) - 1;
}
-#define XFS_MASK64LO(n) xfs_mask64lo(n)
static inline __uint64_t xfs_mask64lo(int n)
{
return ((__uint64_t)1 << (n)) - 1;
}
/* Get high bit set out of 32-bit argument, -1 if none set */
-extern int xfs_highbit32(__uint32_t v);
+static inline int xfs_highbit32(__uint32_t v)
+{
+ return fls(v) - 1;
+}
+
+/* Get high bit set out of 64-bit argument, -1 if none set */
+static inline int xfs_highbit64(__uint64_t v)
+{
+ return fls64(v) - 1;
+}
+
+/* Get low bit set out of 32-bit argument, -1 if none set */
+static inline int xfs_lowbit32(__uint32_t v)
+{
+ return ffs(v) - 1;
+}
/* Get low bit set out of 64-bit argument, -1 if none set */
-extern int xfs_lowbit64(__uint64_t v);
+static inline int xfs_lowbit64(__uint64_t v)
+{
+ __uint32_t w = (__uint32_t)v;
+ int n = 0;
-/* Get high bit set out of 64-bit argument, -1 if none set */
-extern int xfs_highbit64(__uint64_t);
+ if (w) { /* lower bits */
+ n = ffs(w);
+ } else { /* upper bits */
+ w = (__uint32_t)(v >> 32);
+ if (w) {
+ n = ffs(w);
+ if (n)
+ n += 32;
+ }
+ }
+ return n - 1;
+}
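+
+/*
+ * Example values: xfs_highbit32(0) == -1, xfs_highbit32(0x10) == 4,
+ * xfs_lowbit64(0x8000000000000000ULL) == 63.
+ */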
/* Return whether bitmap is empty (1 == empty) */
extern int xfs_bitmap_empty(uint *map, uint size);
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 53c259f5a5a..75c3fe5f3d9 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -17,160 +17,667 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
#include "xfs_inum.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
+#include "xfs_mount.h"
+#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
+#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
-#include "xfs_dmapi.h"
-#include "xfs_mount.h"
-#include "xfs_ialloc.h"
-#include "xfs_itable.h"
-#include "xfs_dir2_data.h"
-#include "xfs_dir2_leaf.h"
-#include "xfs_dir2_block.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
-#include "xfs_attr_leaf.h"
-#include "xfs_rw.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
+#include "xfs_trace.h"
+#include "xfs_symlink.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_dinode.h"
#include "xfs_filestream.h"
-#include "xfs_vnodeops.h"
-#ifdef DEBUG
+kmem_zone_t *xfs_bmap_free_item_zone;
+
+/*
+ * Miscellaneous helper functions
+ */
+
+/*
+ * Compute and fill in the value of the maximum depth of a bmap btree
+ * in this filesystem. Done once, during mount.
+ */
+void
+xfs_bmap_compute_maxlevels(
+ xfs_mount_t *mp, /* file system mount structure */
+ int whichfork) /* data or attr fork */
+{
+ int level; /* btree level */
+ uint maxblocks; /* max blocks at this level */
+ uint maxleafents; /* max leaf entries possible */
+ int maxrootrecs; /* max records in root block */
+ int minleafrecs; /* min records in leaf block */
+ int minnoderecs; /* min records in node block */
+ int sz; /* root block size */
+
+ /*
+ * The maximum number of extents in a file, hence the maximum
+ * number of leaf entries, is controlled by the type of di_nextents
+ * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
+ * (a signed 16-bit number, xfs_aextnum_t).
+ *
+ * Note that we can no longer assume that if we are in ATTR1 that
+ * the fork offset of all the inodes will be
+ * (xfs_default_attroffset(ip) >> 3) because we could have mounted
+ * with ATTR2 and then mounted back with ATTR1, keeping the
+ * di_forkoff's fixed but probably at various positions. Therefore,
+ * for both ATTR1 and ATTR2 we have to assume the worst case scenario
+ * of a minimum size available.
+ */
+ if (whichfork == XFS_DATA_FORK) {
+ maxleafents = MAXEXTNUM;
+ sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
+ } else {
+ maxleafents = MAXAEXTNUM;
+ sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
+ }
+ maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
+ minleafrecs = mp->m_bmap_dmnr[0];
+ minnoderecs = mp->m_bmap_dmnr[1];
+ maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
+ for (level = 1; maxblocks > 1; level++) {
+ if (maxblocks <= maxrootrecs)
+ maxblocks = 1;
+ else
+ maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
+ }
+ mp->m_bm_maxlevels[whichfork] = level;
+}
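+
+/*
+ * Rough worked example (hypothetical numbers; the real record counts are
+ * derived from the filesystem block size): with maxleafents = 9999 and
+ * minimum leaf/node/root record counts of 10, the loop above goes from
+ * 1000 blocks to 100, 10 and finally 1, giving a maximum depth of 4
+ * levels including the leaf level.
+ */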
+
+STATIC int /* error */
+xfs_bmbt_lookup_eq(
+ struct xfs_btree_cur *cur,
+ xfs_fileoff_t off,
+ xfs_fsblock_t bno,
+ xfs_filblks_t len,
+ int *stat) /* success/failure */
+{
+ cur->bc_rec.b.br_startoff = off;
+ cur->bc_rec.b.br_startblock = bno;
+ cur->bc_rec.b.br_blockcount = len;
+ return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
+}
+
+STATIC int /* error */
+xfs_bmbt_lookup_ge(
+ struct xfs_btree_cur *cur,
+ xfs_fileoff_t off,
+ xfs_fsblock_t bno,
+ xfs_filblks_t len,
+ int *stat) /* success/failure */
+{
+ cur->bc_rec.b.br_startoff = off;
+ cur->bc_rec.b.br_startblock = bno;
+ cur->bc_rec.b.br_blockcount = len;
+ return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
+}
+
+/*
+ * Check if the inode needs to be converted to btree format.
+ */
+static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
+{
+ return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_NEXTENTS(ip, whichfork) >
+ XFS_IFORK_MAXEXT(ip, whichfork);
+}
+
+/*
+ * Check if the inode should be converted to extent format.
+ */
+static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
+{
+ return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
+ XFS_IFORK_NEXTENTS(ip, whichfork) <=
+ XFS_IFORK_MAXEXT(ip, whichfork);
+}
+
+/*
+ * Update the record referred to by cur to the value given
+ * by [off, bno, len, state].
+ * This either works (return 0) or gets an EFSCORRUPTED error.
+ */
+STATIC int
+xfs_bmbt_update(
+ struct xfs_btree_cur *cur,
+ xfs_fileoff_t off,
+ xfs_fsblock_t bno,
+ xfs_filblks_t len,
+ xfs_exntst_t state)
+{
+ union xfs_btree_rec rec;
+
+ xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
+ return xfs_btree_update(cur, &rec);
+}
+
+/*
+ * Compute the worst-case number of indirect blocks that will be used
+ * for ip's delayed extent of length "len".
+ */
+STATIC xfs_filblks_t
+xfs_bmap_worst_indlen(
+ xfs_inode_t *ip, /* incore inode pointer */
+ xfs_filblks_t len) /* delayed extent length */
+{
+ int level; /* btree level number */
+ int maxrecs; /* maximum record count at this level */
+ xfs_mount_t *mp; /* mount structure */
+ xfs_filblks_t rval; /* return value */
+
+ mp = ip->i_mount;
+ maxrecs = mp->m_bmap_dmxr[0];
+ for (level = 0, rval = 0;
+ level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
+ level++) {
+ len += maxrecs - 1;
+ do_div(len, maxrecs);
+ rval += len;
+ if (len == 1)
+ return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
+ level - 1;
+ if (level == 0)
+ maxrecs = mp->m_bmap_dmxr[1];
+ }
+ return rval;
+}
+
+/*
+ * Calculate the default attribute fork offset for newly created inodes.
+ */
+uint
+xfs_default_attroffset(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ uint offset;
+
+ if (mp->m_sb.sb_inodesize == 256) {
+ offset = XFS_LITINO(mp, ip->i_d.di_version) -
+ XFS_BMDR_SPACE_CALC(MINABTPTRS);
+ } else {
+ offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
+ }
+
+ ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
+ return offset;
+}
+
+/*
+ * Helper routine to reset inode di_forkoff field when switching
+ * attribute fork from local to extent format - we reset it where
+ * possible to make space available for inline data fork extents.
+ */
STATIC void
-xfs_bmap_check_leaf_extents(xfs_btree_cur_t *cur, xfs_inode_t *ip, int whichfork);
-#endif
+xfs_bmap_forkoff_reset(
+ xfs_inode_t *ip,
+ int whichfork)
+{
+ if (whichfork == XFS_ATTR_FORK &&
+ ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
+ ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
+ ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
+ uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
-kmem_zone_t *xfs_bmap_free_item_zone;
+ if (dfl_forkoff > ip->i_d.di_forkoff)
+ ip->i_d.di_forkoff = dfl_forkoff;
+ }
+}
/*
- * Prototypes for internal bmap routines.
+ * Debug/sanity checking code
*/
+STATIC int
+xfs_bmap_sanity_check(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp,
+ int level)
+{
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+
+ if (block->bb_magic != cpu_to_be32(XFS_BMAP_CRC_MAGIC) &&
+ block->bb_magic != cpu_to_be32(XFS_BMAP_MAGIC))
+ return 0;
+
+ if (be16_to_cpu(block->bb_level) != level ||
+ be16_to_cpu(block->bb_numrecs) == 0 ||
+ be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
+ return 0;
+
+ return 1;
+}
+
+#ifdef DEBUG
+STATIC struct xfs_buf *
+xfs_bmap_get_bp(
+ struct xfs_btree_cur *cur,
+ xfs_fsblock_t bno)
+{
+ struct xfs_log_item_desc *lidp;
+ int i;
+
+ if (!cur)
+ return NULL;
+
+ for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
+ if (!cur->bc_bufs[i])
+ break;
+ if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
+ return cur->bc_bufs[i];
+ }
+
+ /* Chase down all the log items to see if the bp is there */
+ list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
+ struct xfs_buf_log_item *bip;
+ bip = (struct xfs_buf_log_item *)lidp->lid_item;
+ if (bip->bli_item.li_type == XFS_LI_BUF &&
+ XFS_BUF_ADDR(bip->bli_buf) == bno)
+ return bip->bli_buf;
+ }
+
+ return NULL;
+}
+
+STATIC void
+xfs_check_block(
+ struct xfs_btree_block *block,
+ xfs_mount_t *mp,
+ int root,
+ short sz)
+{
+ int i, j, dmxr;
+ __be64 *pp, *thispa; /* pointer to block address */
+ xfs_bmbt_key_t *prevp, *keyp;
+
+ ASSERT(be16_to_cpu(block->bb_level) > 0);
+
+ prevp = NULL;
+	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
+ dmxr = mp->m_bmap_dmxr[0];
+ keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
+
+ if (prevp) {
+ ASSERT(be64_to_cpu(prevp->br_startoff) <
+ be64_to_cpu(keyp->br_startoff));
+ }
+ prevp = keyp;
+
+ /*
+ * Compare the block numbers to see if there are dups.
+ */
+ if (root)
+ pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
+ else
+ pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
+
+ for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
+ if (root)
+ thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
+ else
+ thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
+ if (*thispa == *pp) {
+ xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
+ __func__, j, i,
+ (unsigned long long)be64_to_cpu(*thispa));
+ panic("%s: ptrs are equal in node\n",
+ __func__);
+ }
+ }
+ }
+}
/*
- * Called from xfs_bmap_add_attrfork to handle extents format files.
+ * Check that the extents for the inode ip are in the right order in all
+ * btree leaves.
*/
-STATIC int /* error */
-xfs_bmap_add_attrfork_extents(
- xfs_trans_t *tp, /* transaction pointer */
+
+STATIC void
+xfs_bmap_check_leaf_extents(
+ xfs_btree_cur_t *cur, /* btree cursor or null */
xfs_inode_t *ip, /* incore inode pointer */
- xfs_fsblock_t *firstblock, /* first block allocated */
- xfs_bmap_free_t *flist, /* blocks to free at commit */
- int *flags); /* inode logging flags */
+ int whichfork) /* data or attr fork */
+{
+ struct xfs_btree_block *block; /* current btree block */
+ xfs_fsblock_t bno; /* block # of "block" */
+ xfs_buf_t *bp; /* buffer for "block" */
+ int error; /* error return value */
+ xfs_extnum_t i=0, j; /* index into the extents list */
+ xfs_ifork_t *ifp; /* fork structure */
+ int level; /* btree level, for checking */
+ xfs_mount_t *mp; /* file system mount structure */
+ __be64 *pp; /* pointer to block address */
+ xfs_bmbt_rec_t *ep; /* pointer to current extent */
+ xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */
+ xfs_bmbt_rec_t *nextp; /* pointer to next extent */
+ int bp_release = 0;
+
+ if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
+ return;
+ }
+
+ bno = NULLFSBLOCK;
+ mp = ip->i_mount;
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+ block = ifp->if_broot;
+ /*
+ * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
+ */
+ level = be16_to_cpu(block->bb_level);
+ ASSERT(level > 0);
+ xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
+ pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
+ bno = be64_to_cpu(*pp);
+
+ ASSERT(bno != NULLDFSBNO);
+ ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
+ ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
+
+ /*
+ * Go down the tree until leaf level is reached, following the first
+ * pointer (leftmost) at each level.
+ */
+ while (level-- > 0) {
+ /* See if buf is in cur first */
+ bp_release = 0;
+ bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
+ if (!bp) {
+ bp_release = 1;
+ error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
+ XFS_BMAP_BTREE_REF,
+ &xfs_bmbt_buf_ops);
+ if (error)
+ goto error_norelse;
+ }
+ block = XFS_BUF_TO_BLOCK(bp);
+ XFS_WANT_CORRUPTED_GOTO(
+ xfs_bmap_sanity_check(mp, bp, level),
+ error0);
+ if (level == 0)
+ break;
+
+ /*
+ * Check this block for basic sanity (increasing keys and
+ * no duplicate blocks).
+ */
+
+ xfs_check_block(block, mp, 0, 0);
+ pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
+ bno = be64_to_cpu(*pp);
+ XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
+ if (bp_release) {
+ bp_release = 0;
+ xfs_trans_brelse(NULL, bp);
+ }
+ }
+
+ /*
+ * Here with bp and block set to the leftmost leaf node in the tree.
+ */
+ i = 0;
+
+ /*
+ * Loop over all leaf nodes checking that all extents are in the right order.
+ */
+ for (;;) {
+ xfs_fsblock_t nextbno;
+ xfs_extnum_t num_recs;
+
+
+ num_recs = xfs_btree_get_numrecs(block);
+
+ /*
+ * Read-ahead the next leaf block, if any.
+ */
+
+ nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
+
+ /*
+ * Check all the extents to make sure they are OK.
+ * If we had a previous block, the last entry should
+ * conform with the first entry in this one.
+ */
+
+ ep = XFS_BMBT_REC_ADDR(mp, block, 1);
+ if (i) {
+ ASSERT(xfs_bmbt_disk_get_startoff(&last) +
+ xfs_bmbt_disk_get_blockcount(&last) <=
+ xfs_bmbt_disk_get_startoff(ep));
+ }
+ for (j = 1; j < num_recs; j++) {
+ nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
+ ASSERT(xfs_bmbt_disk_get_startoff(ep) +
+ xfs_bmbt_disk_get_blockcount(ep) <=
+ xfs_bmbt_disk_get_startoff(nextp));
+ ep = nextp;
+ }
+
+ last = *ep;
+ i += num_recs;
+ if (bp_release) {
+ bp_release = 0;
+ xfs_trans_brelse(NULL, bp);
+ }
+ bno = nextbno;
+ /*
+ * If we've reached the end, stop.
+ */
+ if (bno == NULLFSBLOCK)
+ break;
+
+ bp_release = 0;
+ bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
+ if (!bp) {
+ bp_release = 1;
+ error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
+ XFS_BMAP_BTREE_REF,
+ &xfs_bmbt_buf_ops);
+ if (error)
+ goto error_norelse;
+ }
+ block = XFS_BUF_TO_BLOCK(bp);
+ }
+ if (bp_release) {
+ bp_release = 0;
+ xfs_trans_brelse(NULL, bp);
+ }
+ return;
+
+error0:
+ xfs_warn(mp, "%s: at error0", __func__);
+ if (bp_release)
+ xfs_trans_brelse(NULL, bp);
+error_norelse:
+ xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
+ __func__, i);
+ panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
+ return;
+}
/*
- * Called from xfs_bmap_add_attrfork to handle local format files.
+ * Add bmap trace insert entries for all the contents of the extent records.
*/
-STATIC int /* error */
-xfs_bmap_add_attrfork_local(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_fsblock_t *firstblock, /* first block allocated */
- xfs_bmap_free_t *flist, /* blocks to free at commit */
- int *flags); /* inode logging flags */
+void
+xfs_bmap_trace_exlist(
+ xfs_inode_t *ip, /* incore inode pointer */
+ xfs_extnum_t cnt, /* count of entries in the list */
+ int whichfork, /* data or attr fork */
+ unsigned long caller_ip)
+{
+ xfs_extnum_t idx; /* extent record index */
+ xfs_ifork_t *ifp; /* inode fork pointer */
+ int state = 0;
+
+ if (whichfork == XFS_ATTR_FORK)
+ state |= BMAP_ATTRFORK;
+
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+ ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
+ for (idx = 0; idx < cnt; idx++)
+ trace_xfs_extlist(ip, idx, whichfork, caller_ip);
+}
/*
- * Called by xfs_bmapi to update file extent records and the btree
- * after allocating space (or doing a delayed allocation).
+ * Validate that the bmbt_irecs being returned from bmapi are valid
+ * given the caller's original parameters. Specifically check the
+ * ranges of the returned irecs to ensure that they only extend beyond
+ * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
*/
-STATIC int /* error */
-xfs_bmap_add_extent(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* extent number to update/insert */
- xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
- xfs_bmbt_irec_t *new, /* new data to add to file extents */
- xfs_fsblock_t *first, /* pointer to firstblock variable */
- xfs_bmap_free_t *flist, /* list of extents to be freed */
- int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
- int whichfork, /* data or attr fork */
- int rsvd); /* OK to allocate reserved blocks */
+STATIC void
+xfs_bmap_validate_ret(
+ xfs_fileoff_t bno,
+ xfs_filblks_t len,
+ int flags,
+ xfs_bmbt_irec_t *mval,
+ int nmap,
+ int ret_nmap)
+{
+ int i; /* index to map values */
+
+ ASSERT(ret_nmap <= nmap);
+
+ for (i = 0; i < ret_nmap; i++) {
+ ASSERT(mval[i].br_blockcount > 0);
+ if (!(flags & XFS_BMAPI_ENTIRE)) {
+ ASSERT(mval[i].br_startoff >= bno);
+ ASSERT(mval[i].br_blockcount <= len);
+ ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
+ bno + len);
+ } else {
+ ASSERT(mval[i].br_startoff < bno + len);
+ ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
+ bno);
+ }
+ ASSERT(i == 0 ||
+ mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
+ mval[i].br_startoff);
+ ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
+ mval[i].br_startblock != HOLESTARTBLOCK);
+ ASSERT(mval[i].br_state == XFS_EXT_NORM ||
+ mval[i].br_state == XFS_EXT_UNWRITTEN);
+ }
+}
+
+#else
+#define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
+#endif /* DEBUG */
/*
- * Called by xfs_bmap_add_extent to handle cases converting a delayed
- * allocation to a real allocation.
+ * bmap free list manipulation functions
*/
-STATIC int /* error */
-xfs_bmap_add_extent_delay_real(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* extent number to update/insert */
- xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
- xfs_bmbt_irec_t *new, /* new data to add to file extents */
- xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */
- xfs_fsblock_t *first, /* pointer to firstblock variable */
- xfs_bmap_free_t *flist, /* list of extents to be freed */
- int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
- int rsvd); /* OK to allocate reserved blocks */
/*
- * Called by xfs_bmap_add_extent to handle cases converting a hole
- * to a delayed allocation.
+ * Add the extent to the list of extents to be freed at transaction end.
+ * The list is maintained sorted (by block number).
*/
-STATIC int /* error */
-xfs_bmap_add_extent_hole_delay(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* extent number to update/insert */
- xfs_bmbt_irec_t *new, /* new data to add to file extents */
- int *logflagsp,/* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
- int rsvd); /* OK to allocate reserved blocks */
+void
+xfs_bmap_add_free(
+ xfs_fsblock_t bno, /* fs block number of extent */
+ xfs_filblks_t len, /* length of extent */
+ xfs_bmap_free_t *flist, /* list of extents */
+ xfs_mount_t *mp) /* mount point structure */
+{
+ xfs_bmap_free_item_t *cur; /* current (next) element */
+ xfs_bmap_free_item_t *new; /* new element */
+ xfs_bmap_free_item_t *prev; /* previous element */
+#ifdef DEBUG
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+
+ ASSERT(bno != NULLFSBLOCK);
+ ASSERT(len > 0);
+ ASSERT(len <= MAXEXTLEN);
+ ASSERT(!isnullstartblock(bno));
+ agno = XFS_FSB_TO_AGNO(mp, bno);
+ agbno = XFS_FSB_TO_AGBNO(mp, bno);
+ ASSERT(agno < mp->m_sb.sb_agcount);
+ ASSERT(agbno < mp->m_sb.sb_agblocks);
+ ASSERT(len < mp->m_sb.sb_agblocks);
+ ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
+#endif
+ ASSERT(xfs_bmap_free_item_zone != NULL);
+ new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
+ new->xbfi_startblock = bno;
+ new->xbfi_blockcount = (xfs_extlen_t)len;
+ for (prev = NULL, cur = flist->xbf_first;
+ cur != NULL;
+ prev = cur, cur = cur->xbfi_next) {
+ if (cur->xbfi_startblock >= bno)
+ break;
+ }
+ if (prev)
+ prev->xbfi_next = new;
+ else
+ flist->xbf_first = new;
+ new->xbfi_next = cur;
+ flist->xbf_count++;
+}
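+
+/*
+ * For instance, adding extents that start at blocks 500, 100 and 300 (in
+ * that order) leaves the list as 100 -> 300 -> 500, so later processing
+ * sees the frees in ascending block order.
+ */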
/*
- * Called by xfs_bmap_add_extent to handle cases converting a hole
- * to a real allocation.
+ * Remove the entry "free" from the free item list. Prev points to the
+ * previous entry, unless "free" is the head of the list.
*/
-STATIC int /* error */
-xfs_bmap_add_extent_hole_real(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* extent number to update/insert */
- xfs_btree_cur_t *cur, /* if null, not a btree */
- xfs_bmbt_irec_t *new, /* new data to add to file extents */
- int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
- int whichfork); /* data or attr fork */
+void
+xfs_bmap_del_free(
+ xfs_bmap_free_t *flist, /* free item list header */
+ xfs_bmap_free_item_t *prev, /* previous item on list, if any */
+ xfs_bmap_free_item_t *free) /* list item to be freed */
+{
+ if (prev)
+ prev->xbfi_next = free->xbfi_next;
+ else
+ flist->xbf_first = free->xbfi_next;
+ flist->xbf_count--;
+ kmem_zone_free(xfs_bmap_free_item_zone, free);
+}
/*
- * Called by xfs_bmap_add_extent to handle cases converting an unwritten
- * allocation to a real allocation or vice versa.
+ * Free up any items left in the list.
*/
-STATIC int /* error */
-xfs_bmap_add_extent_unwritten_real(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* extent number to update/insert */
- xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
- xfs_bmbt_irec_t *new, /* new data to add to file extents */
- int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta); /* Change made to incore extents */
+void
+xfs_bmap_cancel(
+ xfs_bmap_free_t *flist) /* list of bmap_free_items */
+{
+ xfs_bmap_free_item_t *free; /* free list item */
+ xfs_bmap_free_item_t *next;
+
+ if (flist->xbf_count == 0)
+ return;
+ ASSERT(flist->xbf_first != NULL);
+ for (free = flist->xbf_first; free; free = next) {
+ next = free->xbfi_next;
+ xfs_bmap_del_free(flist, NULL, free);
+ }
+ ASSERT(flist->xbf_count == 0);
+}
/*
- * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
- * It figures out where to ask the underlying allocator to put the new extent.
+ * Inode fork format manipulation functions
*/
-STATIC int /* error */
-xfs_bmap_alloc(
- xfs_bmalloca_t *ap); /* bmap alloc argument struct */
/*
* Transform a btree format file with only one leaf node, where the
@@ -184,34 +691,53 @@ xfs_bmap_btree_to_extents(
xfs_inode_t *ip, /* incore inode pointer */
xfs_btree_cur_t *cur, /* btree cursor */
int *logflagsp, /* inode logging flags */
- int whichfork); /* data or attr fork */
-
-/*
- * Called by xfs_bmapi to update file extent records and the btree
- * after removing space (or undoing a delayed allocation).
- */
-STATIC int /* error */
-xfs_bmap_del_extent(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_trans_t *tp, /* current trans pointer */
- xfs_extnum_t idx, /* extent number to update/insert */
- xfs_bmap_free_t *flist, /* list of extents to be freed */
- xfs_btree_cur_t *cur, /* if null, not a btree */
- xfs_bmbt_irec_t *new, /* new data to add to file extents */
- int *logflagsp,/* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
- int whichfork, /* data or attr fork */
- int rsvd); /* OK to allocate reserved blocks */
+ int whichfork) /* data or attr fork */
+{
+ /* REFERENCED */
+ struct xfs_btree_block *cblock;/* child btree block */
+ xfs_fsblock_t cbno; /* child block number */
+ xfs_buf_t *cbp; /* child block's buffer */
+ int error; /* error return value */
+ xfs_ifork_t *ifp; /* inode fork data */
+ xfs_mount_t *mp; /* mount point structure */
+ __be64 *pp; /* ptr to block address */
+ struct xfs_btree_block *rblock;/* root btree block */
-/*
- * Remove the entry "free" from the free item list. Prev points to the
- * previous entry, unless "free" is the head of the list.
- */
-STATIC void
-xfs_bmap_del_free(
- xfs_bmap_free_t *flist, /* free item list header */
- xfs_bmap_free_item_t *prev, /* previous item on list, if any */
- xfs_bmap_free_item_t *free); /* list item to be freed */
+ mp = ip->i_mount;
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+ ASSERT(ifp->if_flags & XFS_IFEXTENTS);
+ ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
+ rblock = ifp->if_broot;
+ ASSERT(be16_to_cpu(rblock->bb_level) == 1);
+ ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
+ ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
+ pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
+ cbno = be64_to_cpu(*pp);
+ *logflagsp = 0;
+#ifdef DEBUG
+ if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
+ return error;
+#endif
+ error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
+ &xfs_bmbt_buf_ops);
+ if (error)
+ return error;
+ cblock = XFS_BUF_TO_BLOCK(cbp);
+ if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
+ return error;
+ xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
+ ip->i_d.di_nblocks--;
+ xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
+ xfs_trans_binval(tp, cbp);
+ if (cur->bc_bufs[0] == cbp)
+ cur->bc_bufs[0] = NULL;
+ xfs_iroot_realloc(ip, -1, whichfork);
+ ASSERT(ifp->if_broot == NULL);
+ ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
+ XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+ *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
+ return 0;
+}
/*
* Convert an extents-format file into a btree-format file.
@@ -226,13 +752,165 @@ xfs_bmap_extents_to_btree(
xfs_btree_cur_t **curp, /* cursor returned to caller */
int wasdel, /* converting a delayed alloc */
int *logflagsp, /* inode logging flags */
- int whichfork); /* data or attr fork */
+ int whichfork) /* data or attr fork */
+{
+ struct xfs_btree_block *ablock; /* allocated (child) bt block */
+ xfs_buf_t *abp; /* buffer for ablock */
+ xfs_alloc_arg_t args; /* allocation arguments */
+ xfs_bmbt_rec_t *arp; /* child record pointer */
+ struct xfs_btree_block *block; /* btree root block */
+ xfs_btree_cur_t *cur; /* bmap btree cursor */
+ xfs_bmbt_rec_host_t *ep; /* extent record pointer */
+ int error; /* error return value */
+ xfs_extnum_t i, cnt; /* extent record index */
+ xfs_ifork_t *ifp; /* inode fork pointer */
+ xfs_bmbt_key_t *kp; /* root block key pointer */
+ xfs_mount_t *mp; /* mount structure */
+ xfs_extnum_t nextents; /* number of file extents */
+ xfs_bmbt_ptr_t *pp; /* root block address pointer */
+
+ mp = ip->i_mount;
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+ ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
+
+ /*
+ * Make space in the inode incore.
+ */
+ xfs_iroot_realloc(ip, 1, whichfork);
+ ifp->if_flags |= XFS_IFBROOT;
+
+ /*
+ * Fill in the root.
+ */
+ block = ifp->if_broot;
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
+ XFS_BMAP_CRC_MAGIC, 1, 1, ip->i_ino,
+ XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
+ else
+ xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
+ XFS_BMAP_MAGIC, 1, 1, ip->i_ino,
+ XFS_BTREE_LONG_PTRS);
+
+ /*
+ * Need a cursor. Can't allocate until bb_level is filled in.
+ */
+ cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
+ cur->bc_private.b.firstblock = *firstblock;
+ cur->bc_private.b.flist = flist;
+ cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
+ /*
+ * Convert to a btree with two levels, one record in root.
+ */
+ XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
+ memset(&args, 0, sizeof(args));
+ args.tp = tp;
+ args.mp = mp;
+ args.firstblock = *firstblock;
+ if (*firstblock == NULLFSBLOCK) {
+ args.type = XFS_ALLOCTYPE_START_BNO;
+ args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
+ } else if (flist->xbf_low) {
+ args.type = XFS_ALLOCTYPE_START_BNO;
+ args.fsbno = *firstblock;
+ } else {
+ args.type = XFS_ALLOCTYPE_NEAR_BNO;
+ args.fsbno = *firstblock;
+ }
+ args.minlen = args.maxlen = args.prod = 1;
+ args.wasdel = wasdel;
+ *logflagsp = 0;
+ if ((error = xfs_alloc_vextent(&args))) {
+ xfs_iroot_realloc(ip, -1, whichfork);
+ xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+ return error;
+ }
+ /*
+ * Allocation can't fail, the space was reserved.
+ */
+ ASSERT(args.fsbno != NULLFSBLOCK);
+ ASSERT(*firstblock == NULLFSBLOCK ||
+ args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
+ (flist->xbf_low &&
+ args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
+ *firstblock = cur->bc_private.b.firstblock = args.fsbno;
+ cur->bc_private.b.allocated++;
+ ip->i_d.di_nblocks++;
+ xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
+ abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
+ /*
+ * Fill in the child block.
+ */
+ abp->b_ops = &xfs_bmbt_buf_ops;
+ ablock = XFS_BUF_TO_BLOCK(abp);
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ xfs_btree_init_block_int(mp, ablock, abp->b_bn,
+ XFS_BMAP_CRC_MAGIC, 0, 0, ip->i_ino,
+ XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
+ else
+ xfs_btree_init_block_int(mp, ablock, abp->b_bn,
+ XFS_BMAP_MAGIC, 0, 0, ip->i_ino,
+ XFS_BTREE_LONG_PTRS);
+
+ arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
+ nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+ for (cnt = i = 0; i < nextents; i++) {
+ ep = xfs_iext_get_ext(ifp, i);
+ if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
+ arp->l0 = cpu_to_be64(ep->l0);
+ arp->l1 = cpu_to_be64(ep->l1);
+ arp++; cnt++;
+ }
+ }
+ ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
+ xfs_btree_set_numrecs(ablock, cnt);
+
+ /*
+ * Fill in the root key and pointer.
+ */
+ kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
+ arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
+ kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
+ pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
+ be16_to_cpu(block->bb_level)));
+ *pp = cpu_to_be64(args.fsbno);
+
+ /*
+ * Do all this logging at the end so that
+ * the root is at the right level.
+ */
+ xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
+ xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
+ ASSERT(*curp == NULL);
+ *curp = cur;
+ *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
+ return 0;
+}
/*
* Convert a local file to an extents file.
- * This code is sort of bogus, since the file data needs to get
- * logged so it won't be lost. The bmap-level manipulations are ok, though.
+ * This code is out of bounds for data forks of regular files,
+ * since the file data needs to get logged so things will stay consistent.
+ * (The bmap-level manipulations are ok, though).
*/
+void
+xfs_bmap_local_to_extents_empty(
+ struct xfs_inode *ip,
+ int whichfork)
+{
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+
+ ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
+ ASSERT(ifp->if_bytes == 0);
+ ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
+
+ xfs_bmap_forkoff_reset(ip, whichfork);
+ ifp->if_flags &= ~XFS_IFINLINE;
+ ifp->if_flags |= XFS_IFEXTENTS;
+ XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+}
+
+
STATIC int /* error */
xfs_bmap_local_to_extents(
xfs_trans_t *tp, /* transaction pointer */
@@ -240,167 +918,89 @@ xfs_bmap_local_to_extents(
xfs_fsblock_t *firstblock, /* first block allocated in xaction */
xfs_extlen_t total, /* total blocks needed by transaction */
int *logflagsp, /* inode logging flags */
- int whichfork); /* data or attr fork */
-
-/*
- * Search the extents list for the inode, for the extent containing bno.
- * If bno lies in a hole, point to the next entry. If bno lies past eof,
- * *eofp will be set, and *prevp will contain the last entry (null if none).
- * Else, *lastxp will be set to the index of the found
- * entry; *gotp will contain the entry.
- */
-STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
-xfs_bmap_search_extents(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_fileoff_t bno, /* block number searched for */
- int whichfork, /* data or attr fork */
- int *eofp, /* out: end of file found */
- xfs_extnum_t *lastxp, /* out: last extent index */
- xfs_bmbt_irec_t *gotp, /* out: extent entry found */
- xfs_bmbt_irec_t *prevp); /* out: previous extent entry found */
-
-/*
- * Check the last inode extent to determine whether this allocation will result
- * in blocks being allocated at the end of the file. When we allocate new data
- * blocks at the end of the file which do not start at the previous data block,
- * we will try to align the new blocks at stripe unit boundaries.
- */
-STATIC int /* error */
-xfs_bmap_isaeof(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_fileoff_t off, /* file offset in fsblocks */
- int whichfork, /* data or attribute fork */
- char *aeof); /* return value */
-
-#ifdef XFS_BMAP_TRACE
-/*
- * Add bmap trace entry prior to a call to xfs_iext_remove.
- */
-STATIC void
-xfs_bmap_trace_delete(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry(entries) deleted */
- xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */
- int whichfork); /* data or attr fork */
-
-/*
- * Add bmap trace entry prior to a call to xfs_iext_insert, or
- * reading in the extents list from the disk (in the btree).
- */
-STATIC void
-xfs_bmap_trace_insert(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry(entries) inserted */
- xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */
- xfs_bmbt_irec_t *r1, /* inserted record 1 */
- xfs_bmbt_irec_t *r2, /* inserted record 2 or null */
- int whichfork); /* data or attr fork */
-
-/*
- * Add bmap trace entry after updating an extent record in place.
- */
-STATIC void
-xfs_bmap_trace_post_update(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry updated */
- int whichfork); /* data or attr fork */
+ int whichfork,
+ void (*init_fn)(struct xfs_trans *tp,
+ struct xfs_buf *bp,
+ struct xfs_inode *ip,
+ struct xfs_ifork *ifp))
+{
+ int error = 0;
+ int flags; /* logging flags returned */
+ xfs_ifork_t *ifp; /* inode fork pointer */
+ xfs_alloc_arg_t args; /* allocation arguments */
+ xfs_buf_t *bp; /* buffer for extent block */
+ xfs_bmbt_rec_host_t *ep; /* extent record pointer */
-/*
- * Add bmap trace entry prior to updating an extent record in place.
- */
-STATIC void
-xfs_bmap_trace_pre_update(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry to be updated */
- int whichfork); /* data or attr fork */
-
-#define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w) \
- xfs_bmap_trace_delete(__func__,d,ip,i,c,w)
-#define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w) \
- xfs_bmap_trace_insert(__func__,d,ip,i,c,r1,r2,w)
-#define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w) \
- xfs_bmap_trace_post_update(__func__,d,ip,i,w)
-#define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w) \
- xfs_bmap_trace_pre_update(__func__,d,ip,i,w)
-#else
-#define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w)
-#define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w)
-#define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w)
-#define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w)
-#endif /* XFS_BMAP_TRACE */
+ /*
+ * We don't want to deal with the case of keeping inode data inline yet.
+	 * So passing in the data fork of a regular inode is invalid.
+ */
+ ASSERT(!(S_ISREG(ip->i_d.di_mode) && whichfork == XFS_DATA_FORK));
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+ ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
-/*
- * Compute the worst-case number of indirect blocks that will be used
- * for ip's delayed extent of length "len".
- */
-STATIC xfs_filblks_t
-xfs_bmap_worst_indlen(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_filblks_t len); /* delayed extent length */
+ if (!ifp->if_bytes) {
+ xfs_bmap_local_to_extents_empty(ip, whichfork);
+ flags = XFS_ILOG_CORE;
+ goto done;
+ }
-#ifdef DEBUG
-/*
- * Perform various validation checks on the values being returned
- * from xfs_bmapi().
- */
-STATIC void
-xfs_bmap_validate_ret(
- xfs_fileoff_t bno,
- xfs_filblks_t len,
- int flags,
- xfs_bmbt_irec_t *mval,
- int nmap,
- int ret_nmap);
-#else
-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
-#endif /* DEBUG */
+ flags = 0;
+ error = 0;
+ ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
+ XFS_IFINLINE);
+ memset(&args, 0, sizeof(args));
+ args.tp = tp;
+ args.mp = ip->i_mount;
+ args.firstblock = *firstblock;
+ /*
+ * Allocate a block. We know we need only one, since the
+ * file currently fits in an inode.
+ */
+ if (*firstblock == NULLFSBLOCK) {
+ args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
+ args.type = XFS_ALLOCTYPE_START_BNO;
+ } else {
+ args.fsbno = *firstblock;
+ args.type = XFS_ALLOCTYPE_NEAR_BNO;
+ }
+ args.total = total;
+ args.minlen = args.maxlen = args.prod = 1;
+ error = xfs_alloc_vextent(&args);
+ if (error)
+ goto done;
-#if defined(XFS_RW_TRACE)
-STATIC void
-xfs_bunmap_trace(
- xfs_inode_t *ip,
- xfs_fileoff_t bno,
- xfs_filblks_t len,
- int flags,
- inst_t *ra);
-#else
-#define xfs_bunmap_trace(ip, bno, len, flags, ra)
-#endif /* XFS_RW_TRACE */
+ /* Can't fail, the space was reserved. */
+ ASSERT(args.fsbno != NULLFSBLOCK);
+ ASSERT(args.len == 1);
+ *firstblock = args.fsbno;
+ bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
-STATIC int
-xfs_bmap_count_tree(
- xfs_mount_t *mp,
- xfs_trans_t *tp,
- xfs_ifork_t *ifp,
- xfs_fsblock_t blockno,
- int levelin,
- int *count);
+ /* initialise the block and copy the data */
+ init_fn(tp, bp, ip, ifp);
-STATIC int
-xfs_bmap_count_leaves(
- xfs_ifork_t *ifp,
- xfs_extnum_t idx,
- int numrecs,
- int *count);
+ /* account for the change in fork size and log everything */
+ xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
+ xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
+ xfs_bmap_local_to_extents_empty(ip, whichfork);
+ flags |= XFS_ILOG_CORE;
-STATIC int
-xfs_bmap_disk_count_leaves(
- xfs_extnum_t idx,
- xfs_bmbt_block_t *block,
- int numrecs,
- int *count);
+ xfs_iext_add(ifp, 0, 1);
+ ep = xfs_iext_get_ext(ifp, 0);
+ xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
+ trace_xfs_bmap_post_update(ip, 0,
+ whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
+ _THIS_IP_);
+ XFS_IFORK_NEXT_SET(ip, whichfork, 1);
+ ip->i_d.di_nblocks = 1;
+ xfs_trans_mod_dquot_byino(tp, ip,
+ XFS_TRANS_DQ_BCOUNT, 1L);
+ flags |= xfs_ilog_fext(whichfork);
-/*
- * Bmap internal routines.
- */
+done:
+ *logflagsp = flags;
+ return error;
+}
/*
* Called from xfs_bmap_add_attrfork to handle btree format files.
@@ -422,14 +1022,14 @@ xfs_bmap_add_attrfork_btree(
if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
*flags |= XFS_ILOG_DBROOT;
else {
- cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
- XFS_DATA_FORK);
+ cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
cur->bc_private.b.flist = flist;
cur->bc_private.b.firstblock = *firstblock;
if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
goto error0;
- ASSERT(stat == 1); /* must be at least one entry */
- if ((error = xfs_bmbt_newroot(cur, flags, &stat)))
+ /* must be at least one entry */
+ XFS_WANT_CORRUPTED_GOTO(stat == 1, error0);
+ if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
goto error0;
if (stat == 0) {
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
@@ -473,7 +1073,15 @@ xfs_bmap_add_attrfork_extents(
}
/*
- * Called from xfs_bmap_add_attrfork to handle local format files.
+ * Called from xfs_bmap_add_attrfork to handle local format files. Each
+ * different data fork content type needs a different callout to do the
+ * conversion. Some are basic and only require special block initialisation
+ * callouts for the data formatting; others (directories) are so specialised they
+ * handle everything themselves.
+ *
+ * XXX (dgc): investigate whether directory conversion can use the generic
+ * formatting callout. It should be possible - it's just a very complex
+ * formatter.
*/
STATIC int /* error */
xfs_bmap_add_attrfork_local(
@@ -484,221 +1092,659 @@ xfs_bmap_add_attrfork_local(
int *flags) /* inode logging flags */
{
xfs_da_args_t dargs; /* args for dir/attr code */
- int error; /* error return value */
- xfs_mount_t *mp; /* mount structure pointer */
if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
return 0;
- if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
- mp = ip->i_mount;
+
+ if (S_ISDIR(ip->i_d.di_mode)) {
memset(&dargs, 0, sizeof(dargs));
+ dargs.geo = ip->i_mount->m_dir_geo;
dargs.dp = ip;
dargs.firstblock = firstblock;
dargs.flist = flist;
- dargs.total = mp->m_dirblkfsbs;
+ dargs.total = dargs.geo->fsbcount;
dargs.whichfork = XFS_DATA_FORK;
dargs.trans = tp;
- error = xfs_dir2_sf_to_block(&dargs);
- } else
- error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
- XFS_DATA_FORK);
+ return xfs_dir2_sf_to_block(&dargs);
+ }
+
+ if (S_ISLNK(ip->i_d.di_mode))
+ return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
+ flags, XFS_DATA_FORK,
+ xfs_symlink_local_to_remote);
+
+ /* should only be called for types that support local format data */
+ ASSERT(0);
+ return EFSCORRUPTED;
+}
+
+/*
+ * Convert inode from non-attributed to attributed.
+ * Must not be in a transaction, ip must not be locked.
+ */
+int /* error code */
+xfs_bmap_add_attrfork(
+ xfs_inode_t *ip, /* incore inode pointer */
+ int size, /* space new attribute needs */
+ int rsvd) /* xact may use reserved blks */
+{
+ xfs_fsblock_t firstblock; /* 1st block/ag allocated */
+ xfs_bmap_free_t flist; /* freed extent records */
+ xfs_mount_t *mp; /* mount structure */
+ xfs_trans_t *tp; /* transaction pointer */
+ int blks; /* space reservation */
+ int version = 1; /* superblock attr version */
+ int committed; /* xaction was committed */
+ int logflags; /* logging flags */
+ int error; /* error return value */
+ int cancel_flags = 0;
+
+ ASSERT(XFS_IFORK_Q(ip) == 0);
+
+ mp = ip->i_mount;
+ ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
+ tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
+ blks = XFS_ADDAFORK_SPACE_RES(mp);
+ if (rsvd)
+ tp->t_flags |= XFS_TRANS_RESERVE;
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_addafork, blks, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return error;
+ }
+ cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
+ XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
+ XFS_QMOPT_RES_REGBLKS);
+ if (error)
+ goto trans_cancel;
+ cancel_flags |= XFS_TRANS_ABORT;
+ if (XFS_IFORK_Q(ip))
+ goto trans_cancel;
+ if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
+ /*
+ * For inodes coming from pre-6.2 filesystems.
+ */
+ ASSERT(ip->i_d.di_aformat == 0);
+ ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
+ }
+ ASSERT(ip->i_d.di_anextents == 0);
+
+ xfs_trans_ijoin(tp, ip, 0);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+ switch (ip->i_d.di_format) {
+ case XFS_DINODE_FMT_DEV:
+ ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
+ break;
+ case XFS_DINODE_FMT_UUID:
+ ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
+ break;
+ case XFS_DINODE_FMT_LOCAL:
+ case XFS_DINODE_FMT_EXTENTS:
+ case XFS_DINODE_FMT_BTREE:
+ ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
+ if (!ip->i_d.di_forkoff)
+ ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
+ else if (mp->m_flags & XFS_MOUNT_ATTR2)
+ version = 2;
+ break;
+ default:
+ ASSERT(0);
+ error = XFS_ERROR(EINVAL);
+ goto trans_cancel;
+ }
+
+ ASSERT(ip->i_afp == NULL);
+ ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
+ ip->i_afp->if_flags = XFS_IFEXTENTS;
+ logflags = 0;
+ xfs_bmap_init(&flist, &firstblock);
+ switch (ip->i_d.di_format) {
+ case XFS_DINODE_FMT_LOCAL:
+ error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
+ &logflags);
+ break;
+ case XFS_DINODE_FMT_EXTENTS:
+ error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
+ &flist, &logflags);
+ break;
+ case XFS_DINODE_FMT_BTREE:
+ error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
+ &logflags);
+ break;
+ default:
+ error = 0;
+ break;
+ }
+ if (logflags)
+ xfs_trans_log_inode(tp, ip, logflags);
+ if (error)
+ goto bmap_cancel;
+ if (!xfs_sb_version_hasattr(&mp->m_sb) ||
+ (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
+ __int64_t sbfields = 0;
+
+ spin_lock(&mp->m_sb_lock);
+ if (!xfs_sb_version_hasattr(&mp->m_sb)) {
+ xfs_sb_version_addattr(&mp->m_sb);
+ sbfields |= XFS_SB_VERSIONNUM;
+ }
+ if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
+ xfs_sb_version_addattr2(&mp->m_sb);
+ sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
+ }
+ if (sbfields) {
+ spin_unlock(&mp->m_sb_lock);
+ xfs_mod_sb(tp, sbfields);
+ } else
+ spin_unlock(&mp->m_sb_lock);
+ }
+
+ error = xfs_bmap_finish(&tp, &flist, &committed);
+ if (error)
+ goto bmap_cancel;
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
+
+bmap_cancel:
+ xfs_bmap_cancel(&flist);
+trans_cancel:
+ xfs_trans_cancel(tp, cancel_flags);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
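
For illustration, a minimal sketch of how xfs_bmap_add_attrfork() might be used by
attribute code; the wrapper name is hypothetical, and only the locking rules stated
in the comment above are assumed:

/* hypothetical caller: make sure an attribute fork exists before xattr work */
STATIC int
example_ensure_attrfork(
	struct xfs_inode	*ip,	/* unlocked, not in a transaction */
	int			size)	/* space the new attribute needs */
{
	if (XFS_IFORK_Q(ip))
		return 0;		/* attribute fork already present */

	/* last argument 0: do not dip into the reserved block pool */
	return xfs_bmap_add_attrfork(ip, size, 0);
}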
/*
- * Called by xfs_bmapi to update file extent records and the btree
- * after allocating space (or doing a delayed allocation).
+ * Internal and external extent tree search functions.
*/
-STATIC int /* error */
-xfs_bmap_add_extent(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* extent number to update/insert */
- xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
- xfs_bmbt_irec_t *new, /* new data to add to file extents */
- xfs_fsblock_t *first, /* pointer to firstblock variable */
- xfs_bmap_free_t *flist, /* list of extents to be freed */
- int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
- int whichfork, /* data or attr fork */
- int rsvd) /* OK to use reserved data blocks */
+
+/*
+ * Read in the extents to if_extents.
+ * All inode fields are set up by caller, we just traverse the btree
+ * and copy the records in. If the file system cannot contain unwritten
+ * extents, the records are checked for no "state" flags.
+ */
+int /* error */
+xfs_bmap_read_extents(
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_inode_t *ip, /* incore inode */
+ int whichfork) /* data or attr fork */
{
- xfs_btree_cur_t *cur; /* btree cursor or null */
- xfs_filblks_t da_new; /* new count del alloc blocks used */
- xfs_filblks_t da_old; /* old count del alloc blocks used */
+ struct xfs_btree_block *block; /* current btree block */
+ xfs_fsblock_t bno; /* block # of "block" */
+ xfs_buf_t *bp; /* buffer for "block" */
int error; /* error return value */
- xfs_ifork_t *ifp; /* inode fork ptr */
- int logflags; /* returned value */
- xfs_extnum_t nextents; /* number of extents in file now */
+ xfs_exntfmt_t exntf; /* XFS_EXTFMT_NOSTATE, if checking */
+ xfs_extnum_t i, j; /* index into the extents list */
+ xfs_ifork_t *ifp; /* fork structure */
+ int level; /* btree level, for checking */
+ xfs_mount_t *mp; /* file system mount structure */
+ __be64 *pp; /* pointer to block address */
+ /* REFERENCED */
+ xfs_extnum_t room; /* number of entries there's room for */
- XFS_STATS_INC(xs_add_exlist);
- cur = *curp;
+ bno = NULLFSBLOCK;
+ mp = ip->i_mount;
ifp = XFS_IFORK_PTR(ip, whichfork);
- nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
- ASSERT(idx <= nextents);
- da_old = da_new = 0;
- error = 0;
+ exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
+ XFS_EXTFMT_INODE(ip);
+ block = ifp->if_broot;
/*
- * This is the first extent added to a new/empty file.
- * Special case this one, so other routines get to assume there are
- * already extents in the list.
+ * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
*/
- if (nextents == 0) {
- XFS_BMAP_TRACE_INSERT("insert empty", ip, 0, 1, new, NULL,
- whichfork);
- xfs_iext_insert(ifp, 0, 1, new);
- ASSERT(cur == NULL);
- ifp->if_lastex = 0;
- if (!ISNULLSTARTBLOCK(new->br_startblock)) {
- XFS_IFORK_NEXT_SET(ip, whichfork, 1);
- logflags = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
- } else
- logflags = 0;
- /* DELTA: single new extent */
- if (delta) {
- if (delta->xed_startoff > new->br_startoff)
- delta->xed_startoff = new->br_startoff;
- if (delta->xed_blockcount <
- new->br_startoff + new->br_blockcount)
- delta->xed_blockcount = new->br_startoff +
- new->br_blockcount;
- }
- }
+ level = be16_to_cpu(block->bb_level);
+ ASSERT(level > 0);
+ pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
+ bno = be64_to_cpu(*pp);
+ ASSERT(bno != NULLDFSBNO);
+ ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
+ ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
/*
- * Any kind of new delayed allocation goes here.
+ * Go down the tree until leaf level is reached, following the first
+ * pointer (leftmost) at each level.
*/
- else if (ISNULLSTARTBLOCK(new->br_startblock)) {
- if (cur)
- ASSERT((cur->bc_private.b.flags &
- XFS_BTCUR_BPRV_WASDEL) == 0);
- if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new,
- &logflags, delta, rsvd)))
- goto done;
+ while (level-- > 0) {
+ error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
+ XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
+ if (error)
+ return error;
+ block = XFS_BUF_TO_BLOCK(bp);
+ XFS_WANT_CORRUPTED_GOTO(
+ xfs_bmap_sanity_check(mp, bp, level),
+ error0);
+ if (level == 0)
+ break;
+ pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
+ bno = be64_to_cpu(*pp);
+ XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
+ xfs_trans_brelse(tp, bp);
}
/*
- * Real allocation off the end of the file.
+ * Here with bp and block set to the leftmost leaf node in the tree.
*/
- else if (idx == nextents) {
- if (cur)
- ASSERT((cur->bc_private.b.flags &
- XFS_BTCUR_BPRV_WASDEL) == 0);
- if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
- &logflags, delta, whichfork)))
- goto done;
- } else {
- xfs_bmbt_irec_t prev; /* old extent at offset idx */
+ room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+ i = 0;
+ /*
+ * Loop over all leaf nodes. Copy information to the extent records.
+ */
+ for (;;) {
+ xfs_bmbt_rec_t *frp;
+ xfs_fsblock_t nextbno;
+ xfs_extnum_t num_recs;
+ xfs_extnum_t start;
+ num_recs = xfs_btree_get_numrecs(block);
+ if (unlikely(i + num_recs > room)) {
+ ASSERT(i + num_recs <= room);
+ xfs_warn(ip->i_mount,
+ "corrupt dinode %Lu, (btree extents).",
+ (unsigned long long) ip->i_ino);
+ XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
+ XFS_ERRLEVEL_LOW, ip->i_mount, block);
+ goto error0;
+ }
+ XFS_WANT_CORRUPTED_GOTO(
+ xfs_bmap_sanity_check(mp, bp, 0),
+ error0);
/*
- * Get the record referred to by idx.
+ * Read-ahead the next leaf block, if any.
*/
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &prev);
+ nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
+ if (nextbno != NULLFSBLOCK)
+ xfs_btree_reada_bufl(mp, nextbno, 1,
+ &xfs_bmbt_buf_ops);
/*
- * If it's a real allocation record, and the new allocation ends
- * after the start of the referred to record, then we're filling
- * in a delayed or unwritten allocation with a real one, or
- * converting real back to unwritten.
+ * Copy records into the extent records.
*/
- if (!ISNULLSTARTBLOCK(new->br_startblock) &&
- new->br_startoff + new->br_blockcount > prev.br_startoff) {
- if (prev.br_state != XFS_EXT_UNWRITTEN &&
- ISNULLSTARTBLOCK(prev.br_startblock)) {
- da_old = STARTBLOCKVAL(prev.br_startblock);
- if (cur)
- ASSERT(cur->bc_private.b.flags &
- XFS_BTCUR_BPRV_WASDEL);
- if ((error = xfs_bmap_add_extent_delay_real(ip,
- idx, &cur, new, &da_new, first, flist,
- &logflags, delta, rsvd)))
- goto done;
- } else if (new->br_state == XFS_EXT_NORM) {
- ASSERT(new->br_state == XFS_EXT_NORM);
- if ((error = xfs_bmap_add_extent_unwritten_real(
- ip, idx, &cur, new, &logflags, delta)))
- goto done;
- } else {
- ASSERT(new->br_state == XFS_EXT_UNWRITTEN);
- if ((error = xfs_bmap_add_extent_unwritten_real(
- ip, idx, &cur, new, &logflags, delta)))
- goto done;
+ frp = XFS_BMBT_REC_ADDR(mp, block, 1);
+ start = i;
+ for (j = 0; j < num_recs; j++, i++, frp++) {
+ xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
+ trp->l0 = be64_to_cpu(frp->l0);
+ trp->l1 = be64_to_cpu(frp->l1);
+ }
+ if (exntf == XFS_EXTFMT_NOSTATE) {
+ /*
+ * Check all attribute bmap btree records and
+ * any "older" data bmap btree records for a
+ * set bit in the "extent flag" position.
+ */
+ if (unlikely(xfs_check_nostate_extents(ifp,
+ start, num_recs))) {
+ XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
+ XFS_ERRLEVEL_LOW,
+ ip->i_mount);
+ goto error0;
}
- ASSERT(*curp == cur || *curp == NULL);
}
+ xfs_trans_brelse(tp, bp);
+ bno = nextbno;
/*
- * Otherwise we're filling in a hole with an allocation.
+ * If we've reached the end, stop.
*/
- else {
- if (cur)
- ASSERT((cur->bc_private.b.flags &
- XFS_BTCUR_BPRV_WASDEL) == 0);
- if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
- new, &logflags, delta, whichfork)))
- goto done;
- }
+ if (bno == NULLFSBLOCK)
+ break;
+ error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
+ XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
+ if (error)
+ return error;
+ block = XFS_BUF_TO_BLOCK(bp);
}
+ ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
+ ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
+ XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
+ return 0;
+error0:
+ xfs_trans_brelse(tp, bp);
+ return XFS_ERROR(EFSCORRUPTED);
+}
+
+
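Callers in this patch do not invoke xfs_bmap_read_extents() directly; they check
XFS_IFEXTENTS and go through xfs_iread_extents(), as xfs_bmap_first_unused() and
xfs_bmap_last_before() below do. A minimal sketch of that demand-read pattern
(the wrapper name is hypothetical):

STATIC int
example_ensure_extents_in_core(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	xfs_ifork_t	*ifp = XFS_IFORK_PTR(ip, whichfork);

	/* nothing to do if the in-core extent list is already populated */
	if (ifp->if_flags & XFS_IFEXTENTS)
		return 0;

	/* otherwise pull the on-disk records into if_extents */
	return xfs_iread_extents(tp, ip, whichfork);
}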
+/*
+ * Search the extent records for the entry containing block bno.
+ * If bno lies in a hole, point to the next entry. If bno lies
+ * past eof, *eofp will be set, and *prevp will contain the last
+ * entry (null if none). Else, *lastxp will be set to the index
+ * of the found entry; *gotp will contain the entry.
+ */
+STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
+xfs_bmap_search_multi_extents(
+ xfs_ifork_t *ifp, /* inode fork pointer */
+ xfs_fileoff_t bno, /* block number searched for */
+ int *eofp, /* out: end of file found */
+ xfs_extnum_t *lastxp, /* out: last extent index */
+ xfs_bmbt_irec_t *gotp, /* out: extent entry found */
+ xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
+{
+ xfs_bmbt_rec_host_t *ep; /* extent record pointer */
+ xfs_extnum_t lastx; /* last extent index */
- ASSERT(*curp == cur || *curp == NULL);
/*
- * Convert to a btree if necessary.
+ * Initialize the extent entry structure to catch access to
+ * uninitialized br_startblock field.
*/
- if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
- int tmp_logflags; /* partial log flag return val */
+ gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
+ gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
+ gotp->br_state = XFS_EXT_INVALID;
+#if XFS_BIG_BLKNOS
+ gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
+#else
+ gotp->br_startblock = 0xffffa5a5;
+#endif
+ prevp->br_startoff = NULLFILEOFF;
- ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first,
- flist, &cur, da_old > 0, &tmp_logflags, whichfork);
- logflags |= tmp_logflags;
- if (error)
- goto done;
+ ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
+ if (lastx > 0) {
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
+ }
+ if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
+ xfs_bmbt_get_all(ep, gotp);
+ *eofp = 0;
+ } else {
+ if (lastx > 0) {
+ *gotp = *prevp;
+ }
+ *eofp = 1;
+ ep = NULL;
+ }
+ *lastxp = lastx;
+ return ep;
+}
+
+/*
+ * Search the extents list for the inode, for the extent containing bno.
+ * If bno lies in a hole, point to the next entry. If bno lies past eof,
+ * *eofp will be set, and *prevp will contain the last entry (null if none).
+ * Else, *lastxp will be set to the index of the found
+ * entry; *gotp will contain the entry.
+ */
+STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
+xfs_bmap_search_extents(
+ xfs_inode_t *ip, /* incore inode pointer */
+ xfs_fileoff_t bno, /* block number searched for */
+ int fork, /* data or attr fork */
+ int *eofp, /* out: end of file found */
+ xfs_extnum_t *lastxp, /* out: last extent index */
+ xfs_bmbt_irec_t *gotp, /* out: extent entry found */
+ xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
+{
+ xfs_ifork_t *ifp; /* inode fork pointer */
+ xfs_bmbt_rec_host_t *ep; /* extent record pointer */
+
+ XFS_STATS_INC(xs_look_exlist);
+ ifp = XFS_IFORK_PTR(ip, fork);
+
+ ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
+
+ if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
+ !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
+ xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
+ "Access to block zero in inode %llu "
+ "start_block: %llx start_off: %llx "
+ "blkcnt: %llx extent-state: %x lastx: %x",
+ (unsigned long long)ip->i_ino,
+ (unsigned long long)gotp->br_startblock,
+ (unsigned long long)gotp->br_startoff,
+ (unsigned long long)gotp->br_blockcount,
+ gotp->br_state, *lastxp);
+ *lastxp = NULLEXTNUM;
+ *eofp = 1;
+ return NULL;
+ }
+ return ep;
+}
+
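A hedged sketch of how the search helpers are consumed, mirroring the call made in
xfs_bmap_last_before() below; the wrapper is hypothetical and assumes the in-core
extent list has already been read in:

STATIC int				/* 1 if bno is beyond the last extent */
example_lookup_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,	/* file offset to look up */
	xfs_bmbt_irec_t		*got)	/* out: extent at/after bno (last one if eof) */
{
	xfs_bmbt_irec_t	prev;		/* out: extent before bno, if any */
	xfs_extnum_t	lastx;		/* out: index of the extent found */
	int		eof;

	xfs_bmap_search_extents(ip, bno, XFS_DATA_FORK, &eof, &lastx,
				got, &prev);
	return eof;
}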
+/*
+ * Returns the file-relative block number of the first unused block(s)
+ * in the file with at least "len" logically contiguous blocks free.
+ * This is the lowest-address hole if the file has holes, else the first block
+ * past the end of file.
+ * Return 0 if the file is currently local (in-inode).
+ */
+int /* error */
+xfs_bmap_first_unused(
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_inode_t *ip, /* incore inode */
+ xfs_extlen_t len, /* size of hole to find */
+ xfs_fileoff_t *first_unused, /* unused block */
+ int whichfork) /* data or attr fork */
+{
+ int error; /* error return value */
+ int idx; /* extent record index */
+ xfs_ifork_t *ifp; /* inode fork pointer */
+ xfs_fileoff_t lastaddr; /* last block number seen */
+ xfs_fileoff_t lowest; /* lowest useful block */
+ xfs_fileoff_t max; /* starting useful block */
+ xfs_fileoff_t off; /* offset for this block */
+ xfs_extnum_t nextents; /* number of extent entries */
+
+ ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
+ XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
+ XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
+ if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
+ *first_unused = 0;
+ return 0;
+ }
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+ if (!(ifp->if_flags & XFS_IFEXTENTS) &&
+ (error = xfs_iread_extents(tp, ip, whichfork)))
+ return error;
+ lowest = *first_unused;
+ nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+ for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
+ xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
+ off = xfs_bmbt_get_startoff(ep);
+ /*
+ * See if the hole before this extent will work.
+ */
+ if (off >= lowest + len && off - max >= len) {
+ *first_unused = max;
+ return 0;
+ }
+ lastaddr = off + xfs_bmbt_get_blockcount(ep);
+ max = XFS_FILEOFF_MAX(lastaddr, lowest);
+ }
+ *first_unused = max;
+ return 0;
+}
+
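As a usage sketch (hypothetical wrapper), note that *first_unused doubles as the
lowest acceptable offset on input, so it must be initialised before the call:

STATIC int
example_pick_unused_offset(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_extlen_t		len,	/* contiguous blocks needed */
	xfs_fileoff_t		*offp)	/* out: start of a hole of >= len blocks */
{
	*offp = 0;			/* accept any offset from zero up */
	return xfs_bmap_first_unused(tp, ip, len, offp, XFS_DATA_FORK);
}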
+/*
+ * Returns the file-relative block number of the last block - 1 before
+ * last_block (input value) in the file.
+ * This is not based on i_size, it is based on the extent records.
+ * Returns 0 for local files, as they do not have extent records.
+ */
+int /* error */
+xfs_bmap_last_before(
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_inode_t *ip, /* incore inode */
+ xfs_fileoff_t *last_block, /* last block */
+ int whichfork) /* data or attr fork */
+{
+ xfs_fileoff_t bno; /* input file offset */
+ int eof; /* hit end of file */
+ xfs_bmbt_rec_host_t *ep; /* pointer to last extent */
+ int error; /* error return value */
+ xfs_bmbt_irec_t got; /* current extent value */
+ xfs_ifork_t *ifp; /* inode fork pointer */
+ xfs_extnum_t lastx; /* last extent used */
+ xfs_bmbt_irec_t prev; /* previous extent value */
+
+ if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
+ XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
+ return XFS_ERROR(EIO);
+ if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
+ *last_block = 0;
+ return 0;
+ }
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+ if (!(ifp->if_flags & XFS_IFEXTENTS) &&
+ (error = xfs_iread_extents(tp, ip, whichfork)))
+ return error;
+ bno = *last_block - 1;
+ ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
+ &prev);
+ if (eof || xfs_bmbt_get_startoff(ep) > bno) {
+ if (prev.br_startoff == NULLFILEOFF)
+ *last_block = 0;
+ else
+ *last_block = prev.br_startoff + prev.br_blockcount;
}
/*
- * Adjust for changes in reserved delayed indirect blocks.
- * Nothing to do for disk quotas here.
+ * Otherwise *last_block is already the right answer.
*/
- if (da_old || da_new) {
- xfs_filblks_t nblks;
-
- nblks = da_new;
- if (cur)
- nblks += cur->bc_private.b.allocated;
- ASSERT(nblks <= da_old);
- if (nblks < da_old)
- xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
- (int64_t)(da_old - nblks), rsvd);
+ return 0;
+}
+
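A small hypothetical wrapper showing the in/out convention of
xfs_bmap_last_before(): the caller seeds *last_block with an upper bound and gets
back a value no larger than it:

STATIC int
example_mapped_end_below(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset,	/* look below this file offset */
	xfs_fileoff_t		*endp)	/* out: see xfs_bmap_last_before() */
{
	*endp = offset;
	return xfs_bmap_last_before(tp, ip, endp, XFS_DATA_FORK);
}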
+int
+xfs_bmap_last_extent(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ int whichfork,
+ struct xfs_bmbt_irec *rec,
+ int *is_empty)
+{
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ int error;
+ int nextents;
+
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(tp, ip, whichfork);
+ if (error)
+ return error;
+ }
+
+ nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
+ if (nextents == 0) {
+ *is_empty = 1;
+ return 0;
+ }
+
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
+ *is_empty = 0;
+ return 0;
+}
+
+/*
+ * Check the last inode extent to determine whether this allocation will result
+ * in blocks being allocated at the end of the file. When we allocate new data
+ * blocks at the end of the file which do not start at the previous data block,
+ * we will try to align the new blocks at stripe unit boundaries.
+ *
+ * Returns 1 in bma->aeof if the file (fork) is empty, as any new write will
+ * be at or past EOF.
+ */
+STATIC int
+xfs_bmap_isaeof(
+ struct xfs_bmalloca *bma,
+ int whichfork)
+{
+ struct xfs_bmbt_irec rec;
+ int is_empty;
+ int error;
+
+ bma->aeof = 0;
+ error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
+ &is_empty);
+ if (error)
+ return error;
+
+ if (is_empty) {
+ bma->aeof = 1;
+ return 0;
}
+
/*
- * Clear out the allocated field, done with it now in any case.
+	 * Check if we are allocating at or past the last extent, or at least
+	 * into the last delayed allocated extent.
*/
- if (cur) {
- cur->bc_private.b.allocated = 0;
- *curp = cur;
- }
-done:
-#ifdef DEBUG
- if (!error)
- xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
-#endif
- *logflagsp = logflags;
- return error;
+ bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
+ (bma->offset >= rec.br_startoff &&
+ isnullstartblock(rec.br_startblock));
+ return 0;
+}
+
+/*
+ * Returns the file-relative block number of the first block past eof in
+ * the file. This is not based on i_size; it is based on the extent records.
+ * Returns 0 for local files, as they do not have extent records.
+ */
+int
+xfs_bmap_last_offset(
+ struct xfs_inode *ip,
+ xfs_fileoff_t *last_block,
+ int whichfork)
+{
+ struct xfs_bmbt_irec rec;
+ int is_empty;
+ int error;
+
+ *last_block = 0;
+
+ if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
+ return 0;
+
+ if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
+ XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
+ return XFS_ERROR(EIO);
+
+ error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
+ if (error || is_empty)
+ return error;
+
+ *last_block = rec.br_startoff + rec.br_blockcount;
+ return 0;
+}
+
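A usage sketch (hypothetical wrapper) for xfs_bmap_last_offset(); as the comment
above says, the result reflects the extent records rather than i_size:

STATIC int
example_fork_blocks(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*nblocks)	/* out: first offset past EOF */
{
	/* 0 for local-format forks, which carry no extent records */
	return xfs_bmap_last_offset(ip, nblocks, XFS_DATA_FORK);
}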
+/*
+ * Returns whether the selected fork of the inode has exactly one
+ * block or not. For the data fork we check this matches di_size,
+ * implying the file's range is 0..bsize-1.
+ */
+int /* 1=>1 block, 0=>otherwise */
+xfs_bmap_one_block(
+ xfs_inode_t *ip, /* incore inode */
+ int whichfork) /* data or attr fork */
+{
+ xfs_bmbt_rec_host_t *ep; /* ptr to fork's extent */
+ xfs_ifork_t *ifp; /* inode fork pointer */
+ int rval; /* return value */
+ xfs_bmbt_irec_t s; /* internal version of extent */
+
+#ifndef DEBUG
+ if (whichfork == XFS_DATA_FORK)
+ return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
+#endif /* !DEBUG */
+ if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
+ return 0;
+ if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
+ return 0;
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+ ASSERT(ifp->if_flags & XFS_IFEXTENTS);
+ ep = xfs_iext_get_ext(ifp, 0);
+ xfs_bmbt_get_all(ep, &s);
+ rval = s.br_startoff == 0 && s.br_blockcount == 1;
+ if (rval && whichfork == XFS_DATA_FORK)
+ ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
+ return rval;
}
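
A hedged example of xfs_bmap_one_block() used as a simple predicate; the helper
below is hypothetical:

/* hypothetical helper: does the attr fork consist of exactly one block? */
STATIC int
example_attr_is_one_block(
	struct xfs_inode	*ip)
{
	/*
	 * True only for an extent-format fork whose single extent starts at
	 * file offset 0 and is exactly one filesystem block long.
	 */
	return xfs_bmap_one_block(ip, XFS_ATTR_FORK);
}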
/*
- * Called by xfs_bmap_add_extent to handle cases converting a delayed
- * allocation to a real allocation.
+ * Extent tree manipulation functions used during allocation.
+ */
+
+/*
+ * Convert a delayed allocation to a real allocation.
*/
STATIC int /* error */
xfs_bmap_add_extent_delay_real(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* extent number to update/insert */
- xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
- xfs_bmbt_irec_t *new, /* new data to add to file extents */
- xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */
- xfs_fsblock_t *first, /* pointer to firstblock variable */
- xfs_bmap_free_t *flist, /* list of extents to be freed */
- int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
- int rsvd) /* OK to use reserved data block allocation */
+ struct xfs_bmalloca *bma)
{
- xfs_btree_cur_t *cur; /* btree cursor */
+ struct xfs_bmbt_irec *new = &bma->got;
int diff; /* temp value */
xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
int error; /* error return value */
@@ -709,416 +1755,402 @@ xfs_bmap_add_extent_delay_real(
/* left is 0, right is 1, prev is 2 */
int rval=0; /* return value (logging flags) */
int state = 0;/* state bits, accessed thru macros */
- xfs_filblks_t temp=0; /* value for dnew calculations */
- xfs_filblks_t temp2=0;/* value for dnew calculations */
+ xfs_filblks_t da_new; /* new count del alloc blocks used */
+ xfs_filblks_t da_old; /* old count del alloc blocks used */
+ xfs_filblks_t temp=0; /* value for da_new calculations */
+ xfs_filblks_t temp2=0;/* value for da_new calculations */
int tmp_rval; /* partial logging flags */
- enum { /* bit number definitions for state */
- LEFT_CONTIG, RIGHT_CONTIG,
- LEFT_FILLING, RIGHT_FILLING,
- LEFT_DELAY, RIGHT_DELAY,
- LEFT_VALID, RIGHT_VALID
- };
+
+ ifp = XFS_IFORK_PTR(bma->ip, XFS_DATA_FORK);
+
+ ASSERT(bma->idx >= 0);
+ ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+ ASSERT(!isnullstartblock(new->br_startblock));
+ ASSERT(!bma->cur ||
+ (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
+
+ XFS_STATS_INC(xs_add_exlist);
#define LEFT r[0]
#define RIGHT r[1]
#define PREV r[2]
-#define MASK(b) (1 << (b))
-#define MASK2(a,b) (MASK(a) | MASK(b))
-#define MASK3(a,b,c) (MASK2(a,b) | MASK(c))
-#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d))
-#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
-#define STATE_TEST(b) (state & MASK(b))
-#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
- ((state &= ~MASK(b)), 0))
-#define SWITCH_STATE \
- (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
/*
* Set up a bunch of variables to make the tests simpler.
*/
- cur = *curp;
- ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
- ep = xfs_iext_get_ext(ifp, idx);
+ ep = xfs_iext_get_ext(ifp, bma->idx);
xfs_bmbt_get_all(ep, &PREV);
new_endoff = new->br_startoff + new->br_blockcount;
ASSERT(PREV.br_startoff <= new->br_startoff);
ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
+
+ da_old = startblockval(PREV.br_startblock);
+ da_new = 0;
+
/*
* Set flags determining what part of the previous delayed allocation
* extent is being replaced by a real allocation.
*/
- STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff);
- STATE_SET(RIGHT_FILLING,
- PREV.br_startoff + PREV.br_blockcount == new_endoff);
+ if (PREV.br_startoff == new->br_startoff)
+ state |= BMAP_LEFT_FILLING;
+ if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
+ state |= BMAP_RIGHT_FILLING;
+
/*
* Check and set flags if this segment has a left neighbor.
* Don't set contiguous if the combined extent would be too large.
*/
- if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
- STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock));
+ if (bma->idx > 0) {
+ state |= BMAP_LEFT_VALID;
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);
+
+ if (isnullstartblock(LEFT.br_startblock))
+ state |= BMAP_LEFT_DELAY;
}
- STATE_SET(LEFT_CONTIG,
- STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
- LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
- LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
- LEFT.br_state == new->br_state &&
- LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN);
+
+ if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
+ LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
+ LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
+ LEFT.br_state == new->br_state &&
+ LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+ state |= BMAP_LEFT_CONTIG;
+
/*
* Check and set flags if this segment has a right neighbor.
* Don't set contiguous if the combined extent would be too large.
* Also check for all-three-contiguous being too large.
*/
- if (STATE_SET_TEST(RIGHT_VALID,
- idx <
- ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
- STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock));
+ if (bma->idx < bma->ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
+ state |= BMAP_RIGHT_VALID;
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
+
+ if (isnullstartblock(RIGHT.br_startblock))
+ state |= BMAP_RIGHT_DELAY;
}
- STATE_SET(RIGHT_CONTIG,
- STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
- new_endoff == RIGHT.br_startoff &&
- new->br_startblock + new->br_blockcount ==
- RIGHT.br_startblock &&
- new->br_state == RIGHT.br_state &&
- new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
- ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) !=
- MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) ||
- LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
- <= MAXEXTLEN));
+
+ if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
+ new_endoff == RIGHT.br_startoff &&
+ new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
+ new->br_state == RIGHT.br_state &&
+ new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
+ ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+ BMAP_RIGHT_FILLING)) !=
+ (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+ BMAP_RIGHT_FILLING) ||
+ LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
+ <= MAXEXTLEN))
+ state |= BMAP_RIGHT_CONTIG;
+
error = 0;
/*
* Switch out based on the FILLING and CONTIG state bits.
*/
- switch (SWITCH_STATE) {
-
- case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
+ switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+ BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+ BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Filling in all of a previously delayed allocation extent.
* The left and right neighbors are both contiguous with new.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+ bma->idx--;
+ trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+ xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
LEFT.br_blockcount + PREV.br_blockcount +
RIGHT.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
- XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx, 2);
- ip->i_df.if_lastex = idx - 1;
- ip->i_d.di_nextents--;
- if (cur == NULL)
+ trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+
+ xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
+ bma->ip->i_d.di_nextents--;
+ if (bma->cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
+ error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
RIGHT.br_startblock,
- RIGHT.br_blockcount, &i)))
+ RIGHT.br_blockcount, &i);
+ if (error)
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_delete(cur, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ error = xfs_btree_delete(bma->cur, &i);
+ if (error)
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ error = xfs_btree_decrement(bma->cur, 0, &i);
+ if (error)
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
LEFT.br_startblock,
LEFT.br_blockcount +
PREV.br_blockcount +
- RIGHT.br_blockcount, LEFT.br_state)))
+ RIGHT.br_blockcount, LEFT.br_state);
+ if (error)
goto done;
}
- *dnew = 0;
- /* DELTA: Three in-core extents are replaced by one. */
- temp = LEFT.br_startoff;
- temp2 = LEFT.br_blockcount +
- PREV.br_blockcount +
- RIGHT.br_blockcount;
break;
- case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
/*
* Filling in all of a previously delayed allocation extent.
* The left neighbor is contiguous, the right is not.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1,
- XFS_DATA_FORK);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+ bma->idx--;
+
+ trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+ xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
LEFT.br_blockcount + PREV.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1,
- XFS_DATA_FORK);
- ip->i_df.if_lastex = idx - 1;
- XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx, 1);
- if (cur == NULL)
+ trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+
+ xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
+ if (bma->cur == NULL)
rval = XFS_ILOG_DEXT;
else {
rval = 0;
- if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
+ error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
LEFT.br_startblock, LEFT.br_blockcount,
- &i)))
+ &i);
+ if (error)
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
LEFT.br_startblock,
LEFT.br_blockcount +
- PREV.br_blockcount, LEFT.br_state)))
+ PREV.br_blockcount, LEFT.br_state);
+ if (error)
goto done;
}
- *dnew = 0;
- /* DELTA: Two in-core extents are replaced by one. */
- temp = LEFT.br_startoff;
- temp2 = LEFT.br_blockcount +
- PREV.br_blockcount;
break;
- case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Filling in all of a previously delayed allocation extent.
* The right neighbor is contiguous, the left is not.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
xfs_bmbt_set_startblock(ep, new->br_startblock);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount + RIGHT.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK);
- ip->i_df.if_lastex = idx;
- XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx + 1, 1);
- if (cur == NULL)
+ trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+
+ xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
+ if (bma->cur == NULL)
rval = XFS_ILOG_DEXT;
else {
rval = 0;
- if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
+ error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
RIGHT.br_startblock,
- RIGHT.br_blockcount, &i)))
+ RIGHT.br_blockcount, &i);
+ if (error)
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
new->br_startblock,
PREV.br_blockcount +
- RIGHT.br_blockcount, PREV.br_state)))
+ RIGHT.br_blockcount, PREV.br_state);
+ if (error)
goto done;
}
- *dnew = 0;
- /* DELTA: Two in-core extents are replaced by one. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount +
- RIGHT.br_blockcount;
break;
- case MASK2(LEFT_FILLING, RIGHT_FILLING):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
/*
* Filling in all of a previously delayed allocation extent.
* Neither the left nor right neighbors are contiguous with
* the new one.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
xfs_bmbt_set_startblock(ep, new->br_startblock);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK);
- ip->i_df.if_lastex = idx;
- ip->i_d.di_nextents++;
- if (cur == NULL)
+ trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+
+ bma->ip->i_d.di_nextents++;
+ if (bma->cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+ error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
new->br_startblock, new->br_blockcount,
- &i)))
+ &i);
+ if (error)
goto done;
- ASSERT(i == 0);
- cur->bc_rec.b.br_state = XFS_EXT_NORM;
- if ((error = xfs_bmbt_insert(cur, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+ bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
+ error = xfs_btree_insert(bma->cur, &i);
+ if (error)
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
- *dnew = 0;
- /* DELTA: The in-core extent described by new changed type. */
- temp = new->br_startoff;
- temp2 = new->br_blockcount;
break;
- case MASK2(LEFT_FILLING, LEFT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
/*
* Filling in the first part of a previous delayed allocation.
* The left neighbor is contiguous.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+ trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
+ xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
LEFT.br_blockcount + new->br_blockcount);
xfs_bmbt_set_startoff(ep,
PREV.br_startoff + new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
+
temp = PREV.br_blockcount - new->br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
- ip->i_df.if_lastex = idx - 1;
- if (cur == NULL)
+ if (bma->cur == NULL)
rval = XFS_ILOG_DEXT;
else {
rval = 0;
- if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
+ error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
LEFT.br_startblock, LEFT.br_blockcount,
- &i)))
+ &i);
+ if (error)
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
LEFT.br_startblock,
LEFT.br_blockcount +
new->br_blockcount,
- LEFT.br_state)))
+ LEFT.br_state);
+ if (error)
goto done;
}
- temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
- STARTBLOCKVAL(PREV.br_startblock));
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK);
- *dnew = temp;
- /* DELTA: The boundary between two in-core extents moved. */
- temp = LEFT.br_startoff;
- temp2 = LEFT.br_blockcount +
- PREV.br_blockcount;
+ da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
+ startblockval(PREV.br_startblock));
+ xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
+ trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+
+ bma->idx--;
break;
- case MASK(LEFT_FILLING):
+ case BMAP_LEFT_FILLING:
/*
* Filling in the first part of a previous delayed allocation.
* The left neighbor is not contiguous.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
xfs_bmbt_set_startoff(ep, new_endoff);
temp = PREV.br_blockcount - new->br_blockcount;
xfs_bmbt_set_blockcount(ep, temp);
- XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL,
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx, 1, new);
- ip->i_df.if_lastex = idx;
- ip->i_d.di_nextents++;
- if (cur == NULL)
+ xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
+ bma->ip->i_d.di_nextents++;
+ if (bma->cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+ error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
new->br_startblock, new->br_blockcount,
- &i)))
+ &i);
+ if (error)
goto done;
- ASSERT(i == 0);
- cur->bc_rec.b.br_state = XFS_EXT_NORM;
- if ((error = xfs_bmbt_insert(cur, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+ bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
+ error = xfs_btree_insert(bma->cur, &i);
+ if (error)
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
- if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
- ip->i_d.di_nextents > ip->i_df.if_ext_max) {
- error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
- first, flist, &cur, 1, &tmp_rval,
- XFS_DATA_FORK);
+
+ if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
+ error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
+ bma->firstblock, bma->flist,
+ &bma->cur, 1, &tmp_rval, XFS_DATA_FORK);
rval |= tmp_rval;
if (error)
goto done;
}
- temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
- STARTBLOCKVAL(PREV.br_startblock) -
- (cur ? cur->bc_private.b.allocated : 0));
- ep = xfs_iext_get_ext(ifp, idx + 1);
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx + 1, XFS_DATA_FORK);
- *dnew = temp;
- /* DELTA: One in-core extent is split in two. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount;
+ da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
+ startblockval(PREV.br_startblock) -
+ (bma->cur ? bma->cur->bc_private.b.allocated : 0));
+ ep = xfs_iext_get_ext(ifp, bma->idx + 1);
+ xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
+ trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
break;
- case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
+ case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Filling in the last part of a previous delayed allocation.
* The right neighbor is contiguous with the new allocation.
*/
temp = PREV.br_blockcount - new->br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK);
- XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
- xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
+ xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
new->br_startoff, new->br_startblock,
new->br_blockcount + RIGHT.br_blockcount,
RIGHT.br_state);
- XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK);
- ip->i_df.if_lastex = idx + 1;
- if (cur == NULL)
+ trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
+ if (bma->cur == NULL)
rval = XFS_ILOG_DEXT;
else {
rval = 0;
- if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
+ error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
RIGHT.br_startblock,
- RIGHT.br_blockcount, &i)))
+ RIGHT.br_blockcount, &i);
+ if (error)
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_update(cur, new->br_startoff,
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ error = xfs_bmbt_update(bma->cur, new->br_startoff,
new->br_startblock,
new->br_blockcount +
RIGHT.br_blockcount,
- RIGHT.br_state)))
+ RIGHT.br_state);
+ if (error)
goto done;
}
- temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
- STARTBLOCKVAL(PREV.br_startblock));
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK);
- *dnew = temp;
- /* DELTA: The boundary between two in-core extents moved. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount +
- RIGHT.br_blockcount;
+
+ da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
+ startblockval(PREV.br_startblock));
+ trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+ xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
+ trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+
+ bma->idx++;
break;
- case MASK(RIGHT_FILLING):
+ case BMAP_RIGHT_FILLING:
/*
* Filling in the last part of a previous delayed allocation.
* The right neighbor is not contiguous.
*/
temp = PREV.br_blockcount - new->br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
- XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL,
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx + 1, 1, new);
- ip->i_df.if_lastex = idx + 1;
- ip->i_d.di_nextents++;
- if (cur == NULL)
+ xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
+ bma->ip->i_d.di_nextents++;
+ if (bma->cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+ error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
new->br_startblock, new->br_blockcount,
- &i)))
+ &i);
+ if (error)
goto done;
- ASSERT(i == 0);
- cur->bc_rec.b.br_state = XFS_EXT_NORM;
- if ((error = xfs_bmbt_insert(cur, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+ bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
+ error = xfs_btree_insert(bma->cur, &i);
+ if (error)
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
- if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
- ip->i_d.di_nextents > ip->i_df.if_ext_max) {
- error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
- first, flist, &cur, 1, &tmp_rval,
- XFS_DATA_FORK);
+
+ if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
+ error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
+ bma->firstblock, bma->flist, &bma->cur, 1,
+ &tmp_rval, XFS_DATA_FORK);
rval |= tmp_rval;
if (error)
goto done;
}
- temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
- STARTBLOCKVAL(PREV.br_startblock) -
- (cur ? cur->bc_private.b.allocated : 0));
- ep = xfs_iext_get_ext(ifp, idx);
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK);
- *dnew = temp;
- /* DELTA: One in-core extent is split in two. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount;
+ da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
+ startblockval(PREV.br_startblock) -
+ (bma->cur ? bma->cur->bc_private.b.allocated : 0));
+ ep = xfs_iext_get_ext(ifp, bma->idx);
+ xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
+ trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+
+ bma->idx++;
break;
case 0:
@@ -1126,134 +2158,149 @@ xfs_bmap_add_extent_delay_real(
* Filling in the middle part of a previous delayed allocation.
* Contiguity is impossible here.
* This case is avoided almost all the time.
+ *
+ * We start with a delayed allocation:
+ *
+ * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
+ * PREV @ idx
+ *
+ * and we are allocating:
+ * +rrrrrrrrrrrrrrrrr+
+ * new
+ *
+ * and we set it up for insertion as:
+ * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
+ * new
+ * PREV @ idx LEFT RIGHT
+ * inserted at idx + 1
*/
temp = new->br_startoff - PREV.br_startoff;
- XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK);
- xfs_bmbt_set_blockcount(ep, temp);
- r[0] = *new;
- r[1].br_state = PREV.br_state;
- r[1].br_startblock = 0;
- r[1].br_startoff = new_endoff;
temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
- r[1].br_blockcount = temp2;
- XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1],
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
- ip->i_df.if_lastex = idx + 1;
- ip->i_d.di_nextents++;
- if (cur == NULL)
+ trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
+ xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */
+ LEFT = *new;
+ RIGHT.br_state = PREV.br_state;
+ RIGHT.br_startblock = nullstartblock(
+ (int)xfs_bmap_worst_indlen(bma->ip, temp2));
+ RIGHT.br_startoff = new_endoff;
+ RIGHT.br_blockcount = temp2;
+ /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
+ xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
+ bma->ip->i_d.di_nextents++;
+ if (bma->cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+ error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
new->br_startblock, new->br_blockcount,
- &i)))
+ &i);
+ if (error)
goto done;
- ASSERT(i == 0);
- cur->bc_rec.b.br_state = XFS_EXT_NORM;
- if ((error = xfs_bmbt_insert(cur, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+ bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
+ error = xfs_btree_insert(bma->cur, &i);
+ if (error)
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
- if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
- ip->i_d.di_nextents > ip->i_df.if_ext_max) {
- error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
- first, flist, &cur, 1, &tmp_rval,
- XFS_DATA_FORK);
+
+ if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
+ error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
+ bma->firstblock, bma->flist, &bma->cur,
+ 1, &tmp_rval, XFS_DATA_FORK);
rval |= tmp_rval;
if (error)
goto done;
}
- temp = xfs_bmap_worst_indlen(ip, temp);
- temp2 = xfs_bmap_worst_indlen(ip, temp2);
- diff = (int)(temp + temp2 - STARTBLOCKVAL(PREV.br_startblock) -
- (cur ? cur->bc_private.b.allocated : 0));
- if (diff > 0 &&
- xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) {
- /*
- * Ick gross gag me with a spoon.
- */
- ASSERT(0); /* want to see if this ever happens! */
- while (diff > 0) {
- if (temp) {
- temp--;
- diff--;
- if (!diff ||
- !xfs_mod_incore_sb(ip->i_mount,
- XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
- break;
- }
- if (temp2) {
- temp2--;
- diff--;
- if (!diff ||
- !xfs_mod_incore_sb(ip->i_mount,
- XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
- break;
- }
- }
+ temp = xfs_bmap_worst_indlen(bma->ip, temp);
+ temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
+ diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
+ (bma->cur ? bma->cur->bc_private.b.allocated : 0));
+ if (diff > 0) {
+ error = xfs_icsb_modify_counters(bma->ip->i_mount,
+ XFS_SBS_FDBLOCKS,
+ -((int64_t)diff), 0);
+ ASSERT(!error);
+ if (error)
+ goto done;
}
- ep = xfs_iext_get_ext(ifp, idx);
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK);
- XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx + 2, XFS_DATA_FORK);
- xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2),
- NULLSTARTBLOCK((int)temp2));
- XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx + 2, XFS_DATA_FORK);
- *dnew = temp + temp2;
- /* DELTA: One in-core extent is split in three. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount;
+
+ ep = xfs_iext_get_ext(ifp, bma->idx);
+ xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
+ trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+ trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
+ xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
+ nullstartblock((int)temp2));
+ trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
+
+ bma->idx++;
+ da_new = temp + temp2;
break;
- case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
- case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
- case MASK2(LEFT_FILLING, RIGHT_CONTIG):
- case MASK2(RIGHT_FILLING, LEFT_CONTIG):
- case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
- case MASK(LEFT_CONTIG):
- case MASK(RIGHT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
+ case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_LEFT_CONTIG:
+ case BMAP_RIGHT_CONTIG:
/*
* These cases are all impossible.
*/
ASSERT(0);
}
- *curp = cur;
- if (delta) {
- temp2 += temp;
- if (delta->xed_startoff > temp)
- delta->xed_startoff = temp;
- if (delta->xed_blockcount < temp2)
- delta->xed_blockcount = temp2;
+
+ /* convert to a btree if necessary */
+ if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
+ int tmp_logflags; /* partial log flag return val */
+
+ ASSERT(bma->cur == NULL);
+ error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
+ bma->firstblock, bma->flist, &bma->cur,
+ da_old > 0, &tmp_logflags, XFS_DATA_FORK);
+ bma->logflags |= tmp_logflags;
+ if (error)
+ goto done;
}
+
+ /* adjust for changes in reserved delayed indirect blocks */
+ if (da_old || da_new) {
+ temp = da_new;
+ if (bma->cur)
+ temp += bma->cur->bc_private.b.allocated;
+ ASSERT(temp <= da_old);
+ if (temp < da_old)
+ xfs_icsb_modify_counters(bma->ip->i_mount,
+ XFS_SBS_FDBLOCKS,
+ (int64_t)(da_old - temp), 0);
+ }
+
+ /* clear out the allocated field, done with it now in any case. */
+ if (bma->cur)
+ bma->cur->bc_private.b.allocated = 0;
+
+ xfs_bmap_check_leaf_extents(bma->cur, bma->ip, XFS_DATA_FORK);
done:
- *logflagsp = rval;
+ bma->logflags |= rval;
return error;
#undef LEFT
#undef RIGHT
#undef PREV
-#undef MASK
-#undef MASK2
-#undef MASK3
-#undef MASK4
-#undef STATE_SET
-#undef STATE_TEST
-#undef STATE_SET_TEST
-#undef SWITCH_STATE
}
/*
- * Called by xfs_bmap_add_extent to handle cases converting an unwritten
- * allocation to a real allocation or vice versa.
+ * Convert an unwritten allocation to a real allocation or vice versa.
*/
STATIC int /* error */
xfs_bmap_add_extent_unwritten_real(
+ struct xfs_trans *tp,
xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* extent number to update/insert */
+ xfs_extnum_t *idx, /* extent number to update/insert */
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
- int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta) /* Change made to incore extents */
+ xfs_fsblock_t *first, /* pointer to firstblock variable */
+ xfs_bmap_free_t *flist, /* list of extents to be freed */
+ int *logflagsp) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* btree cursor */
xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
@@ -1267,36 +2314,27 @@ xfs_bmap_add_extent_unwritten_real(
/* left is 0, right is 1, prev is 2 */
int rval=0; /* return value (logging flags) */
int state = 0;/* state bits, accessed thru macros */
- xfs_filblks_t temp=0;
- xfs_filblks_t temp2=0;
- enum { /* bit number definitions for state */
- LEFT_CONTIG, RIGHT_CONTIG,
- LEFT_FILLING, RIGHT_FILLING,
- LEFT_DELAY, RIGHT_DELAY,
- LEFT_VALID, RIGHT_VALID
- };
+
+ *logflagsp = 0;
+
+ cur = *curp;
+ ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+
+ ASSERT(*idx >= 0);
+ ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+ ASSERT(!isnullstartblock(new->br_startblock));
+
+ XFS_STATS_INC(xs_add_exlist);
#define LEFT r[0]
#define RIGHT r[1]
#define PREV r[2]
-#define MASK(b) (1 << (b))
-#define MASK2(a,b) (MASK(a) | MASK(b))
-#define MASK3(a,b,c) (MASK2(a,b) | MASK(c))
-#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d))
-#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
-#define STATE_TEST(b) (state & MASK(b))
-#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
- ((state &= ~MASK(b)), 0))
-#define SWITCH_STATE \
- (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
/*
* Set up a bunch of variables to make the tests simpler.
*/
error = 0;
- cur = *curp;
- ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
- ep = xfs_iext_get_ext(ifp, idx);
+ ep = xfs_iext_get_ext(ifp, *idx);
xfs_bmbt_get_all(ep, &PREV);
newext = new->br_state;
oldext = (newext == XFS_EXT_UNWRITTEN) ?
@@ -1305,69 +2343,80 @@ xfs_bmap_add_extent_unwritten_real(
new_endoff = new->br_startoff + new->br_blockcount;
ASSERT(PREV.br_startoff <= new->br_startoff);
ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
+
/*
* Set flags determining what part of the previous oldext allocation
* extent is being replaced by a newext allocation.
*/
- STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff);
- STATE_SET(RIGHT_FILLING,
- PREV.br_startoff + PREV.br_blockcount == new_endoff);
+ if (PREV.br_startoff == new->br_startoff)
+ state |= BMAP_LEFT_FILLING;
+ if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
+ state |= BMAP_RIGHT_FILLING;
+
/*
* Check and set flags if this segment has a left neighbor.
* Don't set contiguous if the combined extent would be too large.
*/
- if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
- STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock));
+ if (*idx > 0) {
+ state |= BMAP_LEFT_VALID;
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
+
+ if (isnullstartblock(LEFT.br_startblock))
+ state |= BMAP_LEFT_DELAY;
}
- STATE_SET(LEFT_CONTIG,
- STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
- LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
- LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
- LEFT.br_state == newext &&
- LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN);
+
+ if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
+ LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
+ LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
+ LEFT.br_state == newext &&
+ LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+ state |= BMAP_LEFT_CONTIG;
+
/*
* Check and set flags if this segment has a right neighbor.
* Don't set contiguous if the combined extent would be too large.
* Also check for all-three-contiguous being too large.
*/
- if (STATE_SET_TEST(RIGHT_VALID,
- idx <
- ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
- STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock));
+ if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
+ state |= BMAP_RIGHT_VALID;
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
+ if (isnullstartblock(RIGHT.br_startblock))
+ state |= BMAP_RIGHT_DELAY;
}
- STATE_SET(RIGHT_CONTIG,
- STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
- new_endoff == RIGHT.br_startoff &&
- new->br_startblock + new->br_blockcount ==
- RIGHT.br_startblock &&
- newext == RIGHT.br_state &&
- new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
- ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) !=
- MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) ||
- LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
- <= MAXEXTLEN));
+
+ if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
+ new_endoff == RIGHT.br_startoff &&
+ new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
+ newext == RIGHT.br_state &&
+ new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
+ ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+ BMAP_RIGHT_FILLING)) !=
+ (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+ BMAP_RIGHT_FILLING) ||
+ LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
+ <= MAXEXTLEN))
+ state |= BMAP_RIGHT_CONTIG;
+
/*
* Switch out based on the FILLING and CONTIG state bits.
*/
- switch (SWITCH_STATE) {
-
- case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
+ switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+ BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+ BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Setting all of a previous oldext extent to newext.
* The left and right neighbors are both contiguous with new.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+ --*idx;
+
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
LEFT.br_blockcount + PREV.br_blockcount +
RIGHT.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
- XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx, 2);
- ip->i_df.if_lastex = idx - 1;
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+ xfs_iext_remove(ip, *idx + 1, 2, state);
ip->i_d.di_nextents -= 2;
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1377,46 +2426,40 @@ xfs_bmap_add_extent_unwritten_real(
RIGHT.br_startblock,
RIGHT.br_blockcount, &i)))
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_delete(cur, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ if ((error = xfs_btree_delete(cur, &i)))
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_delete(cur, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ if ((error = xfs_btree_delete(cur, &i)))
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
LEFT.br_startblock,
LEFT.br_blockcount + PREV.br_blockcount +
RIGHT.br_blockcount, LEFT.br_state)))
goto done;
}
- /* DELTA: Three in-core extents are replaced by one. */
- temp = LEFT.br_startoff;
- temp2 = LEFT.br_blockcount +
- PREV.br_blockcount +
- RIGHT.br_blockcount;
break;
- case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
/*
* Setting all of a previous oldext extent to newext.
* The left neighbor is contiguous, the right is not.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1,
- XFS_DATA_FORK);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+ --*idx;
+
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
LEFT.br_blockcount + PREV.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1,
- XFS_DATA_FORK);
- ip->i_df.if_lastex = idx - 1;
- XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx, 1);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+ xfs_iext_remove(ip, *idx + 1, 1, state);
ip->i_d.di_nextents--;
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1426,40 +2469,32 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_startblock, PREV.br_blockcount,
&i)))
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_delete(cur, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ if ((error = xfs_btree_delete(cur, &i)))
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
LEFT.br_startblock,
LEFT.br_blockcount + PREV.br_blockcount,
LEFT.br_state)))
goto done;
}
- /* DELTA: Two in-core extents are replaced by one. */
- temp = LEFT.br_startoff;
- temp2 = LEFT.br_blockcount +
- PREV.br_blockcount;
break;
- case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Setting all of a previous oldext extent to newext.
* The right neighbor is contiguous, the left is not.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount + RIGHT.br_blockcount);
xfs_bmbt_set_state(ep, newext);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx,
- XFS_DATA_FORK);
- ip->i_df.if_lastex = idx;
- XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx + 1, 1);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ xfs_iext_remove(ip, *idx + 1, 1, state);
ip->i_d.di_nextents--;
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1469,37 +2504,31 @@ xfs_bmap_add_extent_unwritten_real(
RIGHT.br_startblock,
RIGHT.br_blockcount, &i)))
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_delete(cur, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ if ((error = xfs_btree_delete(cur, &i)))
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
if ((error = xfs_bmbt_update(cur, new->br_startoff,
new->br_startblock,
new->br_blockcount + RIGHT.br_blockcount,
newext)))
goto done;
}
- /* DELTA: Two in-core extents are replaced by one. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount +
- RIGHT.br_blockcount;
break;
- case MASK2(LEFT_FILLING, RIGHT_FILLING):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
/*
* Setting all of a previous oldext extent to newext.
* Neither the left nor right neighbors are contiguous with
* the new one.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
xfs_bmbt_set_state(ep, newext);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx,
- XFS_DATA_FORK);
- ip->i_df.if_lastex = idx;
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
if (cur == NULL)
rval = XFS_ILOG_DEXT;
else {
@@ -1508,39 +2537,35 @@ xfs_bmap_add_extent_unwritten_real(
new->br_startblock, new->br_blockcount,
&i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
if ((error = xfs_bmbt_update(cur, new->br_startoff,
new->br_startblock, new->br_blockcount,
newext)))
goto done;
}
- /* DELTA: The in-core extent described by new changed type. */
- temp = new->br_startoff;
- temp2 = new->br_blockcount;
break;
- case MASK2(LEFT_FILLING, LEFT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
/*
* Setting the first part of a previous oldext extent to newext.
* The left neighbor is contiguous.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1,
- XFS_DATA_FORK);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+ trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
+ xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
LEFT.br_blockcount + new->br_blockcount);
xfs_bmbt_set_startoff(ep,
PREV.br_startoff + new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1,
- XFS_DATA_FORK);
- XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
+
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
xfs_bmbt_set_startblock(ep,
new->br_startblock + new->br_blockcount);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount - new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx,
- XFS_DATA_FORK);
- ip->i_df.if_lastex = idx - 1;
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+ --*idx;
+
if (cur == NULL)
rval = XFS_ILOG_DEXT;
else {
@@ -1549,44 +2574,39 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_startblock, PREV.br_blockcount,
&i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
if ((error = xfs_bmbt_update(cur,
PREV.br_startoff + new->br_blockcount,
PREV.br_startblock + new->br_blockcount,
PREV.br_blockcount - new->br_blockcount,
oldext)))
goto done;
- if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+ if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- if (xfs_bmbt_update(cur, LEFT.br_startoff,
+ error = xfs_bmbt_update(cur, LEFT.br_startoff,
LEFT.br_startblock,
LEFT.br_blockcount + new->br_blockcount,
- LEFT.br_state))
+ LEFT.br_state);
+ if (error)
goto done;
}
- /* DELTA: The boundary between two in-core extents moved. */
- temp = LEFT.br_startoff;
- temp2 = LEFT.br_blockcount +
- PREV.br_blockcount;
break;
- case MASK(LEFT_FILLING):
+ case BMAP_LEFT_FILLING:
/*
* Setting the first part of a previous oldext extent to newext.
* The left neighbor is not contiguous.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
xfs_bmbt_set_startoff(ep, new_endoff);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount - new->br_blockcount);
xfs_bmbt_set_startblock(ep,
new->br_startblock + new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx, XFS_DATA_FORK);
- XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL,
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx, 1, new);
- ip->i_df.if_lastex = idx;
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+ xfs_iext_insert(ip, *idx, 1, new, state);
ip->i_d.di_nextents++;
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1596,7 +2616,7 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_startblock, PREV.br_blockcount,
&i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
if ((error = xfs_bmbt_update(cur,
PREV.br_startoff + new->br_blockcount,
PREV.br_startblock + new->br_blockcount,
@@ -1604,34 +2624,30 @@ xfs_bmap_add_extent_unwritten_real(
oldext)))
goto done;
cur->bc_rec.b = *new;
- if ((error = xfs_bmbt_insert(cur, &i)))
+ if ((error = xfs_btree_insert(cur, &i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
- /* DELTA: One in-core extent is split in two. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount;
break;
- case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
+ case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Setting the last part of a previous oldext extent to newext.
* The right neighbor is contiguous with the new allocation.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx,
- XFS_DATA_FORK);
- XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount - new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx,
- XFS_DATA_FORK);
- xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+ ++*idx;
+
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
new->br_startoff, new->br_startblock,
new->br_blockcount + RIGHT.br_blockcount, newext);
- XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1,
- XFS_DATA_FORK);
- ip->i_df.if_lastex = idx + 1;
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
if (cur == NULL)
rval = XFS_ILOG_DEXT;
else {
@@ -1640,13 +2656,13 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_startblock,
PREV.br_blockcount, &i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
PREV.br_startblock,
PREV.br_blockcount - new->br_blockcount,
oldext)))
goto done;
- if ((error = xfs_bmbt_increment(cur, 0, &i)))
+ if ((error = xfs_btree_increment(cur, 0, &i)))
goto done;
if ((error = xfs_bmbt_update(cur, new->br_startoff,
new->br_startblock,
@@ -1654,25 +2670,21 @@ xfs_bmap_add_extent_unwritten_real(
newext)))
goto done;
}
- /* DELTA: The boundary between two in-core extents moved. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount +
- RIGHT.br_blockcount;
break;
- case MASK(RIGHT_FILLING):
+ case BMAP_RIGHT_FILLING:
/*
* Setting the last part of a previous oldext extent to newext.
* The right neighbor is not contiguous.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount - new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK);
- XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL,
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx + 1, 1, new);
- ip->i_df.if_lastex = idx + 1;
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+ ++*idx;
+ xfs_iext_insert(ip, *idx, 1, new, state);
+
ip->i_d.di_nextents++;
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1682,7 +2694,7 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_startblock, PREV.br_blockcount,
&i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
PREV.br_startblock,
PREV.br_blockcount - new->br_blockcount,
@@ -1692,15 +2704,12 @@ xfs_bmap_add_extent_unwritten_real(
new->br_startblock, new->br_blockcount,
&i)))
goto done;
- ASSERT(i == 0);
+ XFS_WANT_CORRUPTED_GOTO(i == 0, done);
cur->bc_rec.b.br_state = XFS_EXT_NORM;
- if ((error = xfs_bmbt_insert(cur, &i)))
+ if ((error = xfs_btree_insert(cur, &i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
- /* DELTA: One in-core extent is split in two. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount;
break;
case 0:
@@ -1709,20 +2718,21 @@ xfs_bmap_add_extent_unwritten_real(
* newext. Contiguity is impossible here.
* One extent becomes three extents.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep,
new->br_startoff - PREV.br_startoff);
- XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
r[0] = *new;
r[1].br_startoff = new_endoff;
r[1].br_blockcount =
PREV.br_startoff + PREV.br_blockcount - new_endoff;
r[1].br_startblock = new->br_startblock + new->br_blockcount;
r[1].br_state = oldext;
- XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1],
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
- ip->i_df.if_lastex = idx + 1;
+
+ ++*idx;
+ xfs_iext_insert(ip, *idx, 2, &r[0], state);
+
ip->i_d.di_nextents += 2;
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1732,84 +2742,86 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_startblock, PREV.br_blockcount,
&i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
/* new right extent - oldext */
if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
r[1].br_startblock, r[1].br_blockcount,
r[1].br_state)))
goto done;
/* new left extent - oldext */
- PREV.br_blockcount =
- new->br_startoff - PREV.br_startoff;
cur->bc_rec.b = PREV;
- if ((error = xfs_bmbt_insert(cur, &i)))
+ cur->bc_rec.b.br_blockcount =
+ new->br_startoff - PREV.br_startoff;
+ if ((error = xfs_btree_insert(cur, &i)))
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_increment(cur, 0, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ /*
+ * Reset the cursor to the position of the new extent
+ * we are about to insert as we can't trust it after
+ * the previous insert.
+ */
+ if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+ new->br_startblock, new->br_blockcount,
+ &i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 0, done);
/* new middle extent - newext */
- cur->bc_rec.b = *new;
- if ((error = xfs_bmbt_insert(cur, &i)))
+ cur->bc_rec.b.br_state = new->br_state;
+ if ((error = xfs_btree_insert(cur, &i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
- /* DELTA: One in-core extent is split in three. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount;
break;
- case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
- case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
- case MASK2(LEFT_FILLING, RIGHT_CONTIG):
- case MASK2(RIGHT_FILLING, LEFT_CONTIG):
- case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
- case MASK(LEFT_CONTIG):
- case MASK(RIGHT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
+ case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_LEFT_CONTIG:
+ case BMAP_RIGHT_CONTIG:
/*
* These cases are all impossible.
*/
ASSERT(0);
}
- *curp = cur;
- if (delta) {
- temp2 += temp;
- if (delta->xed_startoff > temp)
- delta->xed_startoff = temp;
- if (delta->xed_blockcount < temp2)
- delta->xed_blockcount = temp2;
+
+ /* convert to a btree if necessary */
+ if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
+ int tmp_logflags; /* partial log flag return val */
+
+ ASSERT(cur == NULL);
+ error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur,
+ 0, &tmp_logflags, XFS_DATA_FORK);
+ *logflagsp |= tmp_logflags;
+ if (error)
+ goto done;
+ }
+
+ /* clear out the allocated field, done with it now in any case. */
+ if (cur) {
+ cur->bc_private.b.allocated = 0;
+ *curp = cur;
}
+
+ xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK);
done:
- *logflagsp = rval;
+ *logflagsp |= rval;
return error;
#undef LEFT
#undef RIGHT
#undef PREV
-#undef MASK
-#undef MASK2
-#undef MASK3
-#undef MASK4
-#undef STATE_SET
-#undef STATE_TEST
-#undef STATE_SET_TEST
-#undef SWITCH_STATE
}
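
The hunks above replace the per-function MASK()/STATE_SET() macros with the shared BMAP_LEFT_*/BMAP_RIGHT_* state bits and a plain switch on the filling/contiguity combination. The following is a minimal, self-contained sketch of that pattern; the flag names, struct and messages are illustrative stand-ins, not the XFS definitions.

/* Illustrative sketch of the contiguity state-bit pattern; not part of the patch. */
#include <stdio.h>

#define EX_LEFT_CONTIG  (1 << 0)   /* new extent abuts its left neighbour  */
#define EX_RIGHT_CONTIG (1 << 1)   /* new extent abuts its right neighbour */

struct ext { unsigned long long off, len; };

static int contig_state(const struct ext *l, const struct ext *n, const struct ext *r)
{
	int state = 0;

	if (l && l->off + l->len == n->off)
		state |= EX_LEFT_CONTIG;
	if (r && n->off + n->len == r->off)
		state |= EX_RIGHT_CONTIG;
	return state;
}

int main(void)
{
	struct ext left = { 0, 4 }, incoming = { 4, 2 }, right = { 6, 8 };

	switch (contig_state(&left, &incoming, &right)) {
	case EX_LEFT_CONTIG | EX_RIGHT_CONTIG:
		puts("merge all three records into one");
		break;
	case EX_LEFT_CONTIG:
		puts("extend the left record");
		break;
	case EX_RIGHT_CONTIG:
		puts("extend the right record");
		break;
	default:
		puts("insert a new record");
	}
	return 0;
}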
/*
- * Called by xfs_bmap_add_extent to handle cases converting a hole
- * to a delayed allocation.
+ * Convert a hole to a delayed allocation.
*/
-/*ARGSUSED*/
-STATIC int /* error */
+STATIC void
xfs_bmap_add_extent_hole_delay(
xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* extent number to update/insert */
- xfs_bmbt_irec_t *new, /* new data to add to file extents */
- int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
- int rsvd) /* OK to allocate reserved blocks */
+ xfs_extnum_t *idx, /* extent number to update/insert */
+ xfs_bmbt_irec_t *new) /* new data to add to file extents */
{
- xfs_bmbt_rec_host_t *ep; /* extent record for idx */
xfs_ifork_t *ifp; /* inode fork pointer */
xfs_bmbt_irec_t left; /* left neighbor extent entry */
xfs_filblks_t newlen=0; /* new indirect size */
@@ -1817,130 +2829,112 @@ xfs_bmap_add_extent_hole_delay(
xfs_bmbt_irec_t right; /* right neighbor extent entry */
int state; /* state bits, accessed thru macros */
xfs_filblks_t temp=0; /* temp for indirect calculations */
- xfs_filblks_t temp2=0;
- enum { /* bit number definitions for state */
- LEFT_CONTIG, RIGHT_CONTIG,
- LEFT_DELAY, RIGHT_DELAY,
- LEFT_VALID, RIGHT_VALID
- };
-
-#define MASK(b) (1 << (b))
-#define MASK2(a,b) (MASK(a) | MASK(b))
-#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
-#define STATE_TEST(b) (state & MASK(b))
-#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
- ((state &= ~MASK(b)), 0))
-#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
- ep = xfs_iext_get_ext(ifp, idx);
state = 0;
- ASSERT(ISNULLSTARTBLOCK(new->br_startblock));
+ ASSERT(isnullstartblock(new->br_startblock));
+
/*
* Check and set flags if this segment has a left neighbor
*/
- if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
- STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock));
+ if (*idx > 0) {
+ state |= BMAP_LEFT_VALID;
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
+
+ if (isnullstartblock(left.br_startblock))
+ state |= BMAP_LEFT_DELAY;
}
+
/*
* Check and set flags if the current (right) segment exists.
* If it doesn't exist, we're converting the hole at end-of-file.
*/
- if (STATE_SET_TEST(RIGHT_VALID,
- idx <
- ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
- xfs_bmbt_get_all(ep, &right);
- STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock));
+ if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
+ state |= BMAP_RIGHT_VALID;
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
+
+ if (isnullstartblock(right.br_startblock))
+ state |= BMAP_RIGHT_DELAY;
}
+
/*
* Set contiguity flags on the left and right neighbors.
* Don't let extents get too large, even if the pieces are contiguous.
*/
- STATE_SET(LEFT_CONTIG,
- STATE_TEST(LEFT_VALID) && STATE_TEST(LEFT_DELAY) &&
- left.br_startoff + left.br_blockcount == new->br_startoff &&
- left.br_blockcount + new->br_blockcount <= MAXEXTLEN);
- STATE_SET(RIGHT_CONTIG,
- STATE_TEST(RIGHT_VALID) && STATE_TEST(RIGHT_DELAY) &&
- new->br_startoff + new->br_blockcount == right.br_startoff &&
- new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
- (!STATE_TEST(LEFT_CONTIG) ||
- (left.br_blockcount + new->br_blockcount +
- right.br_blockcount <= MAXEXTLEN)));
+ if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
+ left.br_startoff + left.br_blockcount == new->br_startoff &&
+ left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+ state |= BMAP_LEFT_CONTIG;
+
+ if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
+ new->br_startoff + new->br_blockcount == right.br_startoff &&
+ new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
+ (!(state & BMAP_LEFT_CONTIG) ||
+ (left.br_blockcount + new->br_blockcount +
+ right.br_blockcount <= MAXEXTLEN)))
+ state |= BMAP_RIGHT_CONTIG;
+
/*
* Switch out based on the contiguity flags.
*/
- switch (SWITCH_STATE) {
-
- case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
+ switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
/*
* New allocation is contiguous with delayed allocations
* on the left and on the right.
* Merge all three into a single extent record.
*/
+ --*idx;
temp = left.br_blockcount + new->br_blockcount +
right.br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
- oldlen = STARTBLOCKVAL(left.br_startblock) +
- STARTBLOCKVAL(new->br_startblock) +
- STARTBLOCKVAL(right.br_startblock);
+
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
+ oldlen = startblockval(left.br_startblock) +
+ startblockval(new->br_startblock) +
+ startblockval(right.br_startblock);
newlen = xfs_bmap_worst_indlen(ip, temp);
- xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
- NULLSTARTBLOCK((int)newlen));
- XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
- XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx, 1);
- ip->i_df.if_lastex = idx - 1;
- /* DELTA: Two in-core extents were replaced by one. */
- temp2 = temp;
- temp = left.br_startoff;
+ xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
+ nullstartblock((int)newlen));
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+ xfs_iext_remove(ip, *idx + 1, 1, state);
break;
- case MASK(LEFT_CONTIG):
+ case BMAP_LEFT_CONTIG:
/*
* New allocation is contiguous with a delayed allocation
* on the left.
* Merge the new allocation with the left neighbor.
*/
+ --*idx;
temp = left.br_blockcount + new->br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1,
- XFS_DATA_FORK);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
- oldlen = STARTBLOCKVAL(left.br_startblock) +
- STARTBLOCKVAL(new->br_startblock);
+
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
+ oldlen = startblockval(left.br_startblock) +
+ startblockval(new->br_startblock);
newlen = xfs_bmap_worst_indlen(ip, temp);
- xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
- NULLSTARTBLOCK((int)newlen));
- XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1,
- XFS_DATA_FORK);
- ip->i_df.if_lastex = idx - 1;
- /* DELTA: One in-core extent grew into a hole. */
- temp2 = temp;
- temp = left.br_startoff;
+ xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
+ nullstartblock((int)newlen));
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
break;
- case MASK(RIGHT_CONTIG):
+ case BMAP_RIGHT_CONTIG:
/*
* New allocation is contiguous with a delayed allocation
* on the right.
* Merge the new allocation with the right neighbor.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
temp = new->br_blockcount + right.br_blockcount;
- oldlen = STARTBLOCKVAL(new->br_startblock) +
- STARTBLOCKVAL(right.br_startblock);
+ oldlen = startblockval(new->br_startblock) +
+ startblockval(right.br_startblock);
newlen = xfs_bmap_worst_indlen(ip, temp);
- xfs_bmbt_set_allf(ep, new->br_startoff,
- NULLSTARTBLOCK((int)newlen), temp, right.br_state);
- XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, XFS_DATA_FORK);
- ip->i_df.if_lastex = idx;
- /* DELTA: One in-core extent grew into a hole. */
- temp2 = temp;
- temp = new->br_startoff;
+ xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
+ new->br_startoff,
+ nullstartblock((int)newlen), temp, right.br_state);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
break;
case 0:
@@ -1950,55 +2944,28 @@ xfs_bmap_add_extent_hole_delay(
* Insert a new entry.
*/
oldlen = newlen = 0;
- XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL,
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx, 1, new);
- ip->i_df.if_lastex = idx;
- /* DELTA: A new in-core extent was added in a hole. */
- temp2 = new->br_blockcount;
- temp = new->br_startoff;
+ xfs_iext_insert(ip, *idx, 1, new, state);
break;
}
if (oldlen != newlen) {
ASSERT(oldlen > newlen);
- xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
- (int64_t)(oldlen - newlen), rsvd);
+ xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
+ (int64_t)(oldlen - newlen), 0);
/*
* Nothing to do for disk quota accounting here.
*/
}
- if (delta) {
- temp2 += temp;
- if (delta->xed_startoff > temp)
- delta->xed_startoff = temp;
- if (delta->xed_blockcount < temp2)
- delta->xed_blockcount = temp2;
- }
- *logflagsp = 0;
- return 0;
-#undef MASK
-#undef MASK2
-#undef STATE_SET
-#undef STATE_TEST
-#undef STATE_SET_TEST
-#undef SWITCH_STATE
}
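
When delayed extents are merged above, the combined worst-case indirect-block reservation (newlen) is recomputed and the surplus over the previously held reservations (oldlen) is returned to the free-block counter. A small sketch of that accounting follows; the worst_indlen() model is made up for illustration and is not the XFS calculation.

/* Illustrative sketch of the delayed-allocation reservation trim; not part of the patch. */
#include <assert.h>
#include <stdio.h>

static unsigned long long worst_indlen(unsigned long long len)
{
	/* pretend one indirect block is needed per 256 blocks, rounded up */
	return (len + 255) / 256;
}

int main(void)
{
	unsigned long long left = 300, mid = 100, right = 200;
	unsigned long long oldlen, newlen;

	/* reservations held separately before the merge */
	oldlen = worst_indlen(left) + worst_indlen(mid) + worst_indlen(right);
	/* single reservation for the merged extent */
	newlen = worst_indlen(left + mid + right);

	assert(oldlen >= newlen);
	printf("return %llu blocks to the free-space counter\n", oldlen - newlen);
	return 0;
}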
/*
- * Called by xfs_bmap_add_extent to handle cases converting a hole
- * to a real allocation.
+ * Convert a hole to a real allocation.
*/
STATIC int /* error */
xfs_bmap_add_extent_hole_real(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* extent number to update/insert */
- xfs_btree_cur_t *cur, /* if null, not a btree */
- xfs_bmbt_irec_t *new, /* new data to add to file extents */
- int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
- int whichfork) /* data or attr fork */
+ struct xfs_bmalloca *bma,
+ int whichfork)
{
- xfs_bmbt_rec_host_t *ep; /* pointer to extent entry ins. point */
+ struct xfs_bmbt_irec *new = &bma->got;
int error; /* error return value */
int i; /* temp state */
xfs_ifork_t *ifp; /* inode fork pointer */
@@ -2006,186 +2973,178 @@ xfs_bmap_add_extent_hole_real(
xfs_bmbt_irec_t right; /* right neighbor extent entry */
int rval=0; /* return value (logging flags) */
int state; /* state bits, accessed thru macros */
- xfs_filblks_t temp=0;
- xfs_filblks_t temp2=0;
- enum { /* bit number definitions for state */
- LEFT_CONTIG, RIGHT_CONTIG,
- LEFT_DELAY, RIGHT_DELAY,
- LEFT_VALID, RIGHT_VALID
- };
-
-#define MASK(b) (1 << (b))
-#define MASK2(a,b) (MASK(a) | MASK(b))
-#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
-#define STATE_TEST(b) (state & MASK(b))
-#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
- ((state &= ~MASK(b)), 0))
-#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
- ifp = XFS_IFORK_PTR(ip, whichfork);
- ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
- ep = xfs_iext_get_ext(ifp, idx);
+ ifp = XFS_IFORK_PTR(bma->ip, whichfork);
+
+ ASSERT(bma->idx >= 0);
+ ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+ ASSERT(!isnullstartblock(new->br_startblock));
+ ASSERT(!bma->cur ||
+ !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
+
+ XFS_STATS_INC(xs_add_exlist);
+
state = 0;
+ if (whichfork == XFS_ATTR_FORK)
+ state |= BMAP_ATTRFORK;
+
/*
* Check and set flags if this segment has a left neighbor.
*/
- if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
- STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock));
+ if (bma->idx > 0) {
+ state |= BMAP_LEFT_VALID;
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left);
+ if (isnullstartblock(left.br_startblock))
+ state |= BMAP_LEFT_DELAY;
}
+
/*
* Check and set flags if this segment has a current value.
* Not true if we're inserting into the "hole" at eof.
*/
- if (STATE_SET_TEST(RIGHT_VALID,
- idx <
- ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
- xfs_bmbt_get_all(ep, &right);
- STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock));
+ if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
+ state |= BMAP_RIGHT_VALID;
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
+ if (isnullstartblock(right.br_startblock))
+ state |= BMAP_RIGHT_DELAY;
}
+
/*
* We're inserting a real allocation between "left" and "right".
* Set the contiguity flags. Don't let extents get too large.
*/
- STATE_SET(LEFT_CONTIG,
- STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
- left.br_startoff + left.br_blockcount == new->br_startoff &&
- left.br_startblock + left.br_blockcount == new->br_startblock &&
- left.br_state == new->br_state &&
- left.br_blockcount + new->br_blockcount <= MAXEXTLEN);
- STATE_SET(RIGHT_CONTIG,
- STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
- new->br_startoff + new->br_blockcount == right.br_startoff &&
- new->br_startblock + new->br_blockcount ==
- right.br_startblock &&
- new->br_state == right.br_state &&
- new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
- (!STATE_TEST(LEFT_CONTIG) ||
- left.br_blockcount + new->br_blockcount +
- right.br_blockcount <= MAXEXTLEN));
+ if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
+ left.br_startoff + left.br_blockcount == new->br_startoff &&
+ left.br_startblock + left.br_blockcount == new->br_startblock &&
+ left.br_state == new->br_state &&
+ left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+ state |= BMAP_LEFT_CONTIG;
+
+ if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
+ new->br_startoff + new->br_blockcount == right.br_startoff &&
+ new->br_startblock + new->br_blockcount == right.br_startblock &&
+ new->br_state == right.br_state &&
+ new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
+ (!(state & BMAP_LEFT_CONTIG) ||
+ left.br_blockcount + new->br_blockcount +
+ right.br_blockcount <= MAXEXTLEN))
+ state |= BMAP_RIGHT_CONTIG;
error = 0;
/*
* Select which case we're in here, and implement it.
*/
- switch (SWITCH_STATE) {
-
- case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
+ switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
/*
* New allocation is contiguous with real allocations on the
* left and on the right.
* Merge all three into a single extent record.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1,
- whichfork);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+ --bma->idx;
+ trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+ xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
left.br_blockcount + new->br_blockcount +
right.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1,
- whichfork);
- XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, whichfork);
- xfs_iext_remove(ifp, idx, 1);
- ifp->if_lastex = idx - 1;
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
- if (cur == NULL) {
- rval = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
+ trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+
+ xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
+
+ XFS_IFORK_NEXT_SET(bma->ip, whichfork,
+ XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1);
+ if (bma->cur == NULL) {
+ rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
} else {
rval = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur,
- right.br_startoff,
- right.br_startblock,
- right.br_blockcount, &i)))
+ error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff,
+ right.br_startblock, right.br_blockcount,
+ &i);
+ if (error)
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_delete(cur, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ error = xfs_btree_delete(bma->cur, &i);
+ if (error)
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_decrement(cur, 0, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ error = xfs_btree_decrement(bma->cur, 0, &i);
+ if (error)
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_update(cur, left.br_startoff,
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ error = xfs_bmbt_update(bma->cur, left.br_startoff,
left.br_startblock,
left.br_blockcount +
new->br_blockcount +
right.br_blockcount,
- left.br_state)))
+ left.br_state);
+ if (error)
goto done;
}
- /* DELTA: Two in-core extents were replaced by one. */
- temp = left.br_startoff;
- temp2 = left.br_blockcount +
- new->br_blockcount +
- right.br_blockcount;
break;
- case MASK(LEFT_CONTIG):
+ case BMAP_LEFT_CONTIG:
/*
* New allocation is contiguous with a real allocation
* on the left.
* Merge the new allocation with the left neighbor.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, whichfork);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+ --bma->idx;
+ trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+ xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
left.br_blockcount + new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, whichfork);
- ifp->if_lastex = idx - 1;
- if (cur == NULL) {
- rval = XFS_ILOG_FEXT(whichfork);
+ trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+
+ if (bma->cur == NULL) {
+ rval = xfs_ilog_fext(whichfork);
} else {
rval = 0;
- if ((error = xfs_bmbt_lookup_eq(cur,
- left.br_startoff,
- left.br_startblock,
- left.br_blockcount, &i)))
+ error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff,
+ left.br_startblock, left.br_blockcount,
+ &i);
+ if (error)
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_update(cur, left.br_startoff,
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ error = xfs_bmbt_update(bma->cur, left.br_startoff,
left.br_startblock,
left.br_blockcount +
new->br_blockcount,
- left.br_state)))
+ left.br_state);
+ if (error)
goto done;
}
- /* DELTA: One in-core extent grew. */
- temp = left.br_startoff;
- temp2 = left.br_blockcount +
- new->br_blockcount;
break;
- case MASK(RIGHT_CONTIG):
+ case BMAP_RIGHT_CONTIG:
/*
* New allocation is contiguous with a real allocation
* on the right.
* Merge the new allocation with the right neighbor.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, whichfork);
- xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock,
+ trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+ xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx),
+ new->br_startoff, new->br_startblock,
new->br_blockcount + right.br_blockcount,
right.br_state);
- XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, whichfork);
- ifp->if_lastex = idx;
- if (cur == NULL) {
- rval = XFS_ILOG_FEXT(whichfork);
+ trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+
+ if (bma->cur == NULL) {
+ rval = xfs_ilog_fext(whichfork);
} else {
rval = 0;
- if ((error = xfs_bmbt_lookup_eq(cur,
+ error = xfs_bmbt_lookup_eq(bma->cur,
right.br_startoff,
right.br_startblock,
- right.br_blockcount, &i)))
+ right.br_blockcount, &i);
+ if (error)
goto done;
- ASSERT(i == 1);
- if ((error = xfs_bmbt_update(cur, new->br_startoff,
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+ error = xfs_bmbt_update(bma->cur, new->br_startoff,
new->br_startblock,
new->br_blockcount +
right.br_blockcount,
- right.br_state)))
+ right.br_state);
+ if (error)
goto done;
}
- /* DELTA: One in-core extent grew. */
- temp = new->br_startoff;
- temp2 = new->br_blockcount +
- right.br_blockcount;
break;
case 0:
@@ -2194,53 +3153,60 @@ xfs_bmap_add_extent_hole_real(
* real allocation.
* Insert a new entry.
*/
- XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL, whichfork);
- xfs_iext_insert(ifp, idx, 1, new);
- ifp->if_lastex = idx;
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
- if (cur == NULL) {
- rval = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
+ xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
+ XFS_IFORK_NEXT_SET(bma->ip, whichfork,
+ XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1);
+ if (bma->cur == NULL) {
+ rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
} else {
rval = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur,
+ error = xfs_bmbt_lookup_eq(bma->cur,
new->br_startoff,
new->br_startblock,
- new->br_blockcount, &i)))
+ new->br_blockcount, &i);
+ if (error)
goto done;
- ASSERT(i == 0);
- cur->bc_rec.b.br_state = new->br_state;
- if ((error = xfs_bmbt_insert(cur, &i)))
+ XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+ bma->cur->bc_rec.b.br_state = new->br_state;
+ error = xfs_btree_insert(bma->cur, &i);
+ if (error)
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
- /* DELTA: A new extent was added in a hole. */
- temp = new->br_startoff;
- temp2 = new->br_blockcount;
break;
}
- if (delta) {
- temp2 += temp;
- if (delta->xed_startoff > temp)
- delta->xed_startoff = temp;
- if (delta->xed_blockcount < temp2)
- delta->xed_blockcount = temp2;
+
+ /* convert to a btree if necessary */
+ if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
+ int tmp_logflags; /* partial log flag return val */
+
+ ASSERT(bma->cur == NULL);
+ error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
+ bma->firstblock, bma->flist, &bma->cur,
+ 0, &tmp_logflags, whichfork);
+ bma->logflags |= tmp_logflags;
+ if (error)
+ goto done;
}
+
+ /* clear out the allocated field, done with it now in any case. */
+ if (bma->cur)
+ bma->cur->bc_private.b.allocated = 0;
+
+ xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
- *logflagsp = rval;
+ bma->logflags |= rval;
return error;
-#undef MASK
-#undef MASK2
-#undef STATE_SET
-#undef STATE_TEST
-#undef STATE_SET_TEST
-#undef SWITCH_STATE
}
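
A change repeated throughout these hunks is the conversion of ASSERT(i == 1) into XFS_WANT_CORRUPTED_GOTO(i == 1, done): the check stays active in production builds and fails over to the error path instead of only tripping a debug assertion. The sketch below shows the shape of such a macro; the macro name, error value and helpers are stand-ins, not the XFS definitions.

/* Illustrative sketch of an assertion-to-error-path conversion; not part of the patch. */
#include <stdio.h>

#define EFSCORRUPTED_STUB	117

#define WANT_CORRUPTED_GOTO(x, l)			\
	do {						\
		if (!(x)) {				\
			error = EFSCORRUPTED_STUB;	\
			goto l;				\
		}					\
	} while (0)

static int lookup_record(int key)
{
	return key == 42;		/* 1 if found, 0 otherwise */
}

int update_record(int key)
{
	int error = 0;
	int i;

	i = lookup_record(key);
	WANT_CORRUPTED_GOTO(i == 1, done);	/* was: ASSERT(i == 1) */
	printf("record %d updated\n", key);
done:
	return error;
}

int main(void)
{
	printf("key 42 -> %d\n", update_record(42));
	printf("key 7  -> %d\n", update_record(7));
	return 0;
}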
/*
+ * Functions used in the extent read, allocate and remove paths
+ */
+
+/*
* Adjust the size of the new extent based on di_extsize and rt extsize.
*/
-STATIC int
+int
xfs_bmap_extsize_align(
xfs_mount_t *mp,
xfs_bmbt_irec_t *gotp, /* next extent pointer */
@@ -2402,9 +3368,9 @@ xfs_bmap_extsize_align(
#define XFS_ALLOC_GAP_UNITS 4
-STATIC void
+void
xfs_bmap_adjacent(
- xfs_bmalloca_t *ap) /* bmap alloc argument struct */
+ struct xfs_bmalloca *ap) /* bmap alloc argument struct */
{
xfs_fsblock_t adjust; /* adjustment to block numbers */
xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
@@ -2420,26 +3386,26 @@ xfs_bmap_adjacent(
XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
mp = ap->ip->i_mount;
- nullfb = ap->firstblock == NULLFSBLOCK;
+ nullfb = *ap->firstblock == NULLFSBLOCK;
rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
- fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
+ fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
/*
* If allocating at eof, and there's a previous real block,
- * try to use it's last block as our starting point.
+ * try to use its last block as our starting point.
*/
- if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF &&
- !ISNULLSTARTBLOCK(ap->prevp->br_startblock) &&
- ISVALID(ap->prevp->br_startblock + ap->prevp->br_blockcount,
- ap->prevp->br_startblock)) {
- ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount;
+ if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
+ !isnullstartblock(ap->prev.br_startblock) &&
+ ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
+ ap->prev.br_startblock)) {
+ ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
/*
* Adjust for the gap between prevp and us.
*/
- adjust = ap->off -
- (ap->prevp->br_startoff + ap->prevp->br_blockcount);
+ adjust = ap->offset -
+ (ap->prev.br_startoff + ap->prev.br_blockcount);
if (adjust &&
- ISVALID(ap->rval + adjust, ap->prevp->br_startblock))
- ap->rval += adjust;
+ ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
+ ap->blkno += adjust;
}
/*
* If not at eof, then compare the two neighbor blocks.
@@ -2456,17 +3422,17 @@ xfs_bmap_adjacent(
* If there's a previous (left) block, select a requested
* start block based on it.
*/
- if (ap->prevp->br_startoff != NULLFILEOFF &&
- !ISNULLSTARTBLOCK(ap->prevp->br_startblock) &&
- (prevbno = ap->prevp->br_startblock +
- ap->prevp->br_blockcount) &&
- ISVALID(prevbno, ap->prevp->br_startblock)) {
+ if (ap->prev.br_startoff != NULLFILEOFF &&
+ !isnullstartblock(ap->prev.br_startblock) &&
+ (prevbno = ap->prev.br_startblock +
+ ap->prev.br_blockcount) &&
+ ISVALID(prevbno, ap->prev.br_startblock)) {
/*
* Calculate gap to end of previous block.
*/
- adjust = prevdiff = ap->off -
- (ap->prevp->br_startoff +
- ap->prevp->br_blockcount);
+ adjust = prevdiff = ap->offset -
+ (ap->prev.br_startoff +
+ ap->prev.br_blockcount);
/*
* Figure the startblock based on the previous block's
* end and the gap size.
@@ -2475,9 +3441,9 @@ xfs_bmap_adjacent(
* allocating, or using it gives us an invalid block
* number, then just use the end of the previous block.
*/
- if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
+ if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
ISVALID(prevbno + prevdiff,
- ap->prevp->br_startblock))
+ ap->prev.br_startblock))
prevbno += adjust;
else
prevdiff += adjust;
@@ -2498,16 +3464,16 @@ xfs_bmap_adjacent(
* If there's a following (right) block, select a requested
* start block based on it.
*/
- if (!ISNULLSTARTBLOCK(ap->gotp->br_startblock)) {
+ if (!isnullstartblock(ap->got.br_startblock)) {
/*
* Calculate gap to start of next block.
*/
- adjust = gotdiff = ap->gotp->br_startoff - ap->off;
+ adjust = gotdiff = ap->got.br_startoff - ap->offset;
/*
* Figure the startblock based on the next block's
* start and the gap size.
*/
- gotbno = ap->gotp->br_startblock;
+ gotbno = ap->got.br_startblock;
/*
* Heuristic!
* If the gap is large relative to the piece we're
@@ -2515,12 +3481,12 @@ xfs_bmap_adjacent(
* number, then just use the start of the next block
* offset by our length.
*/
- if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
+ if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
ISVALID(gotbno - gotdiff, gotbno))
gotbno -= adjust;
- else if (ISVALID(gotbno - ap->alen, gotbno)) {
- gotbno -= ap->alen;
- gotdiff += adjust - ap->alen;
+ else if (ISVALID(gotbno - ap->length, gotbno)) {
+ gotbno -= ap->length;
+ gotdiff += adjust - ap->length;
} else
gotdiff += adjust;
/*
@@ -2538,282 +3504,245 @@ xfs_bmap_adjacent(
gotbno = NULLFSBLOCK;
/*
* If both valid, pick the better one, else the only good
- * one, else ap->rval is already set (to 0 or the inode block).
+ * one, else ap->blkno is already set (to 0 or the inode block).
*/
if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
- ap->rval = prevdiff <= gotdiff ? prevbno : gotbno;
+ ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
else if (prevbno != NULLFSBLOCK)
- ap->rval = prevbno;
+ ap->blkno = prevbno;
else if (gotbno != NULLFSBLOCK)
- ap->rval = gotbno;
+ ap->blkno = gotbno;
}
#undef ISVALID
}
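
The final step of the adjacency heuristic above picks whichever candidate start block has the smaller gap to the requested offset, falling back to the one valid candidate or to the previously chosen block. A compact sketch, with NULLBLOCK standing in for NULLFSBLOCK:

/* Illustrative sketch of the "closest neighbour wins" choice; not part of the patch. */
#include <stdio.h>

#define NULLBLOCK ((unsigned long long)-1)

static unsigned long long pick_adjacent(unsigned long long prevbno,
					unsigned long long prevdiff,
					unsigned long long gotbno,
					unsigned long long gotdiff,
					unsigned long long fallback)
{
	if (prevbno != NULLBLOCK && gotbno != NULLBLOCK)
		return prevdiff <= gotdiff ? prevbno : gotbno;
	if (prevbno != NULLBLOCK)
		return prevbno;
	if (gotbno != NULLBLOCK)
		return gotbno;
	return fallback;		/* equivalent of leaving ap->blkno as set */
}

int main(void)
{
	printf("%llu\n", pick_adjacent(100, 4, 200, 16, 0));		/* prev wins: 100 */
	printf("%llu\n", pick_adjacent(NULLBLOCK, 0, 200, 16, 0));	/* only got: 200 */
	return 0;
}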
+static int
+xfs_bmap_longest_free_extent(
+ struct xfs_trans *tp,
+ xfs_agnumber_t ag,
+ xfs_extlen_t *blen,
+ int *notinit)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_perag *pag;
+ xfs_extlen_t longest;
+ int error = 0;
+
+ pag = xfs_perag_get(mp, ag);
+ if (!pag->pagf_init) {
+ error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
+ if (error)
+ goto out;
+
+ if (!pag->pagf_init) {
+ *notinit = 1;
+ goto out;
+ }
+ }
+
+ longest = xfs_alloc_longest_free_extent(mp, pag);
+ if (*blen < longest)
+ *blen = longest;
+
+out:
+ xfs_perag_put(pag);
+ return error;
+}
+
+static void
+xfs_bmap_select_minlen(
+ struct xfs_bmalloca *ap,
+ struct xfs_alloc_arg *args,
+ xfs_extlen_t *blen,
+ int notinit)
+{
+ if (notinit || *blen < ap->minlen) {
+ /*
+ * Since we did a BUF_TRYLOCK above, it is possible that
+ * there is space for this request.
+ */
+ args->minlen = ap->minlen;
+ } else if (*blen < args->maxlen) {
+ /*
+ * If the best seen length is less than the request length,
+ * use the best as the minimum.
+ */
+ args->minlen = *blen;
+ } else {
+ /*
+ * Otherwise we've seen an extent as big as maxlen, use that
+ * as the minimum.
+ */
+ args->minlen = args->maxlen;
+ }
+}
+
STATIC int
-xfs_bmap_rtalloc(
- xfs_bmalloca_t *ap) /* bmap alloc argument struct */
+xfs_bmap_btalloc_nullfb(
+ struct xfs_bmalloca *ap,
+ struct xfs_alloc_arg *args,
+ xfs_extlen_t *blen)
{
- xfs_alloctype_t atype = 0; /* type for allocation routines */
- int error; /* error return value */
- xfs_mount_t *mp; /* mount point structure */
- xfs_extlen_t prod = 0; /* product factor for allocators */
- xfs_extlen_t ralen = 0; /* realtime allocation length */
- xfs_extlen_t align; /* minimum allocation alignment */
- xfs_rtblock_t rtb;
+ struct xfs_mount *mp = ap->ip->i_mount;
+ xfs_agnumber_t ag, startag;
+ int notinit = 0;
+ int error;
- mp = ap->ip->i_mount;
- align = xfs_get_extsz_hint(ap->ip);
- prod = align / mp->m_sb.sb_rextsize;
- error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
- align, 1, ap->eof, 0,
- ap->conv, &ap->off, &ap->alen);
+ args->type = XFS_ALLOCTYPE_START_BNO;
+ args->total = ap->total;
+
+ startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
+ if (startag == NULLAGNUMBER)
+ startag = ag = 0;
+
+ while (*blen < args->maxlen) {
+ error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
+ &notinit);
+ if (error)
+ return error;
+
+ if (++ag == mp->m_sb.sb_agcount)
+ ag = 0;
+ if (ag == startag)
+ break;
+ }
+
+ xfs_bmap_select_minlen(ap, args, blen, notinit);
+ return 0;
+}
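
The loop factored out above scans the allocation groups circularly: start at the AG of the hinted block, wrap around at the last AG, and stop once a free extent at least as large as the request has been seen or the scan is back at its starting AG. A toy version of that loop, with fabricated per-AG data:

/* Illustrative sketch of the wrap-around AG scan; not part of the patch. */
#include <stdio.h>

int main(void)
{
	unsigned int agcount = 4;
	unsigned int startag = 2, ag = 2;
	unsigned long long longest_free[4] = { 50, 10, 5, 80 };
	unsigned long long maxlen = 64, blen = 0;

	while (blen < maxlen) {
		if (blen < longest_free[ag])
			blen = longest_free[ag];
		if (++ag == agcount)
			ag = 0;			/* wrap to AG 0 */
		if (ag == startag)
			break;			/* full circle, give up */
	}
	printf("best length seen: %llu\n", blen);	/* 80, found in AG 3 */
	return 0;
}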
+
+STATIC int
+xfs_bmap_btalloc_filestreams(
+ struct xfs_bmalloca *ap,
+ struct xfs_alloc_arg *args,
+ xfs_extlen_t *blen)
+{
+ struct xfs_mount *mp = ap->ip->i_mount;
+ xfs_agnumber_t ag;
+ int notinit = 0;
+ int error;
+
+ args->type = XFS_ALLOCTYPE_NEAR_BNO;
+ args->total = ap->total;
+
+ ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
+ if (ag == NULLAGNUMBER)
+ ag = 0;
+
+ error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
if (error)
return error;
- ASSERT(ap->alen);
- ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
- /*
- * If the offset & length are not perfectly aligned
- * then kill prod, it will just get us in trouble.
- */
- if (do_mod(ap->off, align) || ap->alen % align)
- prod = 1;
- /*
- * Set ralen to be the actual requested length in rtextents.
- */
- ralen = ap->alen / mp->m_sb.sb_rextsize;
- /*
- * If the old value was close enough to MAXEXTLEN that
- * we rounded up to it, cut it back so it's valid again.
- * Note that if it's a really large request (bigger than
- * MAXEXTLEN), we don't hear about that number, and can't
- * adjust the starting point to match it.
- */
- if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
- ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
- /*
- * If it's an allocation to an empty file at offset 0,
- * pick an extent that will space things out in the rt area.
- */
- if (ap->eof && ap->off == 0) {
- xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
+ if (*blen < args->maxlen) {
+ error = xfs_filestream_new_ag(ap, &ag);
+ if (error)
+ return error;
- error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
+ error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
+ &notinit);
if (error)
return error;
- ap->rval = rtx * mp->m_sb.sb_rextsize;
- } else {
- ap->rval = 0;
+
}
- xfs_bmap_adjacent(ap);
+ xfs_bmap_select_minlen(ap, args, blen, notinit);
/*
- * Realtime allocation, done through xfs_rtallocate_extent.
+	 * Set the failure fallback case to look in the selected AG as the
+	 * stream may have moved.
*/
- atype = ap->rval == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
- do_div(ap->rval, mp->m_sb.sb_rextsize);
- rtb = ap->rval;
- ap->alen = ralen;
- if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen,
- &ralen, atype, ap->wasdel, prod, &rtb)))
- return error;
- if (rtb == NULLFSBLOCK && prod > 1 &&
- (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1,
- ap->alen, &ralen, atype,
- ap->wasdel, 1, &rtb)))
- return error;
- ap->rval = rtb;
- if (ap->rval != NULLFSBLOCK) {
- ap->rval *= mp->m_sb.sb_rextsize;
- ralen *= mp->m_sb.sb_rextsize;
- ap->alen = ralen;
- ap->ip->i_d.di_nblocks += ralen;
- xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
- if (ap->wasdel)
- ap->ip->i_delayed_blks -= ralen;
- /*
- * Adjust the disk quota also. This was reserved
- * earlier.
- */
- XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
- ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
- XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
- } else {
- ap->alen = 0;
- }
+ ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
return 0;
}
STATIC int
xfs_bmap_btalloc(
- xfs_bmalloca_t *ap) /* bmap alloc argument struct */
+ struct xfs_bmalloca *ap) /* bmap alloc argument struct */
{
xfs_mount_t *mp; /* mount point structure */
xfs_alloctype_t atype = 0; /* type for allocation routines */
xfs_extlen_t align; /* minimum allocation alignment */
- xfs_agnumber_t ag;
xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
- xfs_agnumber_t startag;
+ xfs_agnumber_t ag;
xfs_alloc_arg_t args;
xfs_extlen_t blen;
- xfs_extlen_t delta;
- xfs_extlen_t longest;
- xfs_extlen_t need;
xfs_extlen_t nextminlen = 0;
- xfs_perag_t *pag;
int nullfb; /* true if ap->firstblock isn't set */
int isaligned;
- int notinit;
int tryagain;
int error;
+ int stripe_align;
+
+ ASSERT(ap->length);
mp = ap->ip->i_mount;
+
+ /* stripe alignment for allocation is determined by mount parameters */
+ stripe_align = 0;
+ if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
+ stripe_align = mp->m_swidth;
+ else if (mp->m_dalign)
+ stripe_align = mp->m_dalign;
+
align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
if (unlikely(align)) {
- error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
+ error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
align, 0, ap->eof, 0, ap->conv,
- &ap->off, &ap->alen);
+ &ap->offset, &ap->length);
ASSERT(!error);
- ASSERT(ap->alen);
+ ASSERT(ap->length);
}
- nullfb = ap->firstblock == NULLFSBLOCK;
- fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
+
+
+ nullfb = *ap->firstblock == NULLFSBLOCK;
+ fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
if (nullfb) {
if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
ag = xfs_filestream_lookup_ag(ap->ip);
ag = (ag != NULLAGNUMBER) ? ag : 0;
- ap->rval = XFS_AGB_TO_FSB(mp, ag, 0);
+ ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
} else {
- ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
+ ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
}
} else
- ap->rval = ap->firstblock;
+ ap->blkno = *ap->firstblock;
xfs_bmap_adjacent(ap);
/*
- * If allowed, use ap->rval; otherwise must use firstblock since
+ * If allowed, use ap->blkno; otherwise must use firstblock since
* it's in the right allocation group.
*/
- if (nullfb || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno)
+ if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
;
else
- ap->rval = ap->firstblock;
+ ap->blkno = *ap->firstblock;
/*
* Normal allocation, done through xfs_alloc_vextent.
*/
tryagain = isaligned = 0;
+ memset(&args, 0, sizeof(args));
args.tp = ap->tp;
args.mp = mp;
- args.fsbno = ap->rval;
- args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks);
- args.firstblock = ap->firstblock;
+ args.fsbno = ap->blkno;
+
+ /* Trim the allocation back to the maximum an AG can fit. */
+ args.maxlen = MIN(ap->length, XFS_ALLOC_AG_MAX_USABLE(mp));
+ args.firstblock = *ap->firstblock;
blen = 0;
if (nullfb) {
- if (ap->userdata && xfs_inode_is_filestream(ap->ip))
- args.type = XFS_ALLOCTYPE_NEAR_BNO;
- else
- args.type = XFS_ALLOCTYPE_START_BNO;
- args.total = ap->total;
-
- /*
- * Search for an allocation group with a single extent
- * large enough for the request.
- *
- * If one isn't found, then adjust the minimum allocation
- * size to the largest space found.
- */
- startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno);
- if (startag == NULLAGNUMBER)
- startag = ag = 0;
- notinit = 0;
- down_read(&mp->m_peraglock);
- while (blen < ap->alen) {
- pag = &mp->m_perag[ag];
- if (!pag->pagf_init &&
- (error = xfs_alloc_pagf_init(mp, args.tp,
- ag, XFS_ALLOC_FLAG_TRYLOCK))) {
- up_read(&mp->m_peraglock);
- return error;
- }
- /*
- * See xfs_alloc_fix_freelist...
- */
- if (pag->pagf_init) {
- need = XFS_MIN_FREELIST_PAG(pag, mp);
- delta = need > pag->pagf_flcount ?
- need - pag->pagf_flcount : 0;
- longest = (pag->pagf_longest > delta) ?
- (pag->pagf_longest - delta) :
- (pag->pagf_flcount > 0 ||
- pag->pagf_longest > 0);
- if (blen < longest)
- blen = longest;
- } else
- notinit = 1;
-
- if (xfs_inode_is_filestream(ap->ip)) {
- if (blen >= ap->alen)
- break;
-
- if (ap->userdata) {
- /*
- * If startag is an invalid AG, we've
- * come here once before and
- * xfs_filestream_new_ag picked the
- * best currently available.
- *
- * Don't continue looping, since we
- * could loop forever.
- */
- if (startag == NULLAGNUMBER)
- break;
-
- error = xfs_filestream_new_ag(ap, &ag);
- if (error) {
- up_read(&mp->m_peraglock);
- return error;
- }
-
- /* loop again to set 'blen'*/
- startag = NULLAGNUMBER;
- continue;
- }
- }
- if (++ag == mp->m_sb.sb_agcount)
- ag = 0;
- if (ag == startag)
- break;
- }
- up_read(&mp->m_peraglock);
- /*
- * Since the above loop did a BUF_TRYLOCK, it is
- * possible that there is space for this request.
- */
- if (notinit || blen < ap->minlen)
- args.minlen = ap->minlen;
- /*
- * If the best seen length is less than the request
- * length, use the best as the minimum.
- */
- else if (blen < ap->alen)
- args.minlen = blen;
/*
- * Otherwise we've seen an extent as big as alen,
- * use that as the minimum.
+ * Search for an allocation group with a single extent large
+ * enough for the request. If one isn't found, then adjust
+ * the minimum allocation size to the largest space found.
*/
+ if (ap->userdata && xfs_inode_is_filestream(ap->ip))
+ error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
else
- args.minlen = ap->alen;
-
- /*
- * set the failure fallback case to look in the selected
- * AG as the stream may have moved.
- */
- if (xfs_inode_is_filestream(ap->ip))
- ap->rval = args.fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
- } else if (ap->low) {
+ error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
+ if (error)
+ return error;
+ } else if (ap->flist->xbf_low) {
if (xfs_inode_is_filestream(ap->ip))
args.type = XFS_ALLOCTYPE_FIRST_AG;
else
@@ -2827,14 +3756,14 @@ xfs_bmap_btalloc(
/* apply extent size hints if obtained earlier */
if (unlikely(align)) {
args.prod = align;
- if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
+ if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
args.mod = (xfs_extlen_t)(args.prod - args.mod);
} else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) {
args.prod = 1;
args.mod = 0;
} else {
args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog;
- if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod))))
+ if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
args.mod = (xfs_extlen_t)(args.prod - args.mod);
}
/*
@@ -2846,15 +3775,15 @@ xfs_bmap_btalloc(
* is >= the stripe unit and the allocation offset is
* at the end of file.
*/
- if (!ap->low && ap->aeof) {
- if (!ap->off) {
- args.alignment = mp->m_dalign;
+ if (!ap->flist->xbf_low && ap->aeof) {
+ if (!ap->offset) {
+ args.alignment = stripe_align;
atype = args.type;
isaligned = 1;
/*
* Adjust for alignment
*/
- if (blen > args.alignment && blen <= ap->alen)
+ if (blen > args.alignment && blen <= args.maxlen)
args.minlen = blen - args.alignment;
args.minalignslop = 0;
} else {
@@ -2873,13 +3802,13 @@ xfs_bmap_btalloc(
* of minlen+alignment+slop doesn't go up
* between the calls.
*/
- if (blen > mp->m_dalign && blen <= ap->alen)
- nextminlen = blen - mp->m_dalign;
+ if (blen > stripe_align && blen <= args.maxlen)
+ nextminlen = blen - stripe_align;
else
nextminlen = args.minlen;
- if (nextminlen + mp->m_dalign > args.minlen + 1)
+ if (nextminlen + stripe_align > args.minlen + 1)
args.minalignslop =
- nextminlen + mp->m_dalign -
+ nextminlen + stripe_align -
args.minlen - 1;
else
args.minalignslop = 0;
@@ -2900,8 +3829,8 @@ xfs_bmap_btalloc(
* turned on.
*/
args.type = atype;
- args.fsbno = ap->rval;
- args.alignment = mp->m_dalign;
+ args.fsbno = ap->blkno;
+ args.alignment = stripe_align;
args.minlen = nextminlen;
args.minalignslop = 0;
isaligned = 1;
@@ -2914,7 +3843,7 @@ xfs_bmap_btalloc(
* try again.
*/
args.type = atype;
- args.fsbno = ap->rval;
+ args.fsbno = ap->blkno;
args.alignment = 0;
if ((error = xfs_alloc_vextent(&args)))
return error;
@@ -2923,7 +3852,7 @@ xfs_bmap_btalloc(
args.minlen > ap->minlen) {
args.minlen = ap->minlen;
args.type = XFS_ALLOCTYPE_START_BNO;
- args.fsbno = ap->rval;
+ args.fsbno = ap->blkno;
if ((error = xfs_alloc_vextent(&args)))
return error;
}
@@ -2934,13 +3863,26 @@ xfs_bmap_btalloc(
args.minleft = 0;
if ((error = xfs_alloc_vextent(&args)))
return error;
- ap->low = 1;
+ ap->flist->xbf_low = 1;
}
if (args.fsbno != NULLFSBLOCK) {
- ap->firstblock = ap->rval = args.fsbno;
+ /*
+		 * Check that the allocation happened in the same or a higher
+		 * AG than the first block that was allocated.
+ */
+ ASSERT(*ap->firstblock == NULLFSBLOCK ||
+ XFS_FSB_TO_AGNO(mp, *ap->firstblock) ==
+ XFS_FSB_TO_AGNO(mp, args.fsbno) ||
+ (ap->flist->xbf_low &&
+ XFS_FSB_TO_AGNO(mp, *ap->firstblock) <
+ XFS_FSB_TO_AGNO(mp, args.fsbno)));
+
+ ap->blkno = args.fsbno;
+ if (*ap->firstblock == NULLFSBLOCK)
+ *ap->firstblock = args.fsbno;
ASSERT(nullfb || fb_agno == args.agno ||
- (ap->low && fb_agno < args.agno));
- ap->alen = args.len;
+ (ap->flist->xbf_low && fb_agno < args.agno));
+ ap->length = args.len;
ap->ip->i_d.di_nblocks += args.len;
xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
if (ap->wasdel)
@@ -2949,13 +3891,13 @@ xfs_bmap_btalloc(
* Adjust the disk quota also. This was reserved
* earlier.
*/
- XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
+ xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
XFS_TRANS_DQ_BCOUNT,
(long) args.len);
} else {
- ap->rval = NULLFSBLOCK;
- ap->alen = 0;
+ ap->blkno = NULLFSBLOCK;
+ ap->length = 0;
}
return 0;
}
@@ -2966,7 +3908,7 @@ xfs_bmap_btalloc(
*/
STATIC int
xfs_bmap_alloc(
- xfs_bmalloca_t *ap) /* bmap alloc argument struct */
+ struct xfs_bmalloca *ap) /* bmap alloc argument struct */
{
if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata)
return xfs_bmap_rtalloc(ap);
@@ -2974,65 +3916,795 @@ xfs_bmap_alloc(
}
/*
- * Transform a btree format file with only one leaf node, where the
- * extents list will fit in the inode, into an extents format file.
- * Since the file extents are already in-core, all we have to do is
- * give up the space for the btree root and pitch the leaf block.
+ * Trim the returned map to the required bounds
*/
-STATIC int /* error */
-xfs_bmap_btree_to_extents(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_btree_cur_t *cur, /* btree cursor */
- int *logflagsp, /* inode logging flags */
- int whichfork) /* data or attr fork */
+STATIC void
+xfs_bmapi_trim_map(
+ struct xfs_bmbt_irec *mval,
+ struct xfs_bmbt_irec *got,
+ xfs_fileoff_t *bno,
+ xfs_filblks_t len,
+ xfs_fileoff_t obno,
+ xfs_fileoff_t end,
+ int n,
+ int flags)
{
- /* REFERENCED */
- xfs_bmbt_block_t *cblock;/* child btree block */
- xfs_fsblock_t cbno; /* child block number */
- xfs_buf_t *cbp; /* child block's buffer */
- int error; /* error return value */
- xfs_ifork_t *ifp; /* inode fork data */
- xfs_mount_t *mp; /* mount point structure */
- __be64 *pp; /* ptr to block address */
- xfs_bmbt_block_t *rblock;/* root btree block */
+ if ((flags & XFS_BMAPI_ENTIRE) ||
+ got->br_startoff + got->br_blockcount <= obno) {
+ *mval = *got;
+ if (isnullstartblock(got->br_startblock))
+ mval->br_startblock = DELAYSTARTBLOCK;
+ return;
+ }
+
+ if (obno > *bno)
+ *bno = obno;
+ ASSERT((*bno >= obno) || (n == 0));
+ ASSERT(*bno < end);
+ mval->br_startoff = *bno;
+ if (isnullstartblock(got->br_startblock))
+ mval->br_startblock = DELAYSTARTBLOCK;
+ else
+ mval->br_startblock = got->br_startblock +
+ (*bno - got->br_startoff);
+ /*
+	 * Return the minimum of what we got and what we asked for as
+	 * the length.  We can use the len variable here because it is
+	 * modified below and we could already have been here on a
+	 * previous pass if the first part of the allocation didn't
+	 * overlap what was asked for.
+ */
+ mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
+ got->br_blockcount - (*bno - got->br_startoff));
+ mval->br_state = got->br_state;
+ ASSERT(mval->br_blockcount <= len);
+ return;
+}
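A worked example may help make the trimming arithmetic above concrete; the numbers are illustrative only and not part of the patch.

/*
 * Illustrative numbers only: if the caller asked for [bno = 100, end = 120)
 * and the extent found is { br_startoff = 96, br_blockcount = 32 }, the
 * trimmed map returned above is
 *
 *	br_startoff   = 100
 *	br_startblock = got->br_startblock + (100 - 96)
 *	br_blockcount = min(120 - 100, 32 - (100 - 96)) = 20
 */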
+
+/*
+ * Update and validate the extent map to return
+ */
+STATIC void
+xfs_bmapi_update_map(
+ struct xfs_bmbt_irec **map,
+ xfs_fileoff_t *bno,
+ xfs_filblks_t *len,
+ xfs_fileoff_t obno,
+ xfs_fileoff_t end,
+ int *n,
+ int flags)
+{
+ xfs_bmbt_irec_t *mval = *map;
+
+ ASSERT((flags & XFS_BMAPI_ENTIRE) ||
+ ((mval->br_startoff + mval->br_blockcount) <= end));
+ ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
+ (mval->br_startoff < obno));
+
+ *bno = mval->br_startoff + mval->br_blockcount;
+ *len = end - *bno;
+ if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
+ /* update previous map with new information */
+ ASSERT(mval->br_startblock == mval[-1].br_startblock);
+ ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
+ ASSERT(mval->br_state == mval[-1].br_state);
+ mval[-1].br_blockcount = mval->br_blockcount;
+ mval[-1].br_state = mval->br_state;
+ } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
+ mval[-1].br_startblock != DELAYSTARTBLOCK &&
+ mval[-1].br_startblock != HOLESTARTBLOCK &&
+ mval->br_startblock == mval[-1].br_startblock +
+ mval[-1].br_blockcount &&
+ ((flags & XFS_BMAPI_IGSTATE) ||
+ mval[-1].br_state == mval->br_state)) {
+ ASSERT(mval->br_startoff ==
+ mval[-1].br_startoff + mval[-1].br_blockcount);
+ mval[-1].br_blockcount += mval->br_blockcount;
+ } else if (*n > 0 &&
+ mval->br_startblock == DELAYSTARTBLOCK &&
+ mval[-1].br_startblock == DELAYSTARTBLOCK &&
+ mval->br_startoff ==
+ mval[-1].br_startoff + mval[-1].br_blockcount) {
+ mval[-1].br_blockcount += mval->br_blockcount;
+ mval[-1].br_state = mval->br_state;
+ } else if (!((*n == 0) &&
+ ((mval->br_startoff + mval->br_blockcount) <=
+ obno))) {
+ mval++;
+ (*n)++;
+ }
+ *map = mval;
+}
+
+/*
+ * Map file blocks to filesystem blocks without allocation.
+ */
+int
+xfs_bmapi_read(
+ struct xfs_inode *ip,
+ xfs_fileoff_t bno,
+ xfs_filblks_t len,
+ struct xfs_bmbt_irec *mval,
+ int *nmap,
+ int flags)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp;
+ struct xfs_bmbt_irec got;
+ struct xfs_bmbt_irec prev;
+ xfs_fileoff_t obno;
+ xfs_fileoff_t end;
+ xfs_extnum_t lastx;
+ int error;
+ int eof;
+ int n = 0;
+ int whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
+ XFS_ATTR_FORK : XFS_DATA_FORK;
+
+ ASSERT(*nmap >= 1);
+ ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
+ XFS_BMAPI_IGSTATE)));
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
+
+ if (unlikely(XFS_TEST_ERROR(
+ (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
+ mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
+ XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
+ return XFS_ERROR(EFSCORRUPTED);
+ }
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return XFS_ERROR(EIO);
+
+ XFS_STATS_INC(xs_blk_mapr);
ifp = XFS_IFORK_PTR(ip, whichfork);
- ASSERT(ifp->if_flags & XFS_IFEXTENTS);
- ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
- rblock = ifp->if_broot;
- ASSERT(be16_to_cpu(rblock->bb_level) == 1);
- ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
- ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1);
- mp = ip->i_mount;
- pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes);
- cbno = be64_to_cpu(*pp);
- *logflagsp = 0;
-#ifdef DEBUG
- if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
+
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(NULL, ip, whichfork);
+ if (error)
+ return error;
+ }
+
+ xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, &prev);
+ end = bno + len;
+ obno = bno;
+
+ while (bno < end && n < *nmap) {
+ /* Reading past eof, act as though there's a hole up to end. */
+ if (eof)
+ got.br_startoff = end;
+ if (got.br_startoff > bno) {
+ /* Reading in a hole. */
+ mval->br_startoff = bno;
+ mval->br_startblock = HOLESTARTBLOCK;
+ mval->br_blockcount =
+ XFS_FILBLKS_MIN(len, got.br_startoff - bno);
+ mval->br_state = XFS_EXT_NORM;
+ bno += mval->br_blockcount;
+ len -= mval->br_blockcount;
+ mval++;
+ n++;
+ continue;
+ }
+
+ /* set up the extent map to return. */
+ xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
+ xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
+
+ /* If we're done, stop now. */
+ if (bno >= end || n >= *nmap)
+ break;
+
+ /* Else go on to the next record. */
+ if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
+ else
+ eof = 1;
+ }
+ *nmap = n;
+ return 0;
+}
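For orientation, here is a minimal sketch (not part of the patch) of how a caller might use the new xfs_bmapi_read() interface to look up a mapping without allocating anything; the helper name and the ENOENT handling are hypothetical, and the caller is assumed to already hold at least XFS_ILOCK_SHARED.

STATIC int
example_lookup_mapping(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_filblks_t		count_fsb)
{
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;	/* room for one mapping */
	int			error;

	error = xfs_bmapi_read(ip, offset_fsb, count_fsb,
			       &imap, &nimaps, 0);
	if (error)
		return error;

	/* a hole comes back as HOLESTARTBLOCK, delalloc as DELAYSTARTBLOCK */
	if (!nimaps || imap.br_startblock == HOLESTARTBLOCK)
		return ENOENT;		/* hypothetical "no mapping" result */
	return 0;
}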
+
+STATIC int
+xfs_bmapi_reserve_delalloc(
+ struct xfs_inode *ip,
+ xfs_fileoff_t aoff,
+ xfs_filblks_t len,
+ struct xfs_bmbt_irec *got,
+ struct xfs_bmbt_irec *prev,
+ xfs_extnum_t *lastx,
+ int eof)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ xfs_extlen_t alen;
+ xfs_extlen_t indlen;
+ char rt = XFS_IS_REALTIME_INODE(ip);
+ xfs_extlen_t extsz;
+ int error;
+
+ alen = XFS_FILBLKS_MIN(len, MAXEXTLEN);
+ if (!eof)
+ alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
+
+ /* Figure out the extent size, adjust alen */
+ extsz = xfs_get_extsz_hint(ip);
+ if (extsz) {
+ /*
+ * Make sure we don't exceed a single extent length when we
+		 * align the extent by reducing the length we are going to
+		 * allocate by the maximum amount extent size alignment may
+ * require.
+ */
+ alen = XFS_FILBLKS_MIN(len, MAXEXTLEN - (2 * extsz - 1));
+ error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof,
+ 1, 0, &aoff, &alen);
+ ASSERT(!error);
+ }
+
+ if (rt)
+ extsz = alen / mp->m_sb.sb_rextsize;
+
+ /*
+ * Make a transaction-less quota reservation for delayed allocation
+ * blocks. This number gets adjusted later. We return if we haven't
+ * allocated blocks already inside this loop.
+ */
+ error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
+ rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+ if (error)
return error;
-#endif
- if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp,
- XFS_BMAP_BTREE_REF)))
+
+ /*
+ * Split changing sb for alen and indlen since they could be coming
+ * from different places.
+ */
+ indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
+ ASSERT(indlen > 0);
+
+ if (rt) {
+ error = xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
+ -((int64_t)extsz), 0);
+ } else {
+ error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
+ -((int64_t)alen), 0);
+ }
+
+ if (error)
+ goto out_unreserve_quota;
+
+ error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
+ -((int64_t)indlen), 0);
+ if (error)
+ goto out_unreserve_blocks;
+
+ ip->i_delayed_blks += alen;
+
+ got->br_startoff = aoff;
+ got->br_startblock = nullstartblock(indlen);
+ got->br_blockcount = alen;
+ got->br_state = XFS_EXT_NORM;
+ xfs_bmap_add_extent_hole_delay(ip, lastx, got);
+
+ /*
+ * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
+ * might have merged it into one of the neighbouring ones.
+ */
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
+
+ ASSERT(got->br_startoff <= aoff);
+ ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
+ ASSERT(isnullstartblock(got->br_startblock));
+ ASSERT(got->br_state == XFS_EXT_NORM);
+ return 0;
+
+out_unreserve_blocks:
+ if (rt)
+ xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, extsz, 0);
+ else
+ xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, alen, 0);
+out_unreserve_quota:
+ if (XFS_IS_QUOTA_ON(mp))
+ xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
+ XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+ return error;
+}
+
+/*
+ * Map file blocks to filesystem blocks, adding delayed allocations as needed.
+ */
+int
+xfs_bmapi_delay(
+ struct xfs_inode *ip, /* incore inode */
+ xfs_fileoff_t bno, /* starting file offs. mapped */
+ xfs_filblks_t len, /* length to map in file */
+ struct xfs_bmbt_irec *mval, /* output: map values */
+ int *nmap, /* i/o: mval size/count */
+ int flags) /* XFS_BMAPI_... */
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ struct xfs_bmbt_irec got; /* current file extent record */
+ struct xfs_bmbt_irec prev; /* previous file extent record */
+ xfs_fileoff_t obno; /* old block number (offset) */
+ xfs_fileoff_t end; /* end of mapped file region */
+ xfs_extnum_t lastx; /* last useful extent number */
+ int eof; /* we've hit the end of extents */
+ int n = 0; /* current extent index */
+ int error = 0;
+
+ ASSERT(*nmap >= 1);
+ ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
+ ASSERT(!(flags & ~XFS_BMAPI_ENTIRE));
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+ if (unlikely(XFS_TEST_ERROR(
+ (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
+ mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
+ XFS_ERROR_REPORT("xfs_bmapi_delay", XFS_ERRLEVEL_LOW, mp);
+ return XFS_ERROR(EFSCORRUPTED);
+ }
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return XFS_ERROR(EIO);
+
+ XFS_STATS_INC(xs_blk_mapw);
+
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
+ if (error)
+ return error;
+ }
+
+ xfs_bmap_search_extents(ip, bno, XFS_DATA_FORK, &eof, &lastx, &got, &prev);
+ end = bno + len;
+ obno = bno;
+
+ while (bno < end && n < *nmap) {
+ if (eof || got.br_startoff > bno) {
+ error = xfs_bmapi_reserve_delalloc(ip, bno, len, &got,
+ &prev, &lastx, eof);
+ if (error) {
+ if (n == 0) {
+ *nmap = 0;
+ return error;
+ }
+ break;
+ }
+ }
+
+ /* set up the extent map to return. */
+ xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
+ xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
+
+ /* If we're done, stop now. */
+ if (bno >= end || n >= *nmap)
+ break;
+
+ /* Else go on to the next record. */
+ prev = got;
+ if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
+ else
+ eof = 1;
+ }
+
+ *nmap = n;
+ return 0;
+}
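Similarly, a hedged sketch of how a buffered-write path might use the new xfs_bmapi_delay() interface to reserve delayed allocation over the range being dirtied; the helper name is hypothetical and XFS_ILOCK_EXCL is assumed to be held by the caller.

STATIC int
example_delalloc_reserve(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_filblks_t		count_fsb,
	struct xfs_bmbt_irec	*imap)
{
	int			nimaps = 1;

	/* reserves space and inserts a delalloc extent covering the range */
	return xfs_bmapi_delay(ip, offset_fsb, count_fsb, imap, &nimaps, 0);
}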
+
+
+static int
+xfs_bmapi_allocate(
+ struct xfs_bmalloca *bma)
+{
+ struct xfs_mount *mp = bma->ip->i_mount;
+ int whichfork = (bma->flags & XFS_BMAPI_ATTRFORK) ?
+ XFS_ATTR_FORK : XFS_DATA_FORK;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
+ int tmp_logflags = 0;
+ int error;
+
+ ASSERT(bma->length > 0);
+
+ /*
+	 * For the wasdelay case, we could also just allocate the blocks asked
+	 * for in this bmap call, but that wouldn't be as good.
+ */
+ if (bma->wasdel) {
+ bma->length = (xfs_extlen_t)bma->got.br_blockcount;
+ bma->offset = bma->got.br_startoff;
+ if (bma->idx != NULLEXTNUM && bma->idx) {
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
+ &bma->prev);
+ }
+ } else {
+ bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
+ if (!bma->eof)
+ bma->length = XFS_FILBLKS_MIN(bma->length,
+ bma->got.br_startoff - bma->offset);
+ }
+
+ /*
+ * Indicate if this is the first user data in the file, or just any
+ * user data.
+ */
+ if (!(bma->flags & XFS_BMAPI_METADATA)) {
+ bma->userdata = (bma->offset == 0) ?
+ XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA;
+ }
+
+ bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
+
+ /*
+ * Only want to do the alignment at the eof if it is userdata and
+ * allocation length is larger than a stripe unit.
+ */
+ if (mp->m_dalign && bma->length >= mp->m_dalign &&
+ !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
+ error = xfs_bmap_isaeof(bma, whichfork);
+ if (error)
+ return error;
+ }
+
+ error = xfs_bmap_alloc(bma);
+ if (error)
+ return error;
+
+ if (bma->flist->xbf_low)
+ bma->minleft = 0;
+ if (bma->cur)
+ bma->cur->bc_private.b.firstblock = *bma->firstblock;
+ if (bma->blkno == NULLFSBLOCK)
+ return 0;
+ if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
+ bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
+ bma->cur->bc_private.b.firstblock = *bma->firstblock;
+ bma->cur->bc_private.b.flist = bma->flist;
+ }
+ /*
+ * Bump the number of extents we've allocated
+ * in this call.
+ */
+ bma->nallocs++;
+
+ if (bma->cur)
+ bma->cur->bc_private.b.flags =
+ bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
+
+ bma->got.br_startoff = bma->offset;
+ bma->got.br_startblock = bma->blkno;
+ bma->got.br_blockcount = bma->length;
+ bma->got.br_state = XFS_EXT_NORM;
+
+ /*
+ * A wasdelay extent has been initialized, so shouldn't be flagged
+ * as unwritten.
+ */
+ if (!bma->wasdel && (bma->flags & XFS_BMAPI_PREALLOC) &&
+ xfs_sb_version_hasextflgbit(&mp->m_sb))
+ bma->got.br_state = XFS_EXT_UNWRITTEN;
+
+ if (bma->wasdel)
+ error = xfs_bmap_add_extent_delay_real(bma);
+ else
+ error = xfs_bmap_add_extent_hole_real(bma, whichfork);
+
+ bma->logflags |= tmp_logflags;
+ if (error)
return error;
- cblock = XFS_BUF_TO_BMBT_BLOCK(cbp);
- if ((error = xfs_btree_check_lblock(cur, cblock, 0, cbp)))
+
+ /*
+ * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
+ * or xfs_bmap_add_extent_hole_real might have merged it into one of
+ * the neighbouring ones.
+ */
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
+
+ ASSERT(bma->got.br_startoff <= bma->offset);
+ ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
+ bma->offset + bma->length);
+ ASSERT(bma->got.br_state == XFS_EXT_NORM ||
+ bma->got.br_state == XFS_EXT_UNWRITTEN);
+ return 0;
+}
+
+STATIC int
+xfs_bmapi_convert_unwritten(
+ struct xfs_bmalloca *bma,
+ struct xfs_bmbt_irec *mval,
+ xfs_filblks_t len,
+ int flags)
+{
+ int whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
+ XFS_ATTR_FORK : XFS_DATA_FORK;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
+ int tmp_logflags = 0;
+ int error;
+
+ /* check if we need to do unwritten->real conversion */
+ if (mval->br_state == XFS_EXT_UNWRITTEN &&
+ (flags & XFS_BMAPI_PREALLOC))
+ return 0;
+
+ /* check if we need to do real->unwritten conversion */
+ if (mval->br_state == XFS_EXT_NORM &&
+ (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
+ (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
+ return 0;
+
+ /*
+ * Modify (by adding) the state flag, if writing.
+ */
+ ASSERT(mval->br_blockcount <= len);
+ if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
+ bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
+ bma->ip, whichfork);
+ bma->cur->bc_private.b.firstblock = *bma->firstblock;
+ bma->cur->bc_private.b.flist = bma->flist;
+ }
+ mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
+ ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
+
+ error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx,
+ &bma->cur, mval, bma->firstblock, bma->flist,
+ &tmp_logflags);
+ bma->logflags |= tmp_logflags;
+ if (error)
return error;
- xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
- ip->i_d.di_nblocks--;
- XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
- xfs_trans_binval(tp, cbp);
- if (cur->bc_bufs[0] == cbp)
- cur->bc_bufs[0] = NULL;
- xfs_iroot_realloc(ip, -1, whichfork);
- ASSERT(ifp->if_broot == NULL);
- ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
- XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
- *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
+
+ /*
+ * Update our extent pointer, given that
+ * xfs_bmap_add_extent_unwritten_real might have merged it into one
+ * of the neighbouring ones.
+ */
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
+
+ /*
+ * We may have combined previously unwritten space with written space,
+ * so generate another request.
+ */
+ if (mval->br_blockcount < len)
+ return EAGAIN;
return 0;
}
/*
+ * Map file blocks to filesystem blocks, and allocate blocks or convert the
+ * extent state if necessary. Detailed behaviour is controlled by the flags
+ * parameter. Only allocates blocks from a single allocation group, to avoid
+ * locking problems.
+ *
+ * The returned value in "firstblock" from the first call in a transaction
+ * must be remembered and presented to subsequent calls in "firstblock".
+ * An upper bound for the number of blocks to be allocated is supplied to
+ * the first call in "total"; if no allocation group has that many free
+ * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
+ */
+int
+xfs_bmapi_write(
+ struct xfs_trans *tp, /* transaction pointer */
+ struct xfs_inode *ip, /* incore inode */
+ xfs_fileoff_t bno, /* starting file offs. mapped */
+ xfs_filblks_t len, /* length to map in file */
+ int flags, /* XFS_BMAPI_... */
+ xfs_fsblock_t *firstblock, /* first allocated block
+ controls a.g. for allocs */
+ xfs_extlen_t total, /* total blocks needed */
+ struct xfs_bmbt_irec *mval, /* output: map values */
+ int *nmap, /* i/o: mval size/count */
+ struct xfs_bmap_free *flist) /* i/o: list extents to free */
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp;
+ struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
+ xfs_fileoff_t end; /* end of mapped file region */
+ int eof; /* after the end of extents */
+ int error; /* error return */
+ int n; /* current extent index */
+ xfs_fileoff_t obno; /* old block number (offset) */
+ int whichfork; /* data or attr fork */
+ char inhole; /* current location is hole in file */
+ char wasdelay; /* old extent was delayed */
+
+#ifdef DEBUG
+ xfs_fileoff_t orig_bno; /* original block number value */
+ int orig_flags; /* original flags arg value */
+ xfs_filblks_t orig_len; /* original value of len arg */
+ struct xfs_bmbt_irec *orig_mval; /* original value of mval */
+ int orig_nmap; /* original value of *nmap */
+
+ orig_bno = bno;
+ orig_len = len;
+ orig_flags = flags;
+ orig_mval = mval;
+ orig_nmap = *nmap;
+#endif
+ whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
+ XFS_ATTR_FORK : XFS_DATA_FORK;
+
+ ASSERT(*nmap >= 1);
+ ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
+ ASSERT(!(flags & XFS_BMAPI_IGSTATE));
+ ASSERT(tp != NULL);
+ ASSERT(len > 0);
+ ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+ if (unlikely(XFS_TEST_ERROR(
+ (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
+ mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
+ XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
+ return XFS_ERROR(EFSCORRUPTED);
+ }
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return XFS_ERROR(EIO);
+
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+
+ XFS_STATS_INC(xs_blk_mapw);
+
+ if (*firstblock == NULLFSBLOCK) {
+ if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
+ bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
+ else
+ bma.minleft = 1;
+ } else {
+ bma.minleft = 0;
+ }
+
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(tp, ip, whichfork);
+ if (error)
+ goto error0;
+ }
+
+ xfs_bmap_search_extents(ip, bno, whichfork, &eof, &bma.idx, &bma.got,
+ &bma.prev);
+ n = 0;
+ end = bno + len;
+ obno = bno;
+
+ bma.tp = tp;
+ bma.ip = ip;
+ bma.total = total;
+ bma.userdata = 0;
+ bma.flist = flist;
+ bma.firstblock = firstblock;
+
+ while (bno < end && n < *nmap) {
+ inhole = eof || bma.got.br_startoff > bno;
+ wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
+
+ /*
+ * First, deal with the hole before the allocated space
+ * that we found, if any.
+ */
+ if (inhole || wasdelay) {
+ bma.eof = eof;
+ bma.conv = !!(flags & XFS_BMAPI_CONVERT);
+ bma.wasdel = wasdelay;
+ bma.offset = bno;
+ bma.flags = flags;
+
+ /*
+ * There's a 32/64 bit type mismatch between the
+ * allocation length request (which can be 64 bits in
+ * length) and the bma length request, which is
+ * xfs_extlen_t and therefore 32 bits. Hence we have to
+ * check for 32-bit overflows and handle them here.
+ */
+ if (len > (xfs_filblks_t)MAXEXTLEN)
+ bma.length = MAXEXTLEN;
+ else
+ bma.length = len;
+
+ ASSERT(len > 0);
+ ASSERT(bma.length > 0);
+ error = xfs_bmapi_allocate(&bma);
+ if (error)
+ goto error0;
+ if (bma.blkno == NULLFSBLOCK)
+ break;
+ }
+
+ /* Deal with the allocated space we found. */
+ xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
+ end, n, flags);
+
+ /* Execute unwritten extent conversion if necessary */
+ error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
+ if (error == EAGAIN)
+ continue;
+ if (error)
+ goto error0;
+
+ /* update the extent map to return */
+ xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
+
+ /*
+ * If we're done, stop now. Stop when we've allocated
+ * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
+ * the transaction may get too big.
+ */
+ if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
+ break;
+
+ /* Else go on to the next record. */
+ bma.prev = bma.got;
+ if (++bma.idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) {
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma.idx),
+ &bma.got);
+ } else
+ eof = 1;
+ }
+ *nmap = n;
+
+ /*
+ * Transform from btree to extents, give it cur.
+ */
+ if (xfs_bmap_wants_extents(ip, whichfork)) {
+ int tmp_logflags = 0;
+
+ ASSERT(bma.cur);
+ error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
+ &tmp_logflags, whichfork);
+ bma.logflags |= tmp_logflags;
+ if (error)
+ goto error0;
+ }
+
+ ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
+ XFS_IFORK_NEXTENTS(ip, whichfork) >
+ XFS_IFORK_MAXEXT(ip, whichfork));
+ error = 0;
+error0:
+ /*
+	 * Log everything.  Do this after conversion; there's no point in
+ * logging the extent records if we've converted to btree format.
+ */
+ if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
+ XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
+ bma.logflags &= ~xfs_ilog_fext(whichfork);
+ else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
+ XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
+ bma.logflags &= ~xfs_ilog_fbroot(whichfork);
+ /*
+	 * Log whatever the flags say, even on error.  Otherwise we might miss
+	 * detecting a case where the data is changed, there's an error,
+	 * and it's not logged, so we don't shut down when we should.
+ */
+ if (bma.logflags)
+ xfs_trans_log_inode(tp, ip, bma.logflags);
+
+ if (bma.cur) {
+ if (!error) {
+ ASSERT(*firstblock == NULLFSBLOCK ||
+ XFS_FSB_TO_AGNO(mp, *firstblock) ==
+ XFS_FSB_TO_AGNO(mp,
+ bma.cur->bc_private.b.firstblock) ||
+ (flist->xbf_low &&
+ XFS_FSB_TO_AGNO(mp, *firstblock) <
+ XFS_FSB_TO_AGNO(mp,
+ bma.cur->bc_private.b.firstblock)));
+ *firstblock = bma.cur->bc_private.b.firstblock;
+ }
+ xfs_btree_del_cursor(bma.cur,
+ error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ }
+ if (!error)
+ xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
+ orig_nmap, *nmap);
+ return error;
+}
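To round out the calling convention documented above xfs_bmapi_write(), a rough sketch under stated assumptions: the transaction is already reserved and the inode joined to it, the helper name is illustrative only, and xfs_bmap_init() is assumed to be the current spelling of the old XFS_BMAP_INIT() macro for setting up the free list and firstblock together.

STATIC int
example_allocate_range(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_filblks_t		count_fsb,
	xfs_extlen_t		total)
{
	struct xfs_bmbt_irec	imap;
	struct xfs_bmap_free	flist;
	xfs_fsblock_t		firstblock;
	int			nimaps = 1;
	int			error;

	/* firstblock starts as NULLFSBLOCK; both are reused on later calls */
	xfs_bmap_init(&flist, &firstblock);

	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, 0,
				&firstblock, total, &imap, &nimaps, &flist);
	if (error)
		return error;

	/* no AG had "total" free blocks: nothing was mapped */
	if (!nimaps)
		return ENOSPC;		/* hypothetical handling */
	return 0;
}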
+
+/*
* Called by xfs_bmapi to update file extent records and the btree
* after removing space (or undoing a delayed allocation).
*/
@@ -3040,14 +4712,12 @@ STATIC int /* error */
xfs_bmap_del_extent(
xfs_inode_t *ip, /* incore inode pointer */
xfs_trans_t *tp, /* current transaction pointer */
- xfs_extnum_t idx, /* extent number to update/delete */
+ xfs_extnum_t *idx, /* extent number to update/delete */
xfs_bmap_free_t *flist, /* list of extents to be freed */
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *del, /* data to remove from extents */
int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
- int whichfork, /* data or attr fork */
- int rsvd) /* OK to allocate reserved blocks */
+ int whichfork) /* data or attr fork */
{
xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
@@ -3069,21 +4739,26 @@ xfs_bmap_del_extent(
uint qfield; /* quota field to update */
xfs_filblks_t temp; /* for indirect length calculations */
xfs_filblks_t temp2; /* for indirect length calculations */
+ int state = 0;
XFS_STATS_INC(xs_del_exlist);
+
+ if (whichfork == XFS_ATTR_FORK)
+ state |= BMAP_ATTRFORK;
+
mp = ip->i_mount;
ifp = XFS_IFORK_PTR(ip, whichfork);
- ASSERT((idx >= 0) && (idx < ifp->if_bytes /
+ ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
(uint)sizeof(xfs_bmbt_rec_t)));
ASSERT(del->br_blockcount > 0);
- ep = xfs_iext_get_ext(ifp, idx);
+ ep = xfs_iext_get_ext(ifp, *idx);
xfs_bmbt_get_all(ep, &got);
ASSERT(got.br_startoff <= del->br_startoff);
del_endoff = del->br_startoff + del->br_blockcount;
got_endoff = got.br_startoff + got.br_blockcount;
ASSERT(got_endoff >= del_endoff);
- delay = ISNULLSTARTBLOCK(got.br_startblock);
- ASSERT(ISNULLSTARTBLOCK(del->br_startblock) == delay);
+ delay = isnullstartblock(got.br_startblock);
+ ASSERT(isnullstartblock(del->br_startblock) == delay);
flags = 0;
qfield = 0;
error = 0;
@@ -3107,8 +4782,8 @@ xfs_bmap_del_extent(
len = del->br_blockcount;
do_div(bno, mp->m_sb.sb_rextsize);
do_div(len, mp->m_sb.sb_rextsize);
- if ((error = xfs_rtfree_extent(ip->i_transp, bno,
- (xfs_extlen_t)len)))
+ error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
+ if (error)
goto done;
do_fx = 0;
nblks = len * mp->m_sb.sb_rextsize;
@@ -3131,11 +4806,11 @@ xfs_bmap_del_extent(
got.br_startblock, got.br_blockcount,
&i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
da_old = da_new = 0;
} else {
- da_old = STARTBLOCKVAL(got.br_startblock);
+ da_old = startblockval(got.br_startblock);
da_new = 0;
nblks = 0;
do_fx = 0;
@@ -3150,45 +4825,44 @@ xfs_bmap_del_extent(
/*
* Matches the whole extent. Delete the entry.
*/
- XFS_BMAP_TRACE_DELETE("3", ip, idx, 1, whichfork);
- xfs_iext_remove(ifp, idx, 1);
- ifp->if_lastex = idx;
+ xfs_iext_remove(ip, *idx, 1,
+ whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
+ --*idx;
if (delay)
break;
+
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
flags |= XFS_ILOG_CORE;
if (!cur) {
- flags |= XFS_ILOG_FEXT(whichfork);
+ flags |= xfs_ilog_fext(whichfork);
break;
}
- if ((error = xfs_bmbt_delete(cur, &i)))
+ if ((error = xfs_btree_delete(cur, &i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
break;
case 2:
/*
* Deleting the first part of the extent.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("2", ip, idx, whichfork);
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
xfs_bmbt_set_startoff(ep, del_endoff);
temp = got.br_blockcount - del->br_blockcount;
xfs_bmbt_set_blockcount(ep, temp);
- ifp->if_lastex = idx;
if (delay) {
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
da_old);
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx,
- whichfork);
+ xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
da_new = temp;
break;
}
xfs_bmbt_set_startblock(ep, del_endblock);
- XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, whichfork);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
if (!cur) {
- flags |= XFS_ILOG_FEXT(whichfork);
+ flags |= xfs_ilog_fext(whichfork);
break;
}
if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
@@ -3202,21 +4876,19 @@ xfs_bmap_del_extent(
* Deleting the last part of the extent.
*/
temp = got.br_blockcount - del->br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("1", ip, idx, whichfork);
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
- ifp->if_lastex = idx;
if (delay) {
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
da_old);
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx,
- whichfork);
+ xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
da_new = temp;
break;
}
- XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, whichfork);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
if (!cur) {
- flags |= XFS_ILOG_FEXT(whichfork);
+ flags |= xfs_ilog_fext(whichfork);
break;
}
if ((error = xfs_bmbt_update(cur, got.br_startoff,
@@ -3231,7 +4903,7 @@ xfs_bmap_del_extent(
* Deleting the middle of the extent.
*/
temp = del->br_startoff - got.br_startoff;
- XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, whichfork);
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
new.br_startoff = del_endoff;
temp2 = got_endoff - del_endoff;
@@ -3246,10 +4918,10 @@ xfs_bmap_del_extent(
got.br_startblock, temp,
got.br_state)))
goto done;
- if ((error = xfs_bmbt_increment(cur, 0, &i)))
+ if ((error = xfs_btree_increment(cur, 0, &i)))
goto done;
cur->bc_rec.b = new;
- error = xfs_bmbt_insert(cur, &i);
+ error = xfs_btree_insert(cur, &i);
if (error && error != ENOSPC)
goto done;
/*
@@ -3268,7 +4940,7 @@ xfs_bmap_del_extent(
got.br_startblock,
temp, &i)))
goto done;
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
/*
* Update the btree record back
* to the original value.
@@ -3289,24 +4961,24 @@ xfs_bmap_del_extent(
error = XFS_ERROR(ENOSPC);
goto done;
}
- ASSERT(i == 1);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, done);
} else
- flags |= XFS_ILOG_FEXT(whichfork);
+ flags |= xfs_ilog_fext(whichfork);
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
} else {
ASSERT(whichfork == XFS_DATA_FORK);
temp = xfs_bmap_worst_indlen(ip, temp);
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+ xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
temp2 = xfs_bmap_worst_indlen(ip, temp2);
- new.br_startblock = NULLSTARTBLOCK((int)temp2);
+ new.br_startblock = nullstartblock((int)temp2);
da_new = temp + temp2;
while (da_new > da_old) {
if (temp) {
temp--;
da_new--;
xfs_bmbt_set_startblock(ep,
- NULLSTARTBLOCK((int)temp));
+ nullstartblock((int)temp));
}
if (da_new == da_old)
break;
@@ -3314,15 +4986,13 @@ xfs_bmap_del_extent(
temp2--;
da_new--;
new.br_startblock =
- NULLSTARTBLOCK((int)temp2);
+ nullstartblock((int)temp2);
}
}
}
- XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, whichfork);
- XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 1, &new, NULL,
- whichfork);
- xfs_iext_insert(ifp, idx + 1, 1, &new);
- ifp->if_lastex = idx + 1;
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ xfs_iext_insert(ip, *idx + 1, 1, &new, state);
+ ++*idx;
break;
}
/*
@@ -3340,2007 +5010,23 @@ xfs_bmap_del_extent(
* Adjust quota data.
*/
if (qfield)
- XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, qfield, (long)-nblks);
+ xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
/*
* Account for change in delayed indirect blocks.
* Nothing to do for disk quota accounting here.
*/
ASSERT(da_old >= da_new);
- if (da_old > da_new)
- xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new),
- rsvd);
- if (delta) {
- /* DELTA: report the original extent. */
- if (delta->xed_startoff > got.br_startoff)
- delta->xed_startoff = got.br_startoff;
- if (delta->xed_blockcount < got.br_startoff+got.br_blockcount)
- delta->xed_blockcount = got.br_startoff +
- got.br_blockcount;
- }
-done:
- *logflagsp = flags;
- return error;
-}
-
-/*
- * Remove the entry "free" from the free item list. Prev points to the
- * previous entry, unless "free" is the head of the list.
- */
-STATIC void
-xfs_bmap_del_free(
- xfs_bmap_free_t *flist, /* free item list header */
- xfs_bmap_free_item_t *prev, /* previous item on list, if any */
- xfs_bmap_free_item_t *free) /* list item to be freed */
-{
- if (prev)
- prev->xbfi_next = free->xbfi_next;
- else
- flist->xbf_first = free->xbfi_next;
- flist->xbf_count--;
- kmem_zone_free(xfs_bmap_free_item_zone, free);
-}
-
-/*
- * Convert an extents-format file into a btree-format file.
- * The new file will have a root block (in the inode) and a single child block.
- */
-STATIC int /* error */
-xfs_bmap_extents_to_btree(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_fsblock_t *firstblock, /* first-block-allocated */
- xfs_bmap_free_t *flist, /* blocks freed in xaction */
- xfs_btree_cur_t **curp, /* cursor returned to caller */
- int wasdel, /* converting a delayed alloc */
- int *logflagsp, /* inode logging flags */
- int whichfork) /* data or attr fork */
-{
- xfs_bmbt_block_t *ablock; /* allocated (child) bt block */
- xfs_buf_t *abp; /* buffer for ablock */
- xfs_alloc_arg_t args; /* allocation arguments */
- xfs_bmbt_rec_t *arp; /* child record pointer */
- xfs_bmbt_block_t *block; /* btree root block */
- xfs_btree_cur_t *cur; /* bmap btree cursor */
- xfs_bmbt_rec_host_t *ep; /* extent record pointer */
- int error; /* error return value */
- xfs_extnum_t i, cnt; /* extent record index */
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_bmbt_key_t *kp; /* root block key pointer */
- xfs_mount_t *mp; /* mount structure */
- xfs_extnum_t nextents; /* number of file extents */
- xfs_bmbt_ptr_t *pp; /* root block address pointer */
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
- ASSERT(ifp->if_ext_max ==
- XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
- /*
- * Make space in the inode incore.
- */
- xfs_iroot_realloc(ip, 1, whichfork);
- ifp->if_flags |= XFS_IFBROOT;
- /*
- * Fill in the root.
- */
- block = ifp->if_broot;
- block->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
- block->bb_level = cpu_to_be16(1);
- block->bb_numrecs = cpu_to_be16(1);
- block->bb_leftsib = cpu_to_be64(NULLDFSBNO);
- block->bb_rightsib = cpu_to_be64(NULLDFSBNO);
- /*
- * Need a cursor. Can't allocate until bb_level is filled in.
- */
- mp = ip->i_mount;
- cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
- whichfork);
- cur->bc_private.b.firstblock = *firstblock;
- cur->bc_private.b.flist = flist;
- cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
- /*
- * Convert to a btree with two levels, one record in root.
- */
- XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
- args.tp = tp;
- args.mp = mp;
- args.firstblock = *firstblock;
- if (*firstblock == NULLFSBLOCK) {
- args.type = XFS_ALLOCTYPE_START_BNO;
- args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
- } else if (flist->xbf_low) {
- args.type = XFS_ALLOCTYPE_START_BNO;
- args.fsbno = *firstblock;
- } else {
- args.type = XFS_ALLOCTYPE_NEAR_BNO;
- args.fsbno = *firstblock;
- }
- args.minlen = args.maxlen = args.prod = 1;
- args.total = args.minleft = args.alignment = args.mod = args.isfl =
- args.minalignslop = 0;
- args.wasdel = wasdel;
- *logflagsp = 0;
- if ((error = xfs_alloc_vextent(&args))) {
- xfs_iroot_realloc(ip, -1, whichfork);
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
- return error;
- }
- /*
- * Allocation can't fail, the space was reserved.
- */
- ASSERT(args.fsbno != NULLFSBLOCK);
- ASSERT(*firstblock == NULLFSBLOCK ||
- args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
- (flist->xbf_low &&
- args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
- *firstblock = cur->bc_private.b.firstblock = args.fsbno;
- cur->bc_private.b.allocated++;
- ip->i_d.di_nblocks++;
- XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
- abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
- /*
- * Fill in the child block.
- */
- ablock = XFS_BUF_TO_BMBT_BLOCK(abp);
- ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
- ablock->bb_level = 0;
- ablock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
- ablock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
- arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
- nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
- for (cnt = i = 0; i < nextents; i++) {
- ep = xfs_iext_get_ext(ifp, i);
- if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) {
- arp->l0 = cpu_to_be64(ep->l0);
- arp->l1 = cpu_to_be64(ep->l1);
- arp++; cnt++;
- }
- }
- ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
- ablock->bb_numrecs = cpu_to_be16(cnt);
- /*
- * Fill in the root key and pointer.
- */
- kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
- arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
- kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
- pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
- *pp = cpu_to_be64(args.fsbno);
- /*
- * Do all this logging at the end so that
- * the root is at the right level.
- */
- xfs_bmbt_log_block(cur, abp, XFS_BB_ALL_BITS);
- xfs_bmbt_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
- ASSERT(*curp == NULL);
- *curp = cur;
- *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork);
- return 0;
-}
-
-/*
- * Helper routine to reset inode di_forkoff field when switching
- * attribute fork from local to extent format - we reset it where
- * possible to make space available for inline data fork extents.
- */
-STATIC void
-xfs_bmap_forkoff_reset(
- xfs_mount_t *mp,
- xfs_inode_t *ip,
- int whichfork)
-{
- if (whichfork == XFS_ATTR_FORK &&
- (ip->i_d.di_format != XFS_DINODE_FMT_DEV) &&
- (ip->i_d.di_format != XFS_DINODE_FMT_UUID) &&
- (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
- ((mp->m_attroffset >> 3) > ip->i_d.di_forkoff)) {
- ip->i_d.di_forkoff = mp->m_attroffset >> 3;
- ip->i_df.if_ext_max = XFS_IFORK_DSIZE(ip) /
- (uint)sizeof(xfs_bmbt_rec_t);
- ip->i_afp->if_ext_max = XFS_IFORK_ASIZE(ip) /
- (uint)sizeof(xfs_bmbt_rec_t);
- }
-}
-
-/*
- * Convert a local file to an extents file.
- * This code is out of bounds for data forks of regular files,
- * since the file data needs to get logged so things will stay consistent.
- * (The bmap-level manipulations are ok, though).
- */
-STATIC int /* error */
-xfs_bmap_local_to_extents(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_fsblock_t *firstblock, /* first block allocated in xaction */
- xfs_extlen_t total, /* total blocks needed by transaction */
- int *logflagsp, /* inode logging flags */
- int whichfork) /* data or attr fork */
-{
- int error; /* error return value */
- int flags; /* logging flags returned */
- xfs_ifork_t *ifp; /* inode fork pointer */
-
- /*
- * We don't want to deal with the case of keeping inode data inline yet.
- * So sending the data fork of a regular inode is invalid.
- */
- ASSERT(!((ip->i_d.di_mode & S_IFMT) == S_IFREG &&
- whichfork == XFS_DATA_FORK));
- ifp = XFS_IFORK_PTR(ip, whichfork);
- ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
- flags = 0;
- error = 0;
- if (ifp->if_bytes) {
- xfs_alloc_arg_t args; /* allocation arguments */
- xfs_buf_t *bp; /* buffer for extent block */
- xfs_bmbt_rec_host_t *ep;/* extent record pointer */
-
- args.tp = tp;
- args.mp = ip->i_mount;
- args.firstblock = *firstblock;
- ASSERT((ifp->if_flags &
- (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
- /*
- * Allocate a block. We know we need only one, since the
- * file currently fits in an inode.
- */
- if (*firstblock == NULLFSBLOCK) {
- args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
- args.type = XFS_ALLOCTYPE_START_BNO;
- } else {
- args.fsbno = *firstblock;
- args.type = XFS_ALLOCTYPE_NEAR_BNO;
- }
- args.total = total;
- args.mod = args.minleft = args.alignment = args.wasdel =
- args.isfl = args.minalignslop = 0;
- args.minlen = args.maxlen = args.prod = 1;
- if ((error = xfs_alloc_vextent(&args)))
- goto done;
- /*
- * Can't fail, the space was reserved.
- */
- ASSERT(args.fsbno != NULLFSBLOCK);
- ASSERT(args.len == 1);
- *firstblock = args.fsbno;
- bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
- memcpy((char *)XFS_BUF_PTR(bp), ifp->if_u1.if_data,
- ifp->if_bytes);
- xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
- xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
- xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
- xfs_iext_add(ifp, 0, 1);
- ep = xfs_iext_get_ext(ifp, 0);
- xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
- XFS_BMAP_TRACE_POST_UPDATE("new", ip, 0, whichfork);
- XFS_IFORK_NEXT_SET(ip, whichfork, 1);
- ip->i_d.di_nblocks = 1;
- XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip,
- XFS_TRANS_DQ_BCOUNT, 1L);
- flags |= XFS_ILOG_FEXT(whichfork);
- } else {
- ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
- xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork);
+ if (da_old > da_new) {
+ xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
+ (int64_t)(da_old - da_new), 0);
}
- ifp->if_flags &= ~XFS_IFINLINE;
- ifp->if_flags |= XFS_IFEXTENTS;
- XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
- flags |= XFS_ILOG_CORE;
done:
*logflagsp = flags;
return error;
}
/*
- * Search the extent records for the entry containing block bno.
- * If bno lies in a hole, point to the next entry. If bno lies
- * past eof, *eofp will be set, and *prevp will contain the last
- * entry (null if none). Else, *lastxp will be set to the index
- * of the found entry; *gotp will contain the entry.
- */
-xfs_bmbt_rec_host_t * /* pointer to found extent entry */
-xfs_bmap_search_multi_extents(
- xfs_ifork_t *ifp, /* inode fork pointer */
- xfs_fileoff_t bno, /* block number searched for */
- int *eofp, /* out: end of file found */
- xfs_extnum_t *lastxp, /* out: last extent index */
- xfs_bmbt_irec_t *gotp, /* out: extent entry found */
- xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
-{
- xfs_bmbt_rec_host_t *ep; /* extent record pointer */
- xfs_extnum_t lastx; /* last extent index */
-
- /*
- * Initialize the extent entry structure to catch access to
- * uninitialized br_startblock field.
- */
- gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
- gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
- gotp->br_state = XFS_EXT_INVALID;
-#if XFS_BIG_BLKNOS
- gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
-#else
- gotp->br_startblock = 0xffffa5a5;
-#endif
- prevp->br_startoff = NULLFILEOFF;
-
- ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
- if (lastx > 0) {
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
- }
- if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
- xfs_bmbt_get_all(ep, gotp);
- *eofp = 0;
- } else {
- if (lastx > 0) {
- *gotp = *prevp;
- }
- *eofp = 1;
- ep = NULL;
- }
- *lastxp = lastx;
- return ep;
-}
-
-/*
- * Search the extents list for the inode, for the extent containing bno.
- * If bno lies in a hole, point to the next entry. If bno lies past eof,
- * *eofp will be set, and *prevp will contain the last entry (null if none).
- * Else, *lastxp will be set to the index of the found
- * entry; *gotp will contain the entry.
- */
-STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
-xfs_bmap_search_extents(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_fileoff_t bno, /* block number searched for */
- int fork, /* data or attr fork */
- int *eofp, /* out: end of file found */
- xfs_extnum_t *lastxp, /* out: last extent index */
- xfs_bmbt_irec_t *gotp, /* out: extent entry found */
- xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
-{
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_bmbt_rec_host_t *ep; /* extent record pointer */
-
- XFS_STATS_INC(xs_look_exlist);
- ifp = XFS_IFORK_PTR(ip, fork);
-
- ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
-
- if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
- !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
- xfs_cmn_err(XFS_PTAG_FSBLOCK_ZERO, CE_ALERT, ip->i_mount,
- "Access to block zero in inode %llu "
- "start_block: %llx start_off: %llx "
- "blkcnt: %llx extent-state: %x lastx: %x\n",
- (unsigned long long)ip->i_ino,
- (unsigned long long)gotp->br_startblock,
- (unsigned long long)gotp->br_startoff,
- (unsigned long long)gotp->br_blockcount,
- gotp->br_state, *lastxp);
- *lastxp = NULLEXTNUM;
- *eofp = 1;
- return NULL;
- }
- return ep;
-}
-
-
-#ifdef XFS_BMAP_TRACE
-ktrace_t *xfs_bmap_trace_buf;
-
-/*
- * Add a bmap trace buffer entry. Base routine for the others.
- */
-STATIC void
-xfs_bmap_trace_addentry(
- int opcode, /* operation */
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry(ies) */
- xfs_extnum_t cnt, /* count of entries, 1 or 2 */
- xfs_bmbt_rec_host_t *r1, /* first record */
- xfs_bmbt_rec_host_t *r2, /* second record or null */
- int whichfork) /* data or attr fork */
-{
- xfs_bmbt_rec_host_t tr2;
-
- ASSERT(cnt == 1 || cnt == 2);
- ASSERT(r1 != NULL);
- if (cnt == 1) {
- ASSERT(r2 == NULL);
- r2 = &tr2;
- memset(&tr2, 0, sizeof(tr2));
- } else
- ASSERT(r2 != NULL);
- ktrace_enter(xfs_bmap_trace_buf,
- (void *)(__psint_t)(opcode | (whichfork << 16)),
- (void *)fname, (void *)desc, (void *)ip,
- (void *)(__psint_t)idx,
- (void *)(__psint_t)cnt,
- (void *)(__psunsigned_t)(ip->i_ino >> 32),
- (void *)(__psunsigned_t)(unsigned)ip->i_ino,
- (void *)(__psunsigned_t)(r1->l0 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r1->l0),
- (void *)(__psunsigned_t)(r1->l1 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r1->l1),
- (void *)(__psunsigned_t)(r2->l0 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r2->l0),
- (void *)(__psunsigned_t)(r2->l1 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r2->l1)
- );
- ASSERT(ip->i_xtrace);
- ktrace_enter(ip->i_xtrace,
- (void *)(__psint_t)(opcode | (whichfork << 16)),
- (void *)fname, (void *)desc, (void *)ip,
- (void *)(__psint_t)idx,
- (void *)(__psint_t)cnt,
- (void *)(__psunsigned_t)(ip->i_ino >> 32),
- (void *)(__psunsigned_t)(unsigned)ip->i_ino,
- (void *)(__psunsigned_t)(r1->l0 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r1->l0),
- (void *)(__psunsigned_t)(r1->l1 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r1->l1),
- (void *)(__psunsigned_t)(r2->l0 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r2->l0),
- (void *)(__psunsigned_t)(r2->l1 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r2->l1)
- );
-}
-
-/*
- * Add bmap trace entry prior to a call to xfs_iext_remove.
- */
-STATIC void
-xfs_bmap_trace_delete(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry(entries) deleted */
- xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */
- int whichfork) /* data or attr fork */
-{
- xfs_ifork_t *ifp; /* inode fork pointer */
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx,
- cnt, xfs_iext_get_ext(ifp, idx),
- cnt == 2 ? xfs_iext_get_ext(ifp, idx + 1) : NULL,
- whichfork);
-}
-
-/*
- * Add bmap trace entry prior to a call to xfs_iext_insert, or
- * reading in the extents list from the disk (in the btree).
- */
-STATIC void
-xfs_bmap_trace_insert(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry(entries) inserted */
- xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */
- xfs_bmbt_irec_t *r1, /* inserted record 1 */
- xfs_bmbt_irec_t *r2, /* inserted record 2 or null */
- int whichfork) /* data or attr fork */
-{
- xfs_bmbt_rec_host_t tr1; /* compressed record 1 */
- xfs_bmbt_rec_host_t tr2; /* compressed record 2 if needed */
-
- xfs_bmbt_set_all(&tr1, r1);
- if (cnt == 2) {
- ASSERT(r2 != NULL);
- xfs_bmbt_set_all(&tr2, r2);
- } else {
- ASSERT(cnt == 1);
- ASSERT(r2 == NULL);
- }
- xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_INSERT, fname, desc, ip, idx,
- cnt, &tr1, cnt == 2 ? &tr2 : NULL, whichfork);
-}
-
-/*
- * Add bmap trace entry after updating an extent record in place.
- */
-STATIC void
-xfs_bmap_trace_post_update(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry updated */
- int whichfork) /* data or attr fork */
-{
- xfs_ifork_t *ifp; /* inode fork pointer */
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx,
- 1, xfs_iext_get_ext(ifp, idx), NULL, whichfork);
-}
-
-/*
- * Add bmap trace entry prior to updating an extent record in place.
- */
-STATIC void
-xfs_bmap_trace_pre_update(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry to be updated */
- int whichfork) /* data or attr fork */
-{
- xfs_ifork_t *ifp; /* inode fork pointer */
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1,
- xfs_iext_get_ext(ifp, idx), NULL, whichfork);
-}
-#endif /* XFS_BMAP_TRACE */
-
-/*
- * Compute the worst-case number of indirect blocks that will be used
- * for ip's delayed extent of length "len".
- */
-STATIC xfs_filblks_t
-xfs_bmap_worst_indlen(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_filblks_t len) /* delayed extent length */
-{
- int level; /* btree level number */
- int maxrecs; /* maximum record count at this level */
- xfs_mount_t *mp; /* mount structure */
- xfs_filblks_t rval; /* return value */
-
- mp = ip->i_mount;
- maxrecs = mp->m_bmap_dmxr[0];
- for (level = 0, rval = 0;
- level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
- level++) {
- len += maxrecs - 1;
- do_div(len, maxrecs);
- rval += len;
- if (len == 1)
- return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
- level - 1;
- if (level == 0)
- maxrecs = mp->m_bmap_dmxr[1];
- }
- return rval;
-}
-
-#if defined(XFS_RW_TRACE)
-STATIC void
-xfs_bunmap_trace(
- xfs_inode_t *ip,
- xfs_fileoff_t bno,
- xfs_filblks_t len,
- int flags,
- inst_t *ra)
-{
- if (ip->i_rwtrace == NULL)
- return;
- ktrace_enter(ip->i_rwtrace,
- (void *)(__psint_t)XFS_BUNMAP,
- (void *)ip,
- (void *)(__psint_t)((ip->i_d.di_size >> 32) & 0xffffffff),
- (void *)(__psint_t)(ip->i_d.di_size & 0xffffffff),
- (void *)(__psint_t)(((xfs_dfiloff_t)bno >> 32) & 0xffffffff),
- (void *)(__psint_t)((xfs_dfiloff_t)bno & 0xffffffff),
- (void *)(__psint_t)len,
- (void *)(__psint_t)flags,
- (void *)(unsigned long)current_cpu(),
- (void *)ra,
- (void *)0,
- (void *)0,
- (void *)0,
- (void *)0,
- (void *)0,
- (void *)0);
-}
-#endif
-
-/*
- * Convert inode from non-attributed to attributed.
- * Must not be in a transaction, ip must not be locked.
- */
-int /* error code */
-xfs_bmap_add_attrfork(
- xfs_inode_t *ip, /* incore inode pointer */
- int size, /* space new attribute needs */
- int rsvd) /* xact may use reserved blks */
-{
- xfs_fsblock_t firstblock; /* 1st block/ag allocated */
- xfs_bmap_free_t flist; /* freed extent records */
- xfs_mount_t *mp; /* mount structure */
- xfs_trans_t *tp; /* transaction pointer */
- int blks; /* space reservation */
- int version = 1; /* superblock attr version */
- int committed; /* xaction was committed */
- int logflags; /* logging flags */
- int error; /* error return value */
-
- ASSERT(XFS_IFORK_Q(ip) == 0);
- ASSERT(ip->i_df.if_ext_max ==
- XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
-
- mp = ip->i_mount;
- ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
- tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
- blks = XFS_ADDAFORK_SPACE_RES(mp);
- if (rsvd)
- tp->t_flags |= XFS_TRANS_RESERVE;
- if ((error = xfs_trans_reserve(tp, blks, XFS_ADDAFORK_LOG_RES(mp), 0,
- XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
- goto error0;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, blks, 0, rsvd ?
- XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
- XFS_QMOPT_RES_REGBLKS);
- if (error) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
- return error;
- }
- if (XFS_IFORK_Q(ip))
- goto error1;
- if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
- /*
- * For inodes coming from pre-6.2 filesystems.
- */
- ASSERT(ip->i_d.di_aformat == 0);
- ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
- }
- ASSERT(ip->i_d.di_anextents == 0);
- VN_HOLD(XFS_ITOV(ip));
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- switch (ip->i_d.di_format) {
- case XFS_DINODE_FMT_DEV:
- ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
- break;
- case XFS_DINODE_FMT_UUID:
- ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
- break;
- case XFS_DINODE_FMT_LOCAL:
- case XFS_DINODE_FMT_EXTENTS:
- case XFS_DINODE_FMT_BTREE:
- ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
- if (!ip->i_d.di_forkoff)
- ip->i_d.di_forkoff = mp->m_attroffset >> 3;
- else if (mp->m_flags & XFS_MOUNT_ATTR2)
- version = 2;
- break;
- default:
- ASSERT(0);
- error = XFS_ERROR(EINVAL);
- goto error1;
- }
- ip->i_df.if_ext_max =
- XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
- ASSERT(ip->i_afp == NULL);
- ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
- ip->i_afp->if_ext_max =
- XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
- ip->i_afp->if_flags = XFS_IFEXTENTS;
- logflags = 0;
- XFS_BMAP_INIT(&flist, &firstblock);
- switch (ip->i_d.di_format) {
- case XFS_DINODE_FMT_LOCAL:
- error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
- &logflags);
- break;
- case XFS_DINODE_FMT_EXTENTS:
- error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
- &flist, &logflags);
- break;
- case XFS_DINODE_FMT_BTREE:
- error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
- &logflags);
- break;
- default:
- error = 0;
- break;
- }
- if (logflags)
- xfs_trans_log_inode(tp, ip, logflags);
- if (error)
- goto error2;
- if (!xfs_sb_version_hasattr(&mp->m_sb) ||
- (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
- __int64_t sbfields = 0;
-
- spin_lock(&mp->m_sb_lock);
- if (!xfs_sb_version_hasattr(&mp->m_sb)) {
- xfs_sb_version_addattr(&mp->m_sb);
- sbfields |= XFS_SB_VERSIONNUM;
- }
- if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
- xfs_sb_version_addattr2(&mp->m_sb);
- sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
- }
- if (sbfields) {
- spin_unlock(&mp->m_sb_lock);
- xfs_mod_sb(tp, sbfields);
- } else
- spin_unlock(&mp->m_sb_lock);
- }
- if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
- goto error2;
- error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES);
- ASSERT(ip->i_df.if_ext_max ==
- XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
- return error;
-error2:
- xfs_bmap_cancel(&flist);
-error1:
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-error0:
- xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
- ASSERT(ip->i_df.if_ext_max ==
- XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
- return error;
-}
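
Roughly how the attribute-set path drives this routine (an illustrative sketch, not part of this diff; the shortform sizing macro from xfs_attr_sf.h and the namelen/valuelen/rsvd variables are assumed placeholders):

	if (XFS_IFORK_Q(ip) == 0) {
		/* size the new fork for one shortform attr entry */
		int	sf_size = sizeof(xfs_attr_sf_hdr_t) +
				  XFS_ATTR_SF_ENTSIZE_BYNAME(namelen, valuelen);

		error = xfs_bmap_add_attrfork(ip, sf_size, rsvd);
		if (error)
			return error;
	}
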
-
-/*
- * Add the extent to the list of extents to be free at transaction end.
- * The list is maintained sorted (by block number).
- */
-/* ARGSUSED */
-void
-xfs_bmap_add_free(
- xfs_fsblock_t bno, /* fs block number of extent */
- xfs_filblks_t len, /* length of extent */
- xfs_bmap_free_t *flist, /* list of extents */
- xfs_mount_t *mp) /* mount point structure */
-{
- xfs_bmap_free_item_t *cur; /* current (next) element */
- xfs_bmap_free_item_t *new; /* new element */
- xfs_bmap_free_item_t *prev; /* previous element */
-#ifdef DEBUG
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
-
- ASSERT(bno != NULLFSBLOCK);
- ASSERT(len > 0);
- ASSERT(len <= MAXEXTLEN);
- ASSERT(!ISNULLSTARTBLOCK(bno));
- agno = XFS_FSB_TO_AGNO(mp, bno);
- agbno = XFS_FSB_TO_AGBNO(mp, bno);
- ASSERT(agno < mp->m_sb.sb_agcount);
- ASSERT(agbno < mp->m_sb.sb_agblocks);
- ASSERT(len < mp->m_sb.sb_agblocks);
- ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
-#endif
- ASSERT(xfs_bmap_free_item_zone != NULL);
- new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
- new->xbfi_startblock = bno;
- new->xbfi_blockcount = (xfs_extlen_t)len;
- for (prev = NULL, cur = flist->xbf_first;
- cur != NULL;
- prev = cur, cur = cur->xbfi_next) {
- if (cur->xbfi_startblock >= bno)
- break;
- }
- if (prev)
- prev->xbfi_next = new;
- else
- flist->xbf_first = new;
- new->xbfi_next = cur;
- flist->xbf_count++;
-}
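
The routine above keeps the free list as a singly linked list sorted by start block. A minimal standalone sketch of the same sorted-insert idea in plain C (generic types, not XFS code):

struct free_item {
	unsigned long long	startblock;	/* sort key */
	struct free_item	*next;
};

/* Link 'new' into the list so it stays sorted by startblock (ascending). */
static void sorted_insert(struct free_item **head, struct free_item *new)
{
	struct free_item *prev = NULL, *cur = *head;

	while (cur != NULL && cur->startblock < new->startblock) {
		prev = cur;
		cur = cur->next;
	}
	if (prev)
		prev->next = new;
	else
		*head = new;
	new->next = cur;
}
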
-
-/*
- * Compute and fill in the value of the maximum depth of a bmap btree
- * in this filesystem. Done once, during mount.
- */
-void
-xfs_bmap_compute_maxlevels(
- xfs_mount_t *mp, /* file system mount structure */
- int whichfork) /* data or attr fork */
-{
- int level; /* btree level */
- uint maxblocks; /* max blocks at this level */
- uint maxleafents; /* max leaf entries possible */
- int maxrootrecs; /* max records in root block */
- int minleafrecs; /* min records in leaf block */
- int minnoderecs; /* min records in node block */
- int sz; /* root block size */
-
- /*
- * The maximum number of extents in a file, hence the maximum
- * number of leaf entries, is controlled by the type of di_nextents
- * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
- * (a signed 16-bit number, xfs_aextnum_t).
- *
- * Note that we can no longer assume that if we are in ATTR1 that
- * the fork offset of all the inodes will be (m_attroffset >> 3)
- * because we could have mounted with ATTR2 and then mounted back
- * with ATTR1, keeping the di_forkoff's fixed but probably at
- * various positions. Therefore, for both ATTR1 and ATTR2
- * we have to assume the worst case scenario of a minimum size
- * available.
- */
- if (whichfork == XFS_DATA_FORK) {
- maxleafents = MAXEXTNUM;
- sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
- } else {
- maxleafents = MAXAEXTNUM;
- sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
- }
- maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0);
- minleafrecs = mp->m_bmap_dmnr[0];
- minnoderecs = mp->m_bmap_dmnr[1];
- maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
- for (level = 1; maxblocks > 1; level++) {
- if (maxblocks <= maxrootrecs)
- maxblocks = 1;
- else
- maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
- }
- mp->m_bm_maxlevels[whichfork] = level;
-}
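
The loop above is repeated ceiling division: each btree level holds ceil(blocks-below / fanout) blocks, and the iteration stops once everything fits in the inode root. A standalone sketch with illustrative (not real mkfs) fanout numbers:

#include <stdio.h>

/* Levels needed to index 'maxleafents' records, given the minimum leaf and
 * node fanout and the capacity of the root block held in the inode. */
static int compute_maxlevels(unsigned int maxleafents, unsigned int minleafrecs,
			     unsigned int minnoderecs, unsigned int maxrootrecs)
{
	unsigned int maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	int level;

	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;	/* remaining blocks fit in the root */
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	return level;
}

int main(void)
{
	/* ~2^31 extents, leaf fanout 125, node fanout 250, 9 root records */
	printf("max levels = %d\n", compute_maxlevels(0x7fffffffu, 125, 250, 9));
	return 0;
}
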
-
-/*
- * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
- * caller. Frees all the extents that need freeing, which must be done
- * last due to locking considerations. We never free any extents in
- * the first transaction. This is to allow the caller to make the first
- * transaction a synchronous one so that the pointers to the data being
- * broken in this transaction will be permanent before the data is actually
- * freed. This is necessary to prevent blocks from being reallocated
- * and written to before the free and reallocation are actually permanent.
- * We do not just make the first transaction synchronous here, because
- * there are more efficient ways to gain the same protection in some cases
- * (see the file truncation code).
- *
- * Return 1 if the given transaction was committed and a new one
- * started, and 0 otherwise in the committed parameter.
- */
-/*ARGSUSED*/
-int /* error */
-xfs_bmap_finish(
- xfs_trans_t **tp, /* transaction pointer addr */
- xfs_bmap_free_t *flist, /* i/o: list extents to free */
- int *committed) /* xact committed or not */
-{
- xfs_efd_log_item_t *efd; /* extent free data */
- xfs_efi_log_item_t *efi; /* extent free intention */
- int error; /* error return value */
- xfs_bmap_free_item_t *free; /* free extent item */
- unsigned int logres; /* new log reservation */
- unsigned int logcount; /* new log count */
- xfs_mount_t *mp; /* filesystem mount structure */
- xfs_bmap_free_item_t *next; /* next item on free list */
- xfs_trans_t *ntp; /* new transaction pointer */
-
- ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
- if (flist->xbf_count == 0) {
- *committed = 0;
- return 0;
- }
- ntp = *tp;
- efi = xfs_trans_get_efi(ntp, flist->xbf_count);
- for (free = flist->xbf_first; free; free = free->xbfi_next)
- xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
- free->xbfi_blockcount);
- logres = ntp->t_log_res;
- logcount = ntp->t_log_count;
- ntp = xfs_trans_dup(*tp);
- error = xfs_trans_commit(*tp, 0);
- *tp = ntp;
- *committed = 1;
- /*
- * We have a new transaction, so we should return committed=1,
- * even though we're returning an error.
- */
- if (error) {
- return error;
- }
- if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES,
- logcount)))
- return error;
- efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
- for (free = flist->xbf_first; free != NULL; free = next) {
- next = free->xbfi_next;
- if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
- free->xbfi_blockcount))) {
- /*
- * The bmap free list will be cleaned up at a
- * higher level. The EFI will be canceled when
- * this transaction is aborted.
- * Need to force shutdown here to make sure it
- * happens, since this transaction may not be
- * dirty yet.
- */
- mp = ntp->t_mountp;
- if (!XFS_FORCED_SHUTDOWN(mp))
- xfs_force_shutdown(mp,
- (error == EFSCORRUPTED) ?
- SHUTDOWN_CORRUPT_INCORE :
- SHUTDOWN_META_IO_ERROR);
- return error;
- }
- xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
- free->xbfi_blockcount);
- xfs_bmap_del_free(flist, NULL, free);
- }
- return 0;
-}
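
The calling convention around xfs_bmap_finish() is worth spelling out. A hedged sketch of the usual pattern (error handling abbreviated; offset_fsb, count_fsb, resblks, imap and nimaps are placeholder names): build up the free list during xfs_bmapi()/xfs_bunmapi(), let xfs_bmap_finish() roll the transaction and free the extents, then commit.

	xfs_bmap_free_t	free_list;
	xfs_fsblock_t	firstfsb;
	int		committed;

	XFS_BMAP_INIT(&free_list, &firstfsb);
	error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, XFS_BMAPI_WRITE,
			  &firstfsb, resblks, &imap, &nimaps,
			  &free_list, NULL);
	if (error)
		goto abort;	/* xfs_bmap_cancel() + xfs_trans_cancel() */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto abort;
	error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES);
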
-
-/*
- * Free up any items left in the list.
- */
-void
-xfs_bmap_cancel(
- xfs_bmap_free_t *flist) /* list of bmap_free_items */
-{
- xfs_bmap_free_item_t *free; /* free list item */
- xfs_bmap_free_item_t *next;
-
- if (flist->xbf_count == 0)
- return;
- ASSERT(flist->xbf_first != NULL);
- for (free = flist->xbf_first; free; free = next) {
- next = free->xbfi_next;
- xfs_bmap_del_free(flist, NULL, free);
- }
- ASSERT(flist->xbf_count == 0);
-}
-
-/*
- * Returns the file-relative block number of the first unused block(s)
- * in the file with at least "len" logically contiguous blocks free.
- * This is the lowest-address hole if the file has holes, else the first block
- * past the end of file.
- * Return 0 if the file is currently local (in-inode).
- */
-int /* error */
-xfs_bmap_first_unused(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode */
- xfs_extlen_t len, /* size of hole to find */
- xfs_fileoff_t *first_unused, /* unused block */
- int whichfork) /* data or attr fork */
-{
- int error; /* error return value */
- int idx; /* extent record index */
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_fileoff_t lastaddr; /* last block number seen */
- xfs_fileoff_t lowest; /* lowest useful block */
- xfs_fileoff_t max; /* starting useful block */
- xfs_fileoff_t off; /* offset for this block */
- xfs_extnum_t nextents; /* number of extent entries */
-
- ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
- XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
- XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
- if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
- *first_unused = 0;
- return 0;
- }
- ifp = XFS_IFORK_PTR(ip, whichfork);
- if (!(ifp->if_flags & XFS_IFEXTENTS) &&
- (error = xfs_iread_extents(tp, ip, whichfork)))
- return error;
- lowest = *first_unused;
- nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
- for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
- xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
- off = xfs_bmbt_get_startoff(ep);
- /*
- * See if the hole before this extent will work.
- */
- if (off >= lowest + len && off - max >= len) {
- *first_unused = max;
- return 0;
- }
- lastaddr = off + xfs_bmbt_get_blockcount(ep);
- max = XFS_FILEOFF_MAX(lastaddr, lowest);
- }
- *first_unused = max;
- return 0;
-}
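
The scan is linear over the in-core extent list: remember where the previous extent ended and take the first gap of at least len blocks. A self-contained sketch over a sorted, non-overlapping (startoff, blockcount) array:

/* Return the lowest offset >= 'lowest' with at least 'len' free blocks,
 * or the offset just past the last extent if no interior hole fits. */
static unsigned long long first_unused(const unsigned long long *startoff,
				       const unsigned long long *blockcount,
				       int nextents, unsigned long long lowest,
				       unsigned long long len)
{
	unsigned long long max = lowest;
	int i;

	for (i = 0; i < nextents; i++) {
		if (startoff[i] >= lowest + len && startoff[i] - max >= len)
			return max;	/* the hole before extent i fits */
		if (startoff[i] + blockcount[i] > max)
			max = startoff[i] + blockcount[i];
	}
	return max;			/* first block past the last extent */
}
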
-
-/*
- * Returns the file-relative block number of the last block + 1 before
- * last_block (input value) in the file.
- * This is not based on i_size, it is based on the extent records.
- * Returns 0 for local files, as they do not have extent records.
- */
-int /* error */
-xfs_bmap_last_before(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode */
- xfs_fileoff_t *last_block, /* last block */
- int whichfork) /* data or attr fork */
-{
- xfs_fileoff_t bno; /* input file offset */
- int eof; /* hit end of file */
- xfs_bmbt_rec_host_t *ep; /* pointer to last extent */
- int error; /* error return value */
- xfs_bmbt_irec_t got; /* current extent value */
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_extnum_t lastx; /* last extent used */
- xfs_bmbt_irec_t prev; /* previous extent value */
-
- if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
- return XFS_ERROR(EIO);
- if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
- *last_block = 0;
- return 0;
- }
- ifp = XFS_IFORK_PTR(ip, whichfork);
- if (!(ifp->if_flags & XFS_IFEXTENTS) &&
- (error = xfs_iread_extents(tp, ip, whichfork)))
- return error;
- bno = *last_block - 1;
- ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
- &prev);
- if (eof || xfs_bmbt_get_startoff(ep) > bno) {
- if (prev.br_startoff == NULLFILEOFF)
- *last_block = 0;
- else
- *last_block = prev.br_startoff + prev.br_blockcount;
- }
- /*
- * Otherwise *last_block is already the right answer.
- */
- return 0;
-}
-
-/*
- * Returns the file-relative block number of the first block past eof in
- * the file. This is not based on i_size, it is based on the extent records.
- * Returns 0 for local files, as they do not have extent records.
- */
-int /* error */
-xfs_bmap_last_offset(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode */
- xfs_fileoff_t *last_block, /* last block */
- int whichfork) /* data or attr fork */
-{
- xfs_bmbt_rec_host_t *ep; /* pointer to last extent */
- int error; /* error return value */
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_extnum_t nextents; /* number of extent entries */
-
- if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
- return XFS_ERROR(EIO);
- if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
- *last_block = 0;
- return 0;
- }
- ifp = XFS_IFORK_PTR(ip, whichfork);
- if (!(ifp->if_flags & XFS_IFEXTENTS) &&
- (error = xfs_iread_extents(tp, ip, whichfork)))
- return error;
- nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
- if (!nextents) {
- *last_block = 0;
- return 0;
- }
- ep = xfs_iext_get_ext(ifp, nextents - 1);
- *last_block = xfs_bmbt_get_startoff(ep) + xfs_bmbt_get_blockcount(ep);
- return 0;
-}
-
-/*
- * Returns whether the selected fork of the inode has exactly one
- * block or not. For the data fork we check this matches di_size,
- * implying the file's range is 0..bsize-1.
- */
-int /* 1=>1 block, 0=>otherwise */
-xfs_bmap_one_block(
- xfs_inode_t *ip, /* incore inode */
- int whichfork) /* data or attr fork */
-{
- xfs_bmbt_rec_host_t *ep; /* ptr to fork's extent */
- xfs_ifork_t *ifp; /* inode fork pointer */
- int rval; /* return value */
- xfs_bmbt_irec_t s; /* internal version of extent */
-
-#ifndef DEBUG
- if (whichfork == XFS_DATA_FORK) {
- return ((ip->i_d.di_mode & S_IFMT) == S_IFREG) ?
- (ip->i_size == ip->i_mount->m_sb.sb_blocksize) :
- (ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize);
- }
-#endif /* !DEBUG */
- if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
- return 0;
- if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
- return 0;
- ifp = XFS_IFORK_PTR(ip, whichfork);
- ASSERT(ifp->if_flags & XFS_IFEXTENTS);
- ep = xfs_iext_get_ext(ifp, 0);
- xfs_bmbt_get_all(ep, &s);
- rval = s.br_startoff == 0 && s.br_blockcount == 1;
- if (rval && whichfork == XFS_DATA_FORK)
- ASSERT(ip->i_size == ip->i_mount->m_sb.sb_blocksize);
- return rval;
-}
-
-/*
- * Read in the extents to if_extents.
- * All inode fields are set up by caller, we just traverse the btree
- * and copy the records in. If the file system cannot contain unwritten
- * extents, the records are checked for no "state" flags.
- */
-int /* error */
-xfs_bmap_read_extents(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode */
- int whichfork) /* data or attr fork */
-{
- xfs_bmbt_block_t *block; /* current btree block */
- xfs_fsblock_t bno; /* block # of "block" */
- xfs_buf_t *bp; /* buffer for "block" */
- int error; /* error return value */
- xfs_exntfmt_t exntf; /* XFS_EXTFMT_NOSTATE, if checking */
- xfs_extnum_t i, j; /* index into the extents list */
- xfs_ifork_t *ifp; /* fork structure */
- int level; /* btree level, for checking */
- xfs_mount_t *mp; /* file system mount structure */
- __be64 *pp; /* pointer to block address */
- /* REFERENCED */
- xfs_extnum_t room; /* number of entries there's room for */
-
- bno = NULLFSBLOCK;
- mp = ip->i_mount;
- ifp = XFS_IFORK_PTR(ip, whichfork);
- exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
- XFS_EXTFMT_INODE(ip);
- block = ifp->if_broot;
- /*
- * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
- */
- level = be16_to_cpu(block->bb_level);
- ASSERT(level > 0);
- pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
- bno = be64_to_cpu(*pp);
- ASSERT(bno != NULLDFSBNO);
- ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
- ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
- /*
- * Go down the tree until leaf level is reached, following the first
- * pointer (leftmost) at each level.
- */
- while (level-- > 0) {
- if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
- XFS_BMAP_BTREE_REF)))
- return error;
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
- XFS_WANT_CORRUPTED_GOTO(
- XFS_BMAP_SANITY_CHECK(mp, block, level),
- error0);
- if (level == 0)
- break;
- pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
- bno = be64_to_cpu(*pp);
- XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
- xfs_trans_brelse(tp, bp);
- }
- /*
- * Here with bp and block set to the leftmost leaf node in the tree.
- */
- room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
- i = 0;
- /*
- * Loop over all leaf nodes. Copy information to the extent records.
- */
- for (;;) {
- xfs_bmbt_rec_t *frp;
- xfs_fsblock_t nextbno;
- xfs_extnum_t num_recs;
- xfs_extnum_t start;
-
-
- num_recs = be16_to_cpu(block->bb_numrecs);
- if (unlikely(i + num_recs > room)) {
- ASSERT(i + num_recs <= room);
- xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
- "corrupt dinode %Lu, (btree extents).",
- (unsigned long long) ip->i_ino);
- XFS_ERROR_REPORT("xfs_bmap_read_extents(1)",
- XFS_ERRLEVEL_LOW,
- ip->i_mount);
- goto error0;
- }
- XFS_WANT_CORRUPTED_GOTO(
- XFS_BMAP_SANITY_CHECK(mp, block, 0),
- error0);
- /*
- * Read-ahead the next leaf block, if any.
- */
- nextbno = be64_to_cpu(block->bb_rightsib);
- if (nextbno != NULLFSBLOCK)
- xfs_btree_reada_bufl(mp, nextbno, 1);
- /*
- * Copy records into the extent records.
- */
- frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
- start = i;
- for (j = 0; j < num_recs; j++, i++, frp++) {
- xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
- trp->l0 = be64_to_cpu(frp->l0);
- trp->l1 = be64_to_cpu(frp->l1);
- }
- if (exntf == XFS_EXTFMT_NOSTATE) {
- /*
- * Check all attribute bmap btree records and
- * any "older" data bmap btree records for a
- * set bit in the "extent flag" position.
- */
- if (unlikely(xfs_check_nostate_extents(ifp,
- start, num_recs))) {
- XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
- XFS_ERRLEVEL_LOW,
- ip->i_mount);
- goto error0;
- }
- }
- xfs_trans_brelse(tp, bp);
- bno = nextbno;
- /*
- * If we've reached the end, stop.
- */
- if (bno == NULLFSBLOCK)
- break;
- if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
- XFS_BMAP_BTREE_REF)))
- return error;
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
- }
- ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
- ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
- XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
- return 0;
-error0:
- xfs_trans_brelse(tp, bp);
- return XFS_ERROR(EFSCORRUPTED);
-}
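
The traversal above is the standard one for a B+tree whose leaves are chained through right-sibling pointers: descend the leftmost path to level 0, then walk the sibling chain copying records. A generic in-memory sketch (pointers instead of the buffer/daddr interface the real code uses):

struct node {
	int		level;		/* 0 == leaf */
	int		numrecs;
	struct node	*child0;	/* leftmost child, interior nodes only */
	struct node	*rightsib;	/* next leaf to the right, leaves only */
	long long	recs[16];	/* leaf records */
};

/* Copy every leaf record into 'out', leftmost leaf first; returns count. */
static int read_all_records(const struct node *root, long long *out)
{
	const struct node *block = root;
	int i, n = 0;

	while (block->level > 0)	/* follow the leftmost pointer down */
		block = block->child0;
	for (; block != NULL; block = block->rightsib)
		for (i = 0; i < block->numrecs; i++)
			out[n++] = block->recs[i];
	return n;
}
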
-
-#ifdef XFS_BMAP_TRACE
-/*
- * Add bmap trace insert entries for all the contents of the extent records.
- */
-void
-xfs_bmap_trace_exlist(
- const char *fname, /* function name */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t cnt, /* count of entries in the list */
- int whichfork) /* data or attr fork */
-{
- xfs_bmbt_rec_host_t *ep; /* current extent record */
- xfs_extnum_t idx; /* extent record index */
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_bmbt_irec_t s; /* file extent record */
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
- for (idx = 0; idx < cnt; idx++) {
- ep = xfs_iext_get_ext(ifp, idx);
- xfs_bmbt_get_all(ep, &s);
- XFS_BMAP_TRACE_INSERT("exlist", ip, idx, 1, &s, NULL,
- whichfork);
- }
-}
-#endif
-
-#ifdef DEBUG
-/*
- * Validate that the bmbt_irecs being returned from bmapi are valid
- * given the callers original parameters. Specifically check the
- * ranges of the returned irecs to ensure that they only extend beyond
- * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
- */
-STATIC void
-xfs_bmap_validate_ret(
- xfs_fileoff_t bno,
- xfs_filblks_t len,
- int flags,
- xfs_bmbt_irec_t *mval,
- int nmap,
- int ret_nmap)
-{
- int i; /* index to map values */
-
- ASSERT(ret_nmap <= nmap);
-
- for (i = 0; i < ret_nmap; i++) {
- ASSERT(mval[i].br_blockcount > 0);
- if (!(flags & XFS_BMAPI_ENTIRE)) {
- ASSERT(mval[i].br_startoff >= bno);
- ASSERT(mval[i].br_blockcount <= len);
- ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
- bno + len);
- } else {
- ASSERT(mval[i].br_startoff < bno + len);
- ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
- bno);
- }
- ASSERT(i == 0 ||
- mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
- mval[i].br_startoff);
- if ((flags & XFS_BMAPI_WRITE) && !(flags & XFS_BMAPI_DELAY))
- ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
- mval[i].br_startblock != HOLESTARTBLOCK);
- ASSERT(mval[i].br_state == XFS_EXT_NORM ||
- mval[i].br_state == XFS_EXT_UNWRITTEN);
- }
-}
-#endif /* DEBUG */
-
-
-/*
- * Map file blocks to filesystem blocks.
- * File range is given by the bno/len pair.
- * Adds blocks to file if a write ("flags & XFS_BMAPI_WRITE" set)
- * into a hole or past eof.
- * Only allocates blocks from a single allocation group,
- * to avoid locking problems.
- * The returned value in "firstblock" from the first call in a transaction
- * must be remembered and presented to subsequent calls in "firstblock".
- * An upper bound for the number of blocks to be allocated is supplied to
- * the first call in "total"; if no allocation group has that many free
- * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
- */
-int /* error */
-xfs_bmapi(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode */
- xfs_fileoff_t bno, /* starting file offs. mapped */
- xfs_filblks_t len, /* length to map in file */
- int flags, /* XFS_BMAPI_... */
- xfs_fsblock_t *firstblock, /* first allocated block
- controls a.g. for allocs */
- xfs_extlen_t total, /* total blocks needed */
- xfs_bmbt_irec_t *mval, /* output: map values */
- int *nmap, /* i/o: mval size/count */
- xfs_bmap_free_t *flist, /* i/o: list extents to free */
- xfs_extdelta_t *delta) /* o: change made to incore extents */
-{
- xfs_fsblock_t abno; /* allocated block number */
- xfs_extlen_t alen; /* allocated extent length */
- xfs_fileoff_t aoff; /* allocated file offset */
- xfs_bmalloca_t bma; /* args for xfs_bmap_alloc */
- xfs_btree_cur_t *cur; /* bmap btree cursor */
- xfs_fileoff_t end; /* end of mapped file region */
- int eof; /* we've hit the end of extents */
- xfs_bmbt_rec_host_t *ep; /* extent record pointer */
- int error; /* error return */
- xfs_bmbt_irec_t got; /* current file extent record */
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_extlen_t indlen; /* indirect blocks length */
- xfs_extnum_t lastx; /* last useful extent number */
- int logflags; /* flags for transaction logging */
- xfs_extlen_t minleft; /* min blocks left after allocation */
- xfs_extlen_t minlen; /* min allocation size */
- xfs_mount_t *mp; /* xfs mount structure */
- int n; /* current extent index */
- int nallocs; /* number of extents alloc\'d */
- xfs_extnum_t nextents; /* number of extents in file */
- xfs_fileoff_t obno; /* old block number (offset) */
- xfs_bmbt_irec_t prev; /* previous file extent record */
- int tmp_logflags; /* temp flags holder */
- int whichfork; /* data or attr fork */
- char inhole; /* current location is hole in file */
- char wasdelay; /* old extent was delayed */
- char wr; /* this is a write request */
- char rt; /* this is a realtime file */
-#ifdef DEBUG
- xfs_fileoff_t orig_bno; /* original block number value */
- int orig_flags; /* original flags arg value */
- xfs_filblks_t orig_len; /* original value of len arg */
- xfs_bmbt_irec_t *orig_mval; /* original value of mval */
- int orig_nmap; /* original value of *nmap */
-
- orig_bno = bno;
- orig_len = len;
- orig_flags = flags;
- orig_mval = mval;
- orig_nmap = *nmap;
-#endif
- ASSERT(*nmap >= 1);
- ASSERT(*nmap <= XFS_BMAP_MAX_NMAP || !(flags & XFS_BMAPI_WRITE));
- whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
- XFS_ATTR_FORK : XFS_DATA_FORK;
- mp = ip->i_mount;
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL),
- mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
- XFS_ERROR_REPORT("xfs_bmapi", XFS_ERRLEVEL_LOW, mp);
- return XFS_ERROR(EFSCORRUPTED);
- }
- if (XFS_FORCED_SHUTDOWN(mp))
- return XFS_ERROR(EIO);
- rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
- ifp = XFS_IFORK_PTR(ip, whichfork);
- ASSERT(ifp->if_ext_max ==
- XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
- if ((wr = (flags & XFS_BMAPI_WRITE)) != 0)
- XFS_STATS_INC(xs_blk_mapw);
- else
- XFS_STATS_INC(xs_blk_mapr);
- /*
- * IGSTATE flag is used to combine extents which
- * differ only due to the state of the extents.
- * This technique is used from xfs_getbmap()
- * when the caller does not wish to see the
- * separation (which is the default).
- *
- * This technique is also used when writing a
- * buffer which has been partially written,
- * (usually by being flushed during a chunkread),
- * to ensure one write takes place. This also
- * prevents a change in the xfs inode extents at
- * this time, intentionally. This change occurs
- * on completion of the write operation, in
- * xfs_strat_comp(), where the xfs_bmapi() call
- * is transactioned, and the extents combined.
- */
- if ((flags & XFS_BMAPI_IGSTATE) && wr) /* if writing unwritten space */
- wr = 0; /* no allocations are allowed */
- ASSERT(wr || !(flags & XFS_BMAPI_DELAY));
- logflags = 0;
- nallocs = 0;
- cur = NULL;
- if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
- ASSERT(wr && tp);
- if ((error = xfs_bmap_local_to_extents(tp, ip,
- firstblock, total, &logflags, whichfork)))
- goto error0;
- }
- if (wr && *firstblock == NULLFSBLOCK) {
- if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
- minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
- else
- minleft = 1;
- } else
- minleft = 0;
- if (!(ifp->if_flags & XFS_IFEXTENTS) &&
- (error = xfs_iread_extents(tp, ip, whichfork)))
- goto error0;
- ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
- &prev);
- nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
- n = 0;
- end = bno + len;
- obno = bno;
- bma.ip = NULL;
- if (delta) {
- delta->xed_startoff = NULLFILEOFF;
- delta->xed_blockcount = 0;
- }
- while (bno < end && n < *nmap) {
- /*
- * Reading past eof, act as though there's a hole
- * up to end.
- */
- if (eof && !wr)
- got.br_startoff = end;
- inhole = eof || got.br_startoff > bno;
- wasdelay = wr && !inhole && !(flags & XFS_BMAPI_DELAY) &&
- ISNULLSTARTBLOCK(got.br_startblock);
- /*
- * First, deal with the hole before the allocated space
- * that we found, if any.
- */
- if (wr && (inhole || wasdelay)) {
- /*
- * For the wasdelay case, we could also just
- * allocate the stuff asked for in this bmap call
- * but that wouldn't be as good.
- */
- if (wasdelay && !(flags & XFS_BMAPI_EXACT)) {
- alen = (xfs_extlen_t)got.br_blockcount;
- aoff = got.br_startoff;
- if (lastx != NULLEXTNUM && lastx) {
- ep = xfs_iext_get_ext(ifp, lastx - 1);
- xfs_bmbt_get_all(ep, &prev);
- }
- } else if (wasdelay) {
- alen = (xfs_extlen_t)
- XFS_FILBLKS_MIN(len,
- (got.br_startoff +
- got.br_blockcount) - bno);
- aoff = bno;
- } else {
- alen = (xfs_extlen_t)
- XFS_FILBLKS_MIN(len, MAXEXTLEN);
- if (!eof)
- alen = (xfs_extlen_t)
- XFS_FILBLKS_MIN(alen,
- got.br_startoff - bno);
- aoff = bno;
- }
- minlen = (flags & XFS_BMAPI_CONTIG) ? alen : 1;
- if (flags & XFS_BMAPI_DELAY) {
- xfs_extlen_t extsz;
-
- /* Figure out the extent size, adjust alen */
- extsz = xfs_get_extsz_hint(ip);
- if (extsz) {
- error = xfs_bmap_extsize_align(mp,
- &got, &prev, extsz,
- rt, eof,
- flags&XFS_BMAPI_DELAY,
- flags&XFS_BMAPI_CONVERT,
- &aoff, &alen);
- ASSERT(!error);
- }
-
- if (rt)
- extsz = alen / mp->m_sb.sb_rextsize;
-
- /*
- * Make a transaction-less quota reservation for
- * delayed allocation blocks. This number gets
- * adjusted later. We return if we haven't
- * allocated blocks already inside this loop.
- */
- if ((error = XFS_TRANS_RESERVE_QUOTA_NBLKS(
- mp, NULL, ip, (long)alen, 0,
- rt ? XFS_QMOPT_RES_RTBLKS :
- XFS_QMOPT_RES_REGBLKS))) {
- if (n == 0) {
- *nmap = 0;
- ASSERT(cur == NULL);
- return error;
- }
- break;
- }
-
- /*
- * Split changing sb for alen and indlen since
- * they could be coming from different places.
- */
- indlen = (xfs_extlen_t)
- xfs_bmap_worst_indlen(ip, alen);
- ASSERT(indlen > 0);
-
- if (rt) {
- error = xfs_mod_incore_sb(mp,
- XFS_SBS_FREXTENTS,
- -((int64_t)extsz), (flags &
- XFS_BMAPI_RSVBLOCKS));
- } else {
- error = xfs_mod_incore_sb(mp,
- XFS_SBS_FDBLOCKS,
- -((int64_t)alen), (flags &
- XFS_BMAPI_RSVBLOCKS));
- }
- if (!error) {
- error = xfs_mod_incore_sb(mp,
- XFS_SBS_FDBLOCKS,
- -((int64_t)indlen), (flags &
- XFS_BMAPI_RSVBLOCKS));
- if (error && rt)
- xfs_mod_incore_sb(mp,
- XFS_SBS_FREXTENTS,
- (int64_t)extsz, (flags &
- XFS_BMAPI_RSVBLOCKS));
- else if (error)
- xfs_mod_incore_sb(mp,
- XFS_SBS_FDBLOCKS,
- (int64_t)alen, (flags &
- XFS_BMAPI_RSVBLOCKS));
- }
-
- if (error) {
- if (XFS_IS_QUOTA_ON(mp))
- /* unreserve the blocks now */
- (void)
- XFS_TRANS_UNRESERVE_QUOTA_NBLKS(
- mp, NULL, ip,
- (long)alen, 0, rt ?
- XFS_QMOPT_RES_RTBLKS :
- XFS_QMOPT_RES_REGBLKS);
- break;
- }
-
- ip->i_delayed_blks += alen;
- abno = NULLSTARTBLOCK(indlen);
- } else {
- /*
- * If first time, allocate and fill in
- * once-only bma fields.
- */
- if (bma.ip == NULL) {
- bma.tp = tp;
- bma.ip = ip;
- bma.prevp = &prev;
- bma.gotp = &got;
- bma.total = total;
- bma.userdata = 0;
- }
- /* Indicate if this is the first user data
- * in the file, or just any user data.
- */
- if (!(flags & XFS_BMAPI_METADATA)) {
- bma.userdata = (aoff == 0) ?
- XFS_ALLOC_INITIAL_USER_DATA :
- XFS_ALLOC_USERDATA;
- }
- /*
- * Fill in changeable bma fields.
- */
- bma.eof = eof;
- bma.firstblock = *firstblock;
- bma.alen = alen;
- bma.off = aoff;
- bma.conv = !!(flags & XFS_BMAPI_CONVERT);
- bma.wasdel = wasdelay;
- bma.minlen = minlen;
- bma.low = flist->xbf_low;
- bma.minleft = minleft;
- /*
- * Only want to do the alignment at the
- * eof if it is userdata and allocation length
- * is larger than a stripe unit.
- */
- if (mp->m_dalign && alen >= mp->m_dalign &&
- (!(flags & XFS_BMAPI_METADATA)) &&
- (whichfork == XFS_DATA_FORK)) {
- if ((error = xfs_bmap_isaeof(ip, aoff,
- whichfork, &bma.aeof)))
- goto error0;
- } else
- bma.aeof = 0;
- /*
- * Call allocator.
- */
- if ((error = xfs_bmap_alloc(&bma)))
- goto error0;
- /*
- * Copy out result fields.
- */
- abno = bma.rval;
- if ((flist->xbf_low = bma.low))
- minleft = 0;
- alen = bma.alen;
- aoff = bma.off;
- ASSERT(*firstblock == NULLFSBLOCK ||
- XFS_FSB_TO_AGNO(mp, *firstblock) ==
- XFS_FSB_TO_AGNO(mp, bma.firstblock) ||
- (flist->xbf_low &&
- XFS_FSB_TO_AGNO(mp, *firstblock) <
- XFS_FSB_TO_AGNO(mp, bma.firstblock)));
- *firstblock = bma.firstblock;
- if (cur)
- cur->bc_private.b.firstblock =
- *firstblock;
- if (abno == NULLFSBLOCK)
- break;
- if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
- cur = xfs_btree_init_cursor(mp,
- tp, NULL, 0, XFS_BTNUM_BMAP,
- ip, whichfork);
- cur->bc_private.b.firstblock =
- *firstblock;
- cur->bc_private.b.flist = flist;
- }
- /*
- * Bump the number of extents we've allocated
- * in this call.
- */
- nallocs++;
- }
- if (cur)
- cur->bc_private.b.flags =
- wasdelay ? XFS_BTCUR_BPRV_WASDEL : 0;
- got.br_startoff = aoff;
- got.br_startblock = abno;
- got.br_blockcount = alen;
- got.br_state = XFS_EXT_NORM; /* assume normal */
- /*
- * Determine state of extent, and the filesystem.
- * A wasdelay extent has been initialized, so
- * shouldn't be flagged as unwritten.
- */
- if (wr && xfs_sb_version_hasextflgbit(&mp->m_sb)) {
- if (!wasdelay && (flags & XFS_BMAPI_PREALLOC))
- got.br_state = XFS_EXT_UNWRITTEN;
- }
- error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
- firstblock, flist, &tmp_logflags, delta,
- whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
- logflags |= tmp_logflags;
- if (error)
- goto error0;
- lastx = ifp->if_lastex;
- ep = xfs_iext_get_ext(ifp, lastx);
- nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
- xfs_bmbt_get_all(ep, &got);
- ASSERT(got.br_startoff <= aoff);
- ASSERT(got.br_startoff + got.br_blockcount >=
- aoff + alen);
-#ifdef DEBUG
- if (flags & XFS_BMAPI_DELAY) {
- ASSERT(ISNULLSTARTBLOCK(got.br_startblock));
- ASSERT(STARTBLOCKVAL(got.br_startblock) > 0);
- }
- ASSERT(got.br_state == XFS_EXT_NORM ||
- got.br_state == XFS_EXT_UNWRITTEN);
-#endif
- /*
- * Fall down into the found allocated space case.
- */
- } else if (inhole) {
- /*
- * Reading in a hole.
- */
- mval->br_startoff = bno;
- mval->br_startblock = HOLESTARTBLOCK;
- mval->br_blockcount =
- XFS_FILBLKS_MIN(len, got.br_startoff - bno);
- mval->br_state = XFS_EXT_NORM;
- bno += mval->br_blockcount;
- len -= mval->br_blockcount;
- mval++;
- n++;
- continue;
- }
- /*
- * Then deal with the allocated space we found.
- */
- ASSERT(ep != NULL);
- if (!(flags & XFS_BMAPI_ENTIRE) &&
- (got.br_startoff + got.br_blockcount > obno)) {
- if (obno > bno)
- bno = obno;
- ASSERT((bno >= obno) || (n == 0));
- ASSERT(bno < end);
- mval->br_startoff = bno;
- if (ISNULLSTARTBLOCK(got.br_startblock)) {
- ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
- mval->br_startblock = DELAYSTARTBLOCK;
- } else
- mval->br_startblock =
- got.br_startblock +
- (bno - got.br_startoff);
- /*
- * Return the minimum of what we got and what we
- * asked for for the length. We can use the len
- * variable here because it is modified below
- * and we could have been there before coming
- * here if the first part of the allocation
- * didn't overlap what was asked for.
- */
- mval->br_blockcount =
- XFS_FILBLKS_MIN(end - bno, got.br_blockcount -
- (bno - got.br_startoff));
- mval->br_state = got.br_state;
- ASSERT(mval->br_blockcount <= len);
- } else {
- *mval = got;
- if (ISNULLSTARTBLOCK(mval->br_startblock)) {
- ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
- mval->br_startblock = DELAYSTARTBLOCK;
- }
- }
-
- /*
- * Check if writing previously allocated but
- * unwritten extents.
- */
- if (wr && mval->br_state == XFS_EXT_UNWRITTEN &&
- ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) {
- /*
- * Modify (by adding) the state flag, if writing.
- */
- ASSERT(mval->br_blockcount <= len);
- if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
- cur = xfs_btree_init_cursor(mp,
- tp, NULL, 0, XFS_BTNUM_BMAP,
- ip, whichfork);
- cur->bc_private.b.firstblock =
- *firstblock;
- cur->bc_private.b.flist = flist;
- }
- mval->br_state = XFS_EXT_NORM;
- error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
- firstblock, flist, &tmp_logflags, delta,
- whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
- logflags |= tmp_logflags;
- if (error)
- goto error0;
- lastx = ifp->if_lastex;
- ep = xfs_iext_get_ext(ifp, lastx);
- nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
- xfs_bmbt_get_all(ep, &got);
- /*
- * We may have combined previously unwritten
- * space with written space, so generate
- * another request.
- */
- if (mval->br_blockcount < len)
- continue;
- }
-
- ASSERT((flags & XFS_BMAPI_ENTIRE) ||
- ((mval->br_startoff + mval->br_blockcount) <= end));
- ASSERT((flags & XFS_BMAPI_ENTIRE) ||
- (mval->br_blockcount <= len) ||
- (mval->br_startoff < obno));
- bno = mval->br_startoff + mval->br_blockcount;
- len = end - bno;
- if (n > 0 && mval->br_startoff == mval[-1].br_startoff) {
- ASSERT(mval->br_startblock == mval[-1].br_startblock);
- ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
- ASSERT(mval->br_state == mval[-1].br_state);
- mval[-1].br_blockcount = mval->br_blockcount;
- mval[-1].br_state = mval->br_state;
- } else if (n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
- mval[-1].br_startblock != DELAYSTARTBLOCK &&
- mval[-1].br_startblock != HOLESTARTBLOCK &&
- mval->br_startblock ==
- mval[-1].br_startblock + mval[-1].br_blockcount &&
- ((flags & XFS_BMAPI_IGSTATE) ||
- mval[-1].br_state == mval->br_state)) {
- ASSERT(mval->br_startoff ==
- mval[-1].br_startoff + mval[-1].br_blockcount);
- mval[-1].br_blockcount += mval->br_blockcount;
- } else if (n > 0 &&
- mval->br_startblock == DELAYSTARTBLOCK &&
- mval[-1].br_startblock == DELAYSTARTBLOCK &&
- mval->br_startoff ==
- mval[-1].br_startoff + mval[-1].br_blockcount) {
- mval[-1].br_blockcount += mval->br_blockcount;
- mval[-1].br_state = mval->br_state;
- } else if (!((n == 0) &&
- ((mval->br_startoff + mval->br_blockcount) <=
- obno))) {
- mval++;
- n++;
- }
- /*
- * If we're done, stop now. Stop when we've allocated
- * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
- * the transaction may get too big.
- */
- if (bno >= end || n >= *nmap || nallocs >= *nmap)
- break;
- /*
- * Else go on to the next record.
- */
- ep = xfs_iext_get_ext(ifp, ++lastx);
- prev = got;
- if (lastx >= nextents)
- eof = 1;
- else
- xfs_bmbt_get_all(ep, &got);
- }
- ifp->if_lastex = lastx;
- *nmap = n;
- /*
- * Transform from btree to extents, give it cur.
- */
- if (tp && XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
- XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
- ASSERT(wr && cur);
- error = xfs_bmap_btree_to_extents(tp, ip, cur,
- &tmp_logflags, whichfork);
- logflags |= tmp_logflags;
- if (error)
- goto error0;
- }
- ASSERT(ifp->if_ext_max ==
- XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
- ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
- XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
- error = 0;
- if (delta && delta->xed_startoff != NULLFILEOFF) {
- /* A change was actually made.
- * Note that delta->xed_blockcount is an offset at this
- * point and needs to be converted to a block count.
- */
- ASSERT(delta->xed_blockcount > delta->xed_startoff);
- delta->xed_blockcount -= delta->xed_startoff;
- }
-error0:
- /*
- * Log everything. Do this after conversion, there's no point in
- * logging the extent records if we've converted to btree format.
- */
- if ((logflags & XFS_ILOG_FEXT(whichfork)) &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
- logflags &= ~XFS_ILOG_FEXT(whichfork);
- else if ((logflags & XFS_ILOG_FBROOT(whichfork)) &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
- logflags &= ~XFS_ILOG_FBROOT(whichfork);
- /*
- * Log whatever the flags say, even if error. Otherwise we might miss
- * detecting a case where the data is changed, there's an error,
- * and it's not logged so we don't shutdown when we should.
- */
- if (logflags) {
- ASSERT(tp && wr);
- xfs_trans_log_inode(tp, ip, logflags);
- }
- if (cur) {
- if (!error) {
- ASSERT(*firstblock == NULLFSBLOCK ||
- XFS_FSB_TO_AGNO(mp, *firstblock) ==
- XFS_FSB_TO_AGNO(mp,
- cur->bc_private.b.firstblock) ||
- (flist->xbf_low &&
- XFS_FSB_TO_AGNO(mp, *firstblock) <
- XFS_FSB_TO_AGNO(mp,
- cur->bc_private.b.firstblock)));
- *firstblock = cur->bc_private.b.firstblock;
- }
- xfs_btree_del_cursor(cur,
- error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
- }
- if (!error)
- xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
- orig_nmap, *nmap);
- return error;
-}
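
A typical read-only call into xfs_bmapi() at this point in the code's history (mirroring the xfs_getbmap() call further down): no transaction, no allocation, and *nmap serving as both the capacity of mval on entry and the number of mappings filled on return. A sketch only; offset_fsb and count_fsb are placeholders.

	xfs_bmbt_irec_t	imaps[2];
	int		nimaps = 2;

	error = xfs_bmapi(NULL, ip, offset_fsb, count_fsb,
			  XFS_BMAPI_AFLAG(whichfork) | XFS_BMAPI_IGSTATE,
			  NULL, 0, imaps, &nimaps, NULL, NULL);
	/* nimaps now holds how many of imaps[] are valid; holes come back
	 * with br_startblock == HOLESTARTBLOCK */
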
-
-/*
- * Map file blocks to filesystem blocks, simple version.
- * One block (extent) only, read-only.
- * For flags, only the XFS_BMAPI_ATTRFORK flag is examined.
- * For the other flag values, the effect is as if XFS_BMAPI_METADATA
- * was set and all the others were clear.
- */
-int /* error */
-xfs_bmapi_single(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode */
- int whichfork, /* data or attr fork */
- xfs_fsblock_t *fsb, /* output: mapped block */
- xfs_fileoff_t bno) /* starting file offs. mapped */
-{
- int eof; /* we've hit the end of extents */
- int error; /* error return */
- xfs_bmbt_irec_t got; /* current file extent record */
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_extnum_t lastx; /* last useful extent number */
- xfs_bmbt_irec_t prev; /* previous file extent record */
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- if (unlikely(
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)) {
- XFS_ERROR_REPORT("xfs_bmapi_single", XFS_ERRLEVEL_LOW,
- ip->i_mount);
- return XFS_ERROR(EFSCORRUPTED);
- }
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
- return XFS_ERROR(EIO);
- XFS_STATS_INC(xs_blk_mapr);
- if (!(ifp->if_flags & XFS_IFEXTENTS) &&
- (error = xfs_iread_extents(tp, ip, whichfork)))
- return error;
- (void)xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
- &prev);
- /*
- * Reading past eof, act as though there's a hole
- * up to end.
- */
- if (eof || got.br_startoff > bno) {
- *fsb = NULLFSBLOCK;
- return 0;
- }
- ASSERT(!ISNULLSTARTBLOCK(got.br_startblock));
- ASSERT(bno < got.br_startoff + got.br_blockcount);
- *fsb = got.br_startblock + (bno - got.br_startoff);
- ifp->if_lastex = lastx;
- return 0;
-}
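
Callers use this to translate a single offset that is expected to be allocated (the realtime bitmap and summary lookups are the classic example). A sketch, with bno and daddr as placeholders:

	xfs_fsblock_t	fsb;

	error = xfs_bmapi_single(tp, ip, XFS_DATA_FORK, &fsb, bno);
	if (error)
		return error;
	if (fsb == NULLFSBLOCK)		/* bno fell in a hole or past EOF */
		return XFS_ERROR(EIO);
	daddr = XFS_FSB_TO_DADDR(mp, fsb);	/* ready for a buffer read */
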
-
-/*
* Unmap (remove) blocks from a file.
* If nexts is nonzero then the number of extents to remove is limited to
* that value. If not all extents in the block range can be removed then
@@ -5357,8 +5043,6 @@ xfs_bunmapi(
xfs_fsblock_t *firstblock, /* first allocated block
controls a.g. for allocs */
xfs_bmap_free_t *flist, /* i/o: list extents to free */
- xfs_extdelta_t *delta, /* o: change made to incore
- extents */
int *done) /* set if not done yet */
{
xfs_btree_cur_t *cur; /* bmap btree cursor */
@@ -5380,10 +5064,10 @@ xfs_bunmapi(
int tmp_logflags; /* partial logging flags */
int wasdel; /* was a delayed alloc extent */
int whichfork; /* data or attribute fork */
- int rsvd; /* OK to allocate reserved blocks */
xfs_fsblock_t sum;
- xfs_bunmap_trace(ip, bno, len, flags, (inst_t *)__return_address);
+ trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
+
whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
XFS_ATTR_FORK : XFS_DATA_FORK;
ifp = XFS_IFORK_PTR(ip, whichfork);
@@ -5397,11 +5081,11 @@ xfs_bunmapi(
mp = ip->i_mount;
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
- rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
+
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(len > 0);
ASSERT(nexts >= 0);
- ASSERT(ifp->if_ext_max ==
- XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+
if (!(ifp->if_flags & XFS_IFEXTENTS) &&
(error = xfs_iread_extents(tp, ip, whichfork)))
return error;
@@ -5416,10 +5100,7 @@ xfs_bunmapi(
bno = start + len - 1;
ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
&prev);
- if (delta) {
- delta->xed_startoff = NULLFILEOFF;
- delta->xed_blockcount = 0;
- }
+
/*
* Check to see if the given block number is past the end of the
* file, back up to the last block if so...
@@ -5432,13 +5113,21 @@ xfs_bunmapi(
logflags = 0;
if (ifp->if_flags & XFS_IFBROOT) {
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
- cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
- whichfork);
+ cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_private.b.firstblock = *firstblock;
cur->bc_private.b.flist = flist;
cur->bc_private.b.flags = 0;
} else
cur = NULL;
+
+ if (isrt) {
+ /*
+ * Synchronize by locking the bitmap inode.
+ */
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+ }
+
extno = 0;
while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
(nexts == 0 || extno < nexts)) {
@@ -5466,7 +5155,7 @@ xfs_bunmapi(
*/
ASSERT(ep != NULL);
del = got;
- wasdel = ISNULLSTARTBLOCK(del.br_startblock);
+ wasdel = isnullstartblock(del.br_startblock);
if (got.br_startoff < start) {
del.br_startoff = start;
del.br_blockcount -= start - got.br_startoff;
@@ -5517,9 +5206,9 @@ xfs_bunmapi(
del.br_blockcount = mod;
}
del.br_state = XFS_EXT_UNWRITTEN;
- error = xfs_bmap_add_extent(ip, lastx, &cur, &del,
- firstblock, flist, &logflags, delta,
- XFS_DATA_FORK, 0);
+ error = xfs_bmap_add_extent_unwritten_real(tp, ip,
+ &lastx, &cur, &del, firstblock, flist,
+ &logflags);
if (error)
goto error0;
goto nodelete;
@@ -5545,9 +5234,12 @@ xfs_bunmapi(
*/
ASSERT(bno >= del.br_blockcount);
bno -= del.br_blockcount;
- if (bno < got.br_startoff) {
- if (--lastx >= 0)
- xfs_bmbt_get_all(--ep, &got);
+ if (got.br_startoff > bno) {
+ if (--lastx >= 0) {
+ ep = xfs_iext_get_ext(ifp,
+ lastx);
+ xfs_bmbt_get_all(ep, &got);
+ }
}
continue;
} else if (del.br_state == XFS_EXT_UNWRITTEN) {
@@ -5561,7 +5253,7 @@ xfs_bunmapi(
xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
lastx - 1), &prev);
ASSERT(prev.br_state == XFS_EXT_NORM);
- ASSERT(!ISNULLSTARTBLOCK(prev.br_startblock));
+ ASSERT(!isnullstartblock(prev.br_startblock));
ASSERT(del.br_startblock ==
prev.br_startblock + prev.br_blockcount);
if (prev.br_startoff < start) {
@@ -5571,25 +5263,26 @@ xfs_bunmapi(
prev.br_startoff = start;
}
prev.br_state = XFS_EXT_UNWRITTEN;
- error = xfs_bmap_add_extent(ip, lastx - 1, &cur,
- &prev, firstblock, flist, &logflags,
- delta, XFS_DATA_FORK, 0);
+ lastx--;
+ error = xfs_bmap_add_extent_unwritten_real(tp,
+ ip, &lastx, &cur, &prev,
+ firstblock, flist, &logflags);
if (error)
goto error0;
goto nodelete;
} else {
ASSERT(del.br_state == XFS_EXT_NORM);
del.br_state = XFS_EXT_UNWRITTEN;
- error = xfs_bmap_add_extent(ip, lastx, &cur,
- &del, firstblock, flist, &logflags,
- delta, XFS_DATA_FORK, 0);
+ error = xfs_bmap_add_extent_unwritten_real(tp,
+ ip, &lastx, &cur, &del,
+ firstblock, flist, &logflags);
if (error)
goto error0;
goto nodelete;
}
}
if (wasdel) {
- ASSERT(STARTBLOCKVAL(del.br_startblock) > 0);
+ ASSERT(startblockval(del.br_startblock) > 0);
/* Update realtime/data freespace, unreserve quota */
if (isrt) {
xfs_filblks_t rtexts;
@@ -5597,15 +5290,15 @@ xfs_bunmapi(
rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
do_div(rtexts, mp->m_sb.sb_rextsize);
xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
- (int64_t)rtexts, rsvd);
- (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
- NULL, ip, -((long)del.br_blockcount), 0,
+ (int64_t)rtexts, 0);
+ (void)xfs_trans_reserve_quota_nblks(NULL,
+ ip, -((long)del.br_blockcount), 0,
XFS_QMOPT_RES_RTBLKS);
} else {
- xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
- (int64_t)del.br_blockcount, rsvd);
- (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
- NULL, ip, -((long)del.br_blockcount), 0,
+ xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
+ (int64_t)del.br_blockcount, 0);
+ (void)xfs_trans_reserve_quota_nblks(NULL,
+ ip, -((long)del.br_blockcount), 0,
XFS_QMOPT_RES_REGBLKS);
}
ip->i_delayed_blks -= del.br_blockcount;
@@ -5627,46 +5320,43 @@ xfs_bunmapi(
*/
if (!wasdel && xfs_trans_get_block_res(tp) == 0 &&
XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_NEXTENTS(ip, whichfork) >= ifp->if_ext_max &&
+ XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
+ XFS_IFORK_MAXEXT(ip, whichfork) &&
del.br_startoff > got.br_startoff &&
del.br_startoff + del.br_blockcount <
got.br_startoff + got.br_blockcount) {
error = XFS_ERROR(ENOSPC);
goto error0;
}
- error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del,
- &tmp_logflags, delta, whichfork, rsvd);
+ error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del,
+ &tmp_logflags, whichfork);
logflags |= tmp_logflags;
if (error)
goto error0;
bno = del.br_startoff - 1;
nodelete:
- lastx = ifp->if_lastex;
/*
* If not done go on to the next (previous) record.
- * Reset ep in case the extents array was re-alloced.
*/
- ep = xfs_iext_get_ext(ifp, lastx);
if (bno != (xfs_fileoff_t)-1 && bno >= start) {
- if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) ||
- xfs_bmbt_get_startoff(ep) > bno) {
- if (--lastx >= 0)
- ep = xfs_iext_get_ext(ifp, lastx);
- }
- if (lastx >= 0)
+ if (lastx >= 0) {
+ ep = xfs_iext_get_ext(ifp, lastx);
+ if (xfs_bmbt_get_startoff(ep) > bno) {
+ if (--lastx >= 0)
+ ep = xfs_iext_get_ext(ifp,
+ lastx);
+ }
xfs_bmbt_get_all(ep, &got);
+ }
extno++;
}
}
- ifp->if_lastex = lastx;
*done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
- ASSERT(ifp->if_ext_max ==
- XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+
/*
* Convert to a btree if necessary.
*/
- if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
+ if (xfs_bmap_needs_btree(ip, whichfork)) {
ASSERT(cur == NULL);
error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
&cur, 0, &tmp_logflags, whichfork);
@@ -5677,8 +5367,7 @@ nodelete:
/*
* transform from btree to extents, give it cur
*/
- else if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
- XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
+ else if (xfs_bmap_wants_extents(ip, whichfork)) {
ASSERT(cur != NULL);
error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
whichfork);
@@ -5689,28 +5378,18 @@ nodelete:
/*
* transform from extents to local?
*/
- ASSERT(ifp->if_ext_max ==
- XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
error = 0;
- if (delta && delta->xed_startoff != NULLFILEOFF) {
- /* A change was actually made.
- * Note that delta->xed_blockcount is an offset at this
- * point and needs to be converted to a block count.
- */
- ASSERT(delta->xed_blockcount > delta->xed_startoff);
- delta->xed_blockcount -= delta->xed_startoff;
- }
error0:
/*
* Log everything. Do this after conversion, there's no point in
* logging the extent records if we've converted to btree format.
*/
- if ((logflags & XFS_ILOG_FEXT(whichfork)) &&
+ if ((logflags & xfs_ilog_fext(whichfork)) &&
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
- logflags &= ~XFS_ILOG_FEXT(whichfork);
- else if ((logflags & XFS_ILOG_FBROOT(whichfork)) &&
+ logflags &= ~xfs_ilog_fext(whichfork);
+ else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
- logflags &= ~XFS_ILOG_FBROOT(whichfork);
+ logflags &= ~xfs_ilog_fbroot(whichfork);
/*
* Log inode even in the error case, if the transaction
* is dirty we'll need to shut down the filesystem.
@@ -5729,780 +5408,199 @@ error0:
}
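
Callers of xfs_bunmapi() normally loop, bounding each pass with a maximum extent count and rolling the transaction until *done is set. A hedged sketch of that pattern (unmap_len, MAX_EXTS, free_list, firstfsb and committed are placeholders; per-pass rejoin/reservation details omitted):

	int	done = 0;

	do {
		/* re-initialise free_list and firstfsb for this pass */
		error = xfs_bunmapi(tp, ip, start_fsb, unmap_len, 0,
				    MAX_EXTS, &firstfsb, &free_list, &done);
		if (error)
			goto abort;
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto abort;
	} while (!done);
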
/*
- * returns 1 for success, 0 if we failed to map the extent.
- */
-STATIC int
-xfs_getbmapx_fix_eof_hole(
- xfs_inode_t *ip, /* xfs incore inode pointer */
- struct getbmap *out, /* output structure */
- int prealloced, /* this is a file with
- * preallocated data space */
- __int64_t end, /* last block requested */
- xfs_fsblock_t startblock)
-{
- __int64_t fixlen;
- xfs_mount_t *mp; /* file system mount point */
-
- if (startblock == HOLESTARTBLOCK) {
- mp = ip->i_mount;
- out->bmv_block = -1;
- fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, ip->i_size));
- fixlen -= out->bmv_offset;
- if (prealloced && out->bmv_offset + out->bmv_length == end) {
- /* Came to hole at EOF. Trim it. */
- if (fixlen <= 0)
- return 0;
- out->bmv_length = fixlen;
- }
- } else {
- out->bmv_block = XFS_FSB_TO_DB(ip, startblock);
- }
-
- return 1;
-}
-
-/*
- * Fcntl interface to xfs_bmapi.
+ * Shift extent records to the left to cover a hole.
+ *
+ * The maximum number of extents to be shifted in a single operation
+ * is @num_exts, and @current_ext keeps track of the current extent
+ * index we have shifted. @offset_shift_fsb is the length by which each
+ * extent is shifted. If there is no hole to shift the extents
+ * into, this will be considered an invalid operation and we abort immediately.
*/
-int /* error code */
-xfs_getbmap(
- xfs_inode_t *ip,
- struct getbmap *bmv, /* user bmap structure */
- void __user *ap, /* pointer to user's array */
- int interface) /* interface flags */
+int
+xfs_bmap_shift_extents(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ int *done,
+ xfs_fileoff_t start_fsb,
+ xfs_fileoff_t offset_shift_fsb,
+ xfs_extnum_t *current_ext,
+ xfs_fsblock_t *firstblock,
+ struct xfs_bmap_free *flist,
+ int num_exts)
{
- __int64_t bmvend; /* last block requested */
- int error; /* return value */
- __int64_t fixlen; /* length for -1 case */
- int i; /* extent number */
- int lock; /* lock state */
- xfs_bmbt_irec_t *map; /* buffer for user's data */
- xfs_mount_t *mp; /* file system mount point */
- int nex; /* # of user extents can do */
- int nexleft; /* # of user extents left */
- int subnex; /* # of bmapi's can do */
- int nmap; /* number of map entries */
- struct getbmap out; /* output structure */
- int whichfork; /* data or attr fork */
- int prealloced; /* this is a file with
- * preallocated data space */
- int sh_unwritten; /* true, if unwritten */
- /* extents listed separately */
- int bmapi_flags; /* flags for xfs_bmapi */
- __int32_t oflags; /* getbmapx bmv_oflags field */
-
- mp = ip->i_mount;
-
- whichfork = interface & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
- sh_unwritten = (interface & BMV_IF_PREALLOC) != 0;
-
- /* If the BMV_IF_NO_DMAPI_READ interface bit specified, do not
- * generate a DMAPI read event. Otherwise, if the DM_EVENT_READ
- * bit is set for the file, generate a read event in order
- * that the DMAPI application may do its thing before we return
- * the extents. Usually this means restoring user file data to
- * regions of the file that look like holes.
- *
- * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
- * BMV_IF_NO_DMAPI_READ so that read events are generated.
- * If this were not true, callers of ioctl( XFS_IOC_GETBMAP )
- * could misinterpret holes in a DMAPI file as true holes,
- * when in fact they may represent offline user data.
- */
- if ((interface & BMV_IF_NO_DMAPI_READ) == 0 &&
- DM_EVENT_ENABLED(ip, DM_EVENT_READ) &&
- whichfork == XFS_DATA_FORK) {
- error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL);
- if (error)
- return XFS_ERROR(error);
- }
-
- if (whichfork == XFS_ATTR_FORK) {
- if (XFS_IFORK_Q(ip)) {
- if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
- ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
- ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
- return XFS_ERROR(EINVAL);
- } else if (unlikely(
- ip->i_d.di_aformat != 0 &&
- ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
- XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
- ip->i_mount);
- return XFS_ERROR(EFSCORRUPTED);
- }
- } else if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
- ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
- ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
- return XFS_ERROR(EINVAL);
- if (whichfork == XFS_DATA_FORK) {
- if (xfs_get_extsz_hint(ip) ||
- ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
- prealloced = 1;
- fixlen = XFS_MAXIOFFSET(mp);
- } else {
- prealloced = 0;
- fixlen = ip->i_size;
- }
- } else {
- prealloced = 0;
- fixlen = 1LL << 32;
- }
-
- if (bmv->bmv_length == -1) {
- fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
- bmv->bmv_length = MAX( (__int64_t)(fixlen - bmv->bmv_offset),
- (__int64_t)0);
- } else if (bmv->bmv_length < 0)
- return XFS_ERROR(EINVAL);
- if (bmv->bmv_length == 0) {
- bmv->bmv_entries = 0;
- return 0;
- }
- nex = bmv->bmv_count - 1;
- if (nex <= 0)
- return XFS_ERROR(EINVAL);
- bmvend = bmv->bmv_offset + bmv->bmv_length;
-
- xfs_ilock(ip, XFS_IOLOCK_SHARED);
-
- if (whichfork == XFS_DATA_FORK &&
- (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size)) {
- /* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */
- error = xfs_flush_pages(ip, (xfs_off_t)0,
- -1, 0, FI_REMAPF);
- if (error) {
- xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- return error;
- }
- }
+ struct xfs_btree_cur *cur;
+ struct xfs_bmbt_rec_host *gotp;
+ struct xfs_bmbt_irec got;
+ struct xfs_bmbt_irec left;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp;
+ xfs_extnum_t nexts = 0;
+ xfs_fileoff_t startoff;
+ int error = 0;
+ int i;
+ int whichfork = XFS_DATA_FORK;
+ int logflags;
+ xfs_filblks_t blockcount = 0;
+ int total_extents;
- ASSERT(whichfork == XFS_ATTR_FORK || ip->i_delayed_blks == 0);
-
- lock = xfs_ilock_map_shared(ip);
-
- /*
- * Don't let nex be bigger than the number of extents
- * we can have assuming alternating holes and real extents.
- */
- if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
- nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
-
- bmapi_flags = XFS_BMAPI_AFLAG(whichfork) |
- ((sh_unwritten) ? 0 : XFS_BMAPI_IGSTATE);
-
- /*
- * Allocate enough space to handle "subnex" maps at a time.
- */
- subnex = 16;
- map = kmem_alloc(subnex * sizeof(*map), KM_SLEEP);
-
- bmv->bmv_entries = 0;
-
- if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0) {
- error = 0;
- goto unlock_and_return;
+ if (unlikely(XFS_TEST_ERROR(
+ (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
+ mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
+ XFS_ERROR_REPORT("xfs_bmap_shift_extents",
+ XFS_ERRLEVEL_LOW, mp);
+ return XFS_ERROR(EFSCORRUPTED);
}
- nexleft = nex;
-
- do {
- nmap = (nexleft > subnex) ? subnex : nexleft;
- error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
- XFS_BB_TO_FSB(mp, bmv->bmv_length),
- bmapi_flags, NULL, 0, map, &nmap,
- NULL, NULL);
- if (error)
- goto unlock_and_return;
- ASSERT(nmap <= subnex);
-
- for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
- nexleft--;
- oflags = (map[i].br_state == XFS_EXT_UNWRITTEN) ?
- BMV_OF_PREALLOC : 0;
- out.bmv_offset = XFS_FSB_TO_BB(mp, map[i].br_startoff);
- out.bmv_length = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
- ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
- if (map[i].br_startblock == HOLESTARTBLOCK &&
- whichfork == XFS_ATTR_FORK) {
- /* came to the end of attribute fork */
- goto unlock_and_return;
- } else {
- if (!xfs_getbmapx_fix_eof_hole(ip, &out,
- prealloced, bmvend,
- map[i].br_startblock)) {
- goto unlock_and_return;
- }
-
- /* return either getbmap/getbmapx structure. */
- if (interface & BMV_IF_EXTENDED) {
- struct getbmapx outx;
-
- GETBMAP_CONVERT(out,outx);
- outx.bmv_oflags = oflags;
- outx.bmv_unused1 = outx.bmv_unused2 = 0;
- if (copy_to_user(ap, &outx,
- sizeof(outx))) {
- error = XFS_ERROR(EFAULT);
- goto unlock_and_return;
- }
- } else {
- if (copy_to_user(ap, &out,
- sizeof(out))) {
- error = XFS_ERROR(EFAULT);
- goto unlock_and_return;
- }
- }
- bmv->bmv_offset =
- out.bmv_offset + out.bmv_length;
- bmv->bmv_length = MAX((__int64_t)0,
- (__int64_t)(bmvend - bmv->bmv_offset));
- bmv->bmv_entries++;
- ap = (interface & BMV_IF_EXTENDED) ?
- (void __user *)
- ((struct getbmapx __user *)ap + 1) :
- (void __user *)
- ((struct getbmap __user *)ap + 1);
- }
- }
- } while (nmap && nexleft && bmv->bmv_length);
-
-unlock_and_return:
- xfs_iunlock_map_shared(ip, lock);
- xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return XFS_ERROR(EIO);
- kmem_free(map, subnex * sizeof(*map));
+ ASSERT(current_ext != NULL);
- return error;
-}
-
-/*
- * Check the last inode extent to determine whether this allocation will result
- * in blocks being allocated at the end of the file. When we allocate new data
- * blocks at the end of the file which do not start at the previous data block,
- * we will try to align the new blocks at stripe unit boundaries.
- */
-STATIC int /* error */
-xfs_bmap_isaeof(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_fileoff_t off, /* file offset in fsblocks */
- int whichfork, /* data or attribute fork */
- char *aeof) /* return value */
-{
- int error; /* error return value */
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_bmbt_rec_host_t *lastrec; /* extent record pointer */
- xfs_extnum_t nextents; /* number of file extents */
- xfs_bmbt_irec_t s; /* expanded extent record */
-
- ASSERT(whichfork == XFS_DATA_FORK);
ifp = XFS_IFORK_PTR(ip, whichfork);
- if (!(ifp->if_flags & XFS_IFEXTENTS) &&
- (error = xfs_iread_extents(NULL, ip, whichfork)))
- return error;
- nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
- if (nextents == 0) {
- *aeof = 1;
- return 0;
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ /* Read in all the extents */
+ error = xfs_iread_extents(tp, ip, whichfork);
+ if (error)
+ return error;
}
- /*
- * Go to the last extent
- */
- lastrec = xfs_iext_get_ext(ifp, nextents - 1);
- xfs_bmbt_get_all(lastrec, &s);
- /*
- * Check we are allocating in the last extent (for delayed allocations)
- * or past the last extent for non-delayed allocations.
- */
- *aeof = (off >= s.br_startoff &&
- off < s.br_startoff + s.br_blockcount &&
- ISNULLSTARTBLOCK(s.br_startblock)) ||
- off >= s.br_startoff + s.br_blockcount;
- return 0;
-}
-/*
- * Check if the endoff is outside the last extent. If so the caller will grow
- * the allocation to a stripe unit boundary.
- */
-int /* error */
-xfs_bmap_eof(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_fileoff_t endoff, /* file offset in fsblocks */
- int whichfork, /* data or attribute fork */
- int *eof) /* result value */
-{
- xfs_fsblock_t blockcount; /* extent block count */
- int error; /* error return value */
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_bmbt_rec_host_t *lastrec; /* extent record pointer */
- xfs_extnum_t nextents; /* number of file extents */
- xfs_fileoff_t startoff; /* extent starting file offset */
-
- ASSERT(whichfork == XFS_DATA_FORK);
- ifp = XFS_IFORK_PTR(ip, whichfork);
- if (!(ifp->if_flags & XFS_IFEXTENTS) &&
- (error = xfs_iread_extents(NULL, ip, whichfork)))
- return error;
- nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
- if (nextents == 0) {
- *eof = 1;
- return 0;
- }
/*
- * Go to the last extent
+	 * If *current_ext is 0, we need to look up the extent from
+	 * which we start shifting and store it in gotp.
*/
- lastrec = xfs_iext_get_ext(ifp, nextents - 1);
- startoff = xfs_bmbt_get_startoff(lastrec);
- blockcount = xfs_bmbt_get_blockcount(lastrec);
- *eof = endoff >= startoff + blockcount;
- return 0;
-}
-
-#ifdef DEBUG
-STATIC
-xfs_buf_t *
-xfs_bmap_get_bp(
- xfs_btree_cur_t *cur,
- xfs_fsblock_t bno)
-{
- int i;
- xfs_buf_t *bp;
-
- if (!cur)
- return(NULL);
-
- bp = NULL;
- for(i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
- bp = cur->bc_bufs[i];
- if (!bp) break;
- if (XFS_BUF_ADDR(bp) == bno)
- break; /* Found it */
- }
- if (i == XFS_BTREE_MAXLEVELS)
- bp = NULL;
-
- if (!bp) { /* Chase down all the log items to see if the bp is there */
- xfs_log_item_chunk_t *licp;
- xfs_trans_t *tp;
-
- tp = cur->bc_tp;
- licp = &tp->t_items;
- while (!bp && licp != NULL) {
- if (XFS_LIC_ARE_ALL_FREE(licp)) {
- licp = licp->lic_next;
- continue;
- }
- for (i = 0; i < licp->lic_unused; i++) {
- xfs_log_item_desc_t *lidp;
- xfs_log_item_t *lip;
- xfs_buf_log_item_t *bip;
- xfs_buf_t *lbp;
-
- if (XFS_LIC_ISFREE(licp, i)) {
- continue;
- }
-
- lidp = XFS_LIC_SLOT(licp, i);
- lip = lidp->lid_item;
- if (lip->li_type != XFS_LI_BUF)
- continue;
-
- bip = (xfs_buf_log_item_t *)lip;
- lbp = bip->bli_buf;
-
- if (XFS_BUF_ADDR(lbp) == bno) {
- bp = lbp;
- break; /* Found it */
- }
- }
- licp = licp->lic_next;
- }
- }
- return(bp);
-}
-
-void
-xfs_check_block(
- xfs_bmbt_block_t *block,
- xfs_mount_t *mp,
- int root,
- short sz)
-{
- int i, j, dmxr;
- __be64 *pp, *thispa; /* pointer to block address */
- xfs_bmbt_key_t *prevp, *keyp;
-
- ASSERT(be16_to_cpu(block->bb_level) > 0);
-
- prevp = NULL;
- for( i = 1; i <= be16_to_cpu(block->bb_numrecs); i++) {
- dmxr = mp->m_bmap_dmxr[0];
-
- if (root) {
- keyp = XFS_BMAP_BROOT_KEY_ADDR(block, i, sz);
- } else {
- keyp = XFS_BTREE_KEY_ADDR(xfs_bmbt, block, i);
- }
-
- if (prevp) {
- xfs_btree_check_key(XFS_BTNUM_BMAP, prevp, keyp);
- }
- prevp = keyp;
-
+ if (!*current_ext) {
+ gotp = xfs_iext_bno_to_ext(ifp, start_fsb, current_ext);
/*
- * Compare the block numbers to see if there are dups.
+ * gotp can be null in 2 cases: 1) if there are no extents
+ * or 2) start_fsb lies in a hole beyond which there are
+ * no extents. Either way, we are done.
*/
-
- if (root) {
- pp = XFS_BMAP_BROOT_PTR_ADDR(block, i, sz);
- } else {
- pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, i, dmxr);
- }
- for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
- if (root) {
- thispa = XFS_BMAP_BROOT_PTR_ADDR(block, j, sz);
- } else {
- thispa = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, j,
- dmxr);
- }
- if (*thispa == *pp) {
- cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld",
- __func__, j, i,
- (unsigned long long)be64_to_cpu(*thispa));
- panic("%s: ptrs are equal in node\n",
- __func__);
- }
+ if (!gotp) {
+ *done = 1;
+ return 0;
}
}
-}
-
-/*
- * Check that the extents for the inode ip are in the right order in all
- * btree leaves.
- */
-
-STATIC void
-xfs_bmap_check_leaf_extents(
- xfs_btree_cur_t *cur, /* btree cursor or null */
- xfs_inode_t *ip, /* incore inode pointer */
- int whichfork) /* data or attr fork */
-{
- xfs_bmbt_block_t *block; /* current btree block */
- xfs_fsblock_t bno; /* block # of "block" */
- xfs_buf_t *bp; /* buffer for "block" */
- int error; /* error return value */
- xfs_extnum_t i=0, j; /* index into the extents list */
- xfs_ifork_t *ifp; /* fork structure */
- int level; /* btree level, for checking */
- xfs_mount_t *mp; /* file system mount structure */
- __be64 *pp; /* pointer to block address */
- xfs_bmbt_rec_t *ep; /* pointer to current extent */
- xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */
- xfs_bmbt_rec_t *nextp; /* pointer to next extent */
- int bp_release = 0;
-
- if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
- return;
- }
-
- bno = NULLFSBLOCK;
- mp = ip->i_mount;
- ifp = XFS_IFORK_PTR(ip, whichfork);
- block = ifp->if_broot;
- /*
- * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
- */
- level = be16_to_cpu(block->bb_level);
- ASSERT(level > 0);
- xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
- pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
- bno = be64_to_cpu(*pp);
-
- ASSERT(bno != NULLDFSBNO);
- ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
- ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
- /*
- * Go down the tree until leaf level is reached, following the first
- * pointer (leftmost) at each level.
- */
- while (level-- > 0) {
- /* See if buf is in cur first */
- bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
- if (bp) {
- bp_release = 0;
- } else {
- bp_release = 1;
- }
- if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
- XFS_BMAP_BTREE_REF)))
- goto error_norelse;
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
- XFS_WANT_CORRUPTED_GOTO(
- XFS_BMAP_SANITY_CHECK(mp, block, level),
- error0);
- if (level == 0)
- break;
-
- /*
- * Check this block for basic sanity (increasing keys and
- * no duplicate blocks).
- */
-
- xfs_check_block(block, mp, 0, 0);
- pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
- bno = be64_to_cpu(*pp);
- XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
- if (bp_release) {
- bp_release = 0;
- xfs_trans_brelse(NULL, bp);
- }
+	/* We are going to change the core inode */
+ logflags = XFS_ILOG_CORE;
+ if (ifp->if_flags & XFS_IFBROOT) {
+ cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
+ cur->bc_private.b.firstblock = *firstblock;
+ cur->bc_private.b.flist = flist;
+ cur->bc_private.b.flags = 0;
+ } else {
+ cur = NULL;
+ logflags |= XFS_ILOG_DEXT;
}
/*
- * Here with bp and block set to the leftmost leaf node in the tree.
+	 * There may be delalloc extents in the data fork before the range we are
+	 * collapsing out, so we cannot use the count of real extents here.
+	 * Instead we have to calculate it from the incore fork.
*/
- i = 0;
+ total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
+ while (nexts++ < num_exts && *current_ext < total_extents) {
- /*
- * Loop over all leaf nodes checking that all extents are in the right order.
- */
- for (;;) {
- xfs_fsblock_t nextbno;
- xfs_extnum_t num_recs;
-
-
- num_recs = be16_to_cpu(block->bb_numrecs);
+ gotp = xfs_iext_get_ext(ifp, *current_ext);
+ xfs_bmbt_get_all(gotp, &got);
+ startoff = got.br_startoff - offset_shift_fsb;
/*
- * Read-ahead the next leaf block, if any.
- */
-
- nextbno = be64_to_cpu(block->bb_rightsib);
-
- /*
- * Check all the extents to make sure they are OK.
- * If we had a previous block, the last entry should
- * conform with the first entry in this one.
+		 * Before shifting an extent into the hole, make sure that the
+		 * hole is large enough to accommodate the shift.
*/
+ if (*current_ext) {
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
+ *current_ext - 1), &left);
- ep = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
- if (i) {
- xfs_btree_check_rec(XFS_BTNUM_BMAP, &last, ep);
- }
- for (j = 1; j < num_recs; j++) {
- nextp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, j + 1);
- xfs_btree_check_rec(XFS_BTNUM_BMAP, ep, nextp);
- ep = nextp;
+ if (startoff < left.br_startoff + left.br_blockcount)
+ error = XFS_ERROR(EINVAL);
+ } else if (offset_shift_fsb > got.br_startoff) {
+ /*
+			 * When the first extent is shifted, offset_shift_fsb
+			 * must not exceed the starting offset of the first
+			 * extent.
+ */
+ error = XFS_ERROR(EINVAL);
}
- last = *ep;
- i += num_recs;
- if (bp_release) {
- bp_release = 0;
- xfs_trans_brelse(NULL, bp);
- }
- bno = nextbno;
- /*
- * If we've reached the end, stop.
- */
- if (bno == NULLFSBLOCK)
- break;
+ if (error)
+ goto del_cursor;
- bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
- if (bp) {
- bp_release = 0;
- } else {
- bp_release = 1;
+ if (cur) {
+ error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
+ got.br_startblock,
+ got.br_blockcount,
+ &i);
+ if (error)
+ goto del_cursor;
+ XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor);
}
- if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
- XFS_BMAP_BTREE_REF)))
- goto error_norelse;
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
- }
- if (bp_release) {
- bp_release = 0;
- xfs_trans_brelse(NULL, bp);
- }
- return;
-error0:
- cmn_err(CE_WARN, "%s: at error0", __func__);
- if (bp_release)
- xfs_trans_brelse(NULL, bp);
-error_norelse:
- cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents",
- __func__, i);
- panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
- return;
-}
-#endif
+ /* Check if we can merge 2 adjacent extents */
+ if (*current_ext &&
+ left.br_startoff + left.br_blockcount == startoff &&
+ left.br_startblock + left.br_blockcount ==
+ got.br_startblock &&
+ left.br_state == got.br_state &&
+ left.br_blockcount + got.br_blockcount <= MAXEXTLEN) {
+ blockcount = left.br_blockcount +
+ got.br_blockcount;
+ xfs_iext_remove(ip, *current_ext, 1, 0);
+ if (cur) {
+ error = xfs_btree_delete(cur, &i);
+ if (error)
+ goto del_cursor;
+ XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor);
+ }
+ XFS_IFORK_NEXT_SET(ip, whichfork,
+ XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
+ gotp = xfs_iext_get_ext(ifp, --*current_ext);
+ xfs_bmbt_get_all(gotp, &got);
-/*
- * Count fsblocks of the given fork.
- */
-int /* error */
-xfs_bmap_count_blocks(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode */
- int whichfork, /* data or attr fork */
- int *count) /* out: count of blocks */
-{
- xfs_bmbt_block_t *block; /* current btree block */
- xfs_fsblock_t bno; /* block # of "block" */
- xfs_ifork_t *ifp; /* fork structure */
- int level; /* btree level, for checking */
- xfs_mount_t *mp; /* file system mount structure */
- __be64 *pp; /* pointer to block address */
+ /* Make cursor point to the extent we will update */
+ if (cur) {
+ error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
+ got.br_startblock,
+ got.br_blockcount,
+ &i);
+ if (error)
+ goto del_cursor;
+ XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor);
+ }
- bno = NULLFSBLOCK;
- mp = ip->i_mount;
- ifp = XFS_IFORK_PTR(ip, whichfork);
- if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
- if (unlikely(xfs_bmap_count_leaves(ifp, 0,
- ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
- count) < 0)) {
- XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)",
- XFS_ERRLEVEL_LOW, mp);
- return XFS_ERROR(EFSCORRUPTED);
+ xfs_bmbt_set_blockcount(gotp, blockcount);
+ got.br_blockcount = blockcount;
+ } else {
+ /* We have to update the startoff */
+ xfs_bmbt_set_startoff(gotp, startoff);
+ got.br_startoff = startoff;
}
- return 0;
- }
- /*
- * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
- */
- block = ifp->if_broot;
- level = be16_to_cpu(block->bb_level);
- ASSERT(level > 0);
- pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
- bno = be64_to_cpu(*pp);
- ASSERT(bno != NULLDFSBNO);
- ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
- ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
-
- if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
- XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
- mp);
- return XFS_ERROR(EFSCORRUPTED);
- }
-
- return 0;
-}
-
-/*
- * Recursively walks each level of a btree
- * to count total fsblocks is use.
- */
-STATIC int /* error */
-xfs_bmap_count_tree(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_ifork_t *ifp, /* inode fork pointer */
- xfs_fsblock_t blockno, /* file system block number */
- int levelin, /* level in btree */
- int *count) /* Count of blocks */
-{
- int error;
- xfs_buf_t *bp, *nbp;
- int level = levelin;
- __be64 *pp;
- xfs_fsblock_t bno = blockno;
- xfs_fsblock_t nextbno;
- xfs_bmbt_block_t *block, *nextblock;
- int numrecs;
-
- if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF)))
- return error;
- *count += 1;
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
-
- if (--level) {
- /* Not at node above leafs, count this level of nodes */
- nextbno = be64_to_cpu(block->bb_rightsib);
- while (nextbno != NULLFSBLOCK) {
- if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
- 0, &nbp, XFS_BMAP_BTREE_REF)))
- return error;
- *count += 1;
- nextblock = XFS_BUF_TO_BMBT_BLOCK(nbp);
- nextbno = be64_to_cpu(nextblock->bb_rightsib);
- xfs_trans_brelse(tp, nbp);
+ if (cur) {
+ error = xfs_bmbt_update(cur, got.br_startoff,
+ got.br_startblock,
+ got.br_blockcount,
+ got.br_state);
+ if (error)
+ goto del_cursor;
}
- /* Dive to the next level */
- pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
- bno = be64_to_cpu(*pp);
- if (unlikely((error =
- xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
- xfs_trans_brelse(tp, bp);
- XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
- XFS_ERRLEVEL_LOW, mp);
- return XFS_ERROR(EFSCORRUPTED);
- }
- xfs_trans_brelse(tp, bp);
- } else {
- /* count all level 1 nodes and their leaves */
- for (;;) {
- nextbno = be64_to_cpu(block->bb_rightsib);
- numrecs = be16_to_cpu(block->bb_numrecs);
- if (unlikely(xfs_bmap_disk_count_leaves(0,
- block, numrecs, count) < 0)) {
- xfs_trans_brelse(tp, bp);
- XFS_ERROR_REPORT("xfs_bmap_count_tree(2)",
- XFS_ERRLEVEL_LOW, mp);
- return XFS_ERROR(EFSCORRUPTED);
- }
- xfs_trans_brelse(tp, bp);
- if (nextbno == NULLFSBLOCK)
- break;
- bno = nextbno;
- if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
- XFS_BMAP_BTREE_REF)))
- return error;
- *count += 1;
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
- }
+ (*current_ext)++;
+ total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
}
- return 0;
-}
-/*
- * Count leaf blocks given a range of extent records.
- */
-STATIC int
-xfs_bmap_count_leaves(
- xfs_ifork_t *ifp,
- xfs_extnum_t idx,
- int numrecs,
- int *count)
-{
- int b;
-
- for (b = 0; b < numrecs; b++) {
- xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
- *count += xfs_bmbt_get_blockcount(frp);
- }
- return 0;
-}
+ /* Check if we are done */
+ if (*current_ext == total_extents)
+ *done = 1;
-/*
- * Count leaf blocks given a range of extent records originally
- * in btree format.
- */
-STATIC int
-xfs_bmap_disk_count_leaves(
- xfs_extnum_t idx,
- xfs_bmbt_block_t *block,
- int numrecs,
- int *count)
-{
- int b;
- xfs_bmbt_rec_t *frp;
+del_cursor:
+ if (cur)
+ xfs_btree_del_cursor(cur,
+ error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
- for (b = 1; b <= numrecs; b++) {
- frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, idx + b);
- *count += xfs_bmbt_disk_get_blockcount(frp);
- }
- return 0;
+ xfs_trans_log_inode(tp, ip, logflags);
+ return error;
}
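
The rebuilt function above shifts at most num_exts extents per call and reports progress through *current_ext and *done. As an illustration only (not part of this patch), the sketch below shows one plausible way a caller could drive xfs_bmap_shift_extents() one transaction at a time, using only helpers visible in this series: xfs_bmap_init, xfs_bmap_cancel and xfs_bmap_finish (the latter declared outside this header after the reorganisation). The wrapper name example_collapse_loop, the transaction handling left as comments, and the locking are assumptions, not the kernel's actual caller.

/*
 * Illustrative sketch: shift extents left in batches of
 * XFS_BMAP_MAX_SHIFT_EXTENTS, one reserved transaction per pass.
 */
STATIC int
example_collapse_loop(
	struct xfs_trans	**tpp,		/* current transaction (assumed reserved) */
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,	/* first block of the range to shift */
	xfs_fileoff_t		shift_fsb)	/* distance to shift left, in fsblocks */
{
	struct xfs_bmap_free	free_list;
	xfs_fsblock_t		first_block;
	xfs_extnum_t		current_ext = 0;
	int			committed;
	int			done = 0;
	int			error = 0;

	while (!done) {
		/* a fresh, reserved transaction is assumed at this point */
		xfs_bmap_init(&free_list, &first_block);

		/* shift one extent per transaction, as the header comment suggests */
		error = xfs_bmap_shift_extents(*tpp, ip, &done, start_fsb,
				shift_fsb, &current_ext, &first_block,
				&free_list, XFS_BMAP_MAX_SHIFT_EXTENTS);
		if (error) {
			xfs_bmap_cancel(&free_list);
			break;
		}

		/* free any blocks queued when two adjacent extents were merged */
		error = xfs_bmap_finish(tpp, &free_list, &committed);
		if (error)
			break;

		/* ... commit *tpp and reserve the next transaction here ... */
	}
	return error;
}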
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 6ff70cda451..b879ca56a64 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -28,20 +28,6 @@ struct xfs_trans;
extern kmem_zone_t *xfs_bmap_free_item_zone;
/*
- * DELTA: describe a change to the in-core extent list.
- *
- * Internally the use of xed_blockount is somewhat funky.
- * xed_blockcount contains an offset much of the time because this
- * makes merging changes easier. (xfs_fileoff_t and xfs_filblks_t are
- * the same underlying type).
- */
-typedef struct xfs_extdelta
-{
- xfs_fileoff_t xed_startoff; /* offset of range */
- xfs_filblks_t xed_blockcount; /* blocks in range */
-} xfs_extdelta_t;
-
-/*
* List of extents to be free "later".
* The list is kept sorted on xbf_startblock.
*/
@@ -54,37 +40,54 @@ typedef struct xfs_bmap_free_item
/*
* Header for free extent list.
+ *
+ * xbf_low is used by the allocator to activate the lowspace algorithm -
+ * when free space is running low the extent allocator may choose to
+ * allocate an extent from an AG without leaving sufficient space for
+ * a btree split when inserting the new extent. In this case the allocator
+ * will enable the lowspace algorithm which is supposed to allow further
+ * allocations (such as btree splits and newroots) to allocate from
+ * sequential AGs. In order to avoid locking AGs out of order the lowspace
+ * algorithm will start searching for free space from AG 0. If the correct
+ * transaction reservations have been made then this algorithm will eventually
+ * find all the space it needs.
*/
typedef struct xfs_bmap_free
{
xfs_bmap_free_item_t *xbf_first; /* list of to-be-free extents */
int xbf_count; /* count of items on list */
- int xbf_low; /* kludge: alloc in low mode */
+ int xbf_low; /* alloc in low mode */
} xfs_bmap_free_t;
#define XFS_BMAP_MAX_NMAP 4
/*
- * Flags for xfs_bmapi
+ * Flags for xfs_bmapi_*
*/
-#define XFS_BMAPI_WRITE 0x001 /* write operation: allocate space */
-#define XFS_BMAPI_DELAY 0x002 /* delayed write operation */
-#define XFS_BMAPI_ENTIRE 0x004 /* return entire extent, not trimmed */
-#define XFS_BMAPI_METADATA 0x008 /* mapping metadata not user data */
-#define XFS_BMAPI_EXACT 0x010 /* allocate only to spec'd bounds */
-#define XFS_BMAPI_ATTRFORK 0x020 /* use attribute fork not data */
-#define XFS_BMAPI_ASYNC 0x040 /* bunmapi xactions can be async */
-#define XFS_BMAPI_RSVBLOCKS 0x080 /* OK to alloc. reserved data blocks */
-#define XFS_BMAPI_PREALLOC 0x100 /* preallocation op: unwritten space */
-#define XFS_BMAPI_IGSTATE 0x200 /* Ignore state - */
+#define XFS_BMAPI_ENTIRE 0x001 /* return entire extent, not trimmed */
+#define XFS_BMAPI_METADATA 0x002 /* mapping metadata not user data */
+#define XFS_BMAPI_ATTRFORK 0x004 /* use attribute fork not data */
+#define XFS_BMAPI_PREALLOC 0x008 /* preallocation op: unwritten space */
+#define XFS_BMAPI_IGSTATE 0x010 /* Ignore state - */
/* combine contig. space */
-#define XFS_BMAPI_CONTIG 0x400 /* must allocate only one extent */
-/* XFS_BMAPI_DIRECT_IO 0x800 */
-#define XFS_BMAPI_CONVERT 0x1000 /* unwritten extent conversion - */
- /* need write cache flushing and no */
- /* additional allocation alignments */
+#define XFS_BMAPI_CONTIG 0x020 /* must allocate only one extent */
+/*
+ * unwritten extent conversion - this needs write cache flushing and no additional
+ * allocation alignments. When specified with XFS_BMAPI_PREALLOC it converts
+ * from written to unwritten, otherwise it converts from unwritten to written.
+ */
+#define XFS_BMAPI_CONVERT 0x040
+
+#define XFS_BMAPI_FLAGS \
+ { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
+ { XFS_BMAPI_METADATA, "METADATA" }, \
+ { XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \
+ { XFS_BMAPI_PREALLOC, "PREALLOC" }, \
+ { XFS_BMAPI_IGSTATE, "IGSTATE" }, \
+ { XFS_BMAPI_CONTIG, "CONTIG" }, \
+ { XFS_BMAPI_CONVERT, "CONVERT" }
+
-#define XFS_BMAPI_AFLAG(w) xfs_bmapi_aflag(w)
static inline int xfs_bmapi_aflag(int w)
{
return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0);
@@ -96,7 +99,6 @@ static inline int xfs_bmapi_aflag(int w)
#define DELAYSTARTBLOCK ((xfs_fsblock_t)-1LL)
#define HOLESTARTBLOCK ((xfs_fsblock_t)-2LL)
-#define XFS_BMAP_INIT(flp,fbp) xfs_bmap_init(flp,fbp)
static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp)
{
((flp)->xbf_first = NULL, (flp)->xbf_count = 0, \
@@ -104,286 +106,81 @@ static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp)
}
/*
- * Argument structure for xfs_bmap_alloc.
+ * Flags for xfs_bmap_add_extent*.
*/
-typedef struct xfs_bmalloca {
- xfs_fsblock_t firstblock; /* i/o first block allocated */
- xfs_fsblock_t rval; /* starting block of new extent */
- xfs_fileoff_t off; /* offset in file filling in */
- struct xfs_trans *tp; /* transaction pointer */
- struct xfs_inode *ip; /* incore inode pointer */
- struct xfs_bmbt_irec *prevp; /* extent before the new one */
- struct xfs_bmbt_irec *gotp; /* extent after, or delayed */
- xfs_extlen_t alen; /* i/o length asked/allocated */
- xfs_extlen_t total; /* total blocks needed for xaction */
- xfs_extlen_t minlen; /* mininum allocation size (blocks) */
- xfs_extlen_t minleft; /* amount must be left after alloc */
- char eof; /* set if allocating past last extent */
- char wasdel; /* replacing a delayed allocation */
- char userdata;/* set if is user data */
- char low; /* low on space, using seq'l ags */
- char aeof; /* allocated space at eof */
- char conv; /* overwriting unwritten extents */
-} xfs_bmalloca_t;
+#define BMAP_LEFT_CONTIG (1 << 0)
+#define BMAP_RIGHT_CONTIG (1 << 1)
+#define BMAP_LEFT_FILLING (1 << 2)
+#define BMAP_RIGHT_FILLING (1 << 3)
+#define BMAP_LEFT_DELAY (1 << 4)
+#define BMAP_RIGHT_DELAY (1 << 5)
+#define BMAP_LEFT_VALID (1 << 6)
+#define BMAP_RIGHT_VALID (1 << 7)
+#define BMAP_ATTRFORK (1 << 8)
-#ifdef __KERNEL__
+#define XFS_BMAP_EXT_FLAGS \
+ { BMAP_LEFT_CONTIG, "LC" }, \
+ { BMAP_RIGHT_CONTIG, "RC" }, \
+ { BMAP_LEFT_FILLING, "LF" }, \
+ { BMAP_RIGHT_FILLING, "RF" }, \
+ { BMAP_ATTRFORK, "ATTR" }
-#if defined(XFS_BMAP_TRACE)
-/*
- * Trace operations for bmap extent tracing
- */
-#define XFS_BMAP_KTRACE_DELETE 1
-#define XFS_BMAP_KTRACE_INSERT 2
-#define XFS_BMAP_KTRACE_PRE_UP 3
-#define XFS_BMAP_KTRACE_POST_UP 4
-
-#define XFS_BMAP_TRACE_SIZE 4096 /* size of global trace buffer */
-#define XFS_BMAP_KTRACE_SIZE 32 /* size of per-inode trace buffer */
-extern ktrace_t *xfs_bmap_trace_buf;
/*
- * Add bmap trace insert entries for all the contents of the extent list.
+ * This macro determines how many extents will be shifted in one
+ * write transaction. A single shift could require two splits: one
+ * for the extent move and a second for the extent merge, so it is
+ * safest to shift only one extent per write transaction.
*/
-void
-xfs_bmap_trace_exlist(
- const char *fname, /* function name */
- struct xfs_inode *ip, /* incore inode pointer */
- xfs_extnum_t cnt, /* count of entries in list */
- int whichfork); /* data or attr fork */
+#define XFS_BMAP_MAX_SHIFT_EXTENTS 1
+
+#ifdef DEBUG
+void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
+ int whichfork, unsigned long caller_ip);
#define XFS_BMAP_TRACE_EXLIST(ip,c,w) \
- xfs_bmap_trace_exlist(__func__,ip,c,w)
+ xfs_bmap_trace_exlist(ip,c,w, _THIS_IP_)
#else
#define XFS_BMAP_TRACE_EXLIST(ip,c,w)
#endif
-/*
- * Convert inode from non-attributed to attributed.
- * Must not be in a transaction, ip must not be locked.
- */
-int /* error code */
-xfs_bmap_add_attrfork(
- struct xfs_inode *ip, /* incore inode pointer */
- int size, /* space needed for new attribute */
- int rsvd); /* flag for reserved block allocation */
-
-/*
- * Add the extent to the list of extents to be free at transaction end.
- * The list is maintained sorted (by block number).
- */
-void
-xfs_bmap_add_free(
- xfs_fsblock_t bno, /* fs block number of extent */
- xfs_filblks_t len, /* length of extent */
- xfs_bmap_free_t *flist, /* list of extents */
- struct xfs_mount *mp); /* mount point structure */
-
-/*
- * Routine to clean up the free list data structure when
- * an error occurs during a transaction.
- */
-void
-xfs_bmap_cancel(
- xfs_bmap_free_t *flist); /* free list to clean up */
-
-/*
- * Compute and fill in the value of the maximum depth of a bmap btree
- * in this filesystem. Done once, during mount.
- */
-void
-xfs_bmap_compute_maxlevels(
- struct xfs_mount *mp, /* file system mount structure */
- int whichfork); /* data or attr fork */
-
-/*
- * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
- * caller. Frees all the extents that need freeing, which must be done
- * last due to locking considerations.
- *
- * Return 1 if the given transaction was committed and a new one allocated,
- * and 0 otherwise.
- */
-int /* error */
-xfs_bmap_finish(
- struct xfs_trans **tp, /* transaction pointer addr */
- xfs_bmap_free_t *flist, /* i/o: list extents to free */
- int *committed); /* xact committed or not */
-
-/*
- * Returns the file-relative block number of the first unused block in the file.
- * This is the lowest-address hole if the file has holes, else the first block
- * past the end of file.
- */
-int /* error */
-xfs_bmap_first_unused(
- struct xfs_trans *tp, /* transaction pointer */
- struct xfs_inode *ip, /* incore inode */
- xfs_extlen_t len, /* size of hole to find */
- xfs_fileoff_t *unused, /* unused block num */
- int whichfork); /* data or attr fork */
-
-/*
- * Returns the file-relative block number of the last block + 1 before
- * last_block (input value) in the file.
- * This is not based on i_size, it is based on the extent list.
- * Returns 0 for local files, as they do not have an extent list.
- */
-int /* error */
-xfs_bmap_last_before(
- struct xfs_trans *tp, /* transaction pointer */
- struct xfs_inode *ip, /* incore inode */
- xfs_fileoff_t *last_block, /* last block */
- int whichfork); /* data or attr fork */
-
-/*
- * Returns the file-relative block number of the first block past eof in
- * the file. This is not based on i_size, it is based on the extent list.
- * Returns 0 for local files, as they do not have an extent list.
- */
-int /* error */
-xfs_bmap_last_offset(
- struct xfs_trans *tp, /* transaction pointer */
- struct xfs_inode *ip, /* incore inode */
- xfs_fileoff_t *unused, /* last block num */
- int whichfork); /* data or attr fork */
-
-/*
- * Returns whether the selected fork of the inode has exactly one
- * block or not. For the data fork we check this matches di_size,
- * implying the file's range is 0..bsize-1.
- */
-int
-xfs_bmap_one_block(
- struct xfs_inode *ip, /* incore inode */
- int whichfork); /* data or attr fork */
-
-/*
- * Read in the extents to iu_extents.
- * All inode fields are set up by caller, we just traverse the btree
- * and copy the records in.
- */
-int /* error */
-xfs_bmap_read_extents(
- struct xfs_trans *tp, /* transaction pointer */
- struct xfs_inode *ip, /* incore inode */
- int whichfork); /* data or attr fork */
-
-/*
- * Map file blocks to filesystem blocks.
- * File range is given by the bno/len pair.
- * Adds blocks to file if a write ("flags & XFS_BMAPI_WRITE" set)
- * into a hole or past eof.
- * Only allocates blocks from a single allocation group,
- * to avoid locking problems.
- * The returned value in "firstblock" from the first call in a transaction
- * must be remembered and presented to subsequent calls in "firstblock".
- * An upper bound for the number of blocks to be allocated is supplied to
- * the first call in "total"; if no allocation group has that many free
- * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
- */
-int /* error */
-xfs_bmapi(
- struct xfs_trans *tp, /* transaction pointer */
- struct xfs_inode *ip, /* incore inode */
- xfs_fileoff_t bno, /* starting file offs. mapped */
- xfs_filblks_t len, /* length to map in file */
- int flags, /* XFS_BMAPI_... */
- xfs_fsblock_t *firstblock, /* first allocated block
- controls a.g. for allocs */
- xfs_extlen_t total, /* total blocks needed */
- struct xfs_bmbt_irec *mval, /* output: map values */
- int *nmap, /* i/o: mval size/count */
- xfs_bmap_free_t *flist, /* i/o: list extents to free */
- xfs_extdelta_t *delta); /* o: change made to incore
- extents */
-
-/*
- * Map file blocks to filesystem blocks, simple version.
- * One block only, read-only.
- * For flags, only the XFS_BMAPI_ATTRFORK flag is examined.
- * For the other flag values, the effect is as if XFS_BMAPI_METADATA
- * was set and all the others were clear.
- */
-int /* error */
-xfs_bmapi_single(
- struct xfs_trans *tp, /* transaction pointer */
- struct xfs_inode *ip, /* incore inode */
- int whichfork, /* data or attr fork */
- xfs_fsblock_t *fsb, /* output: mapped block */
- xfs_fileoff_t bno); /* starting file offs. mapped */
-
-/*
- * Unmap (remove) blocks from a file.
- * If nexts is nonzero then the number of extents to remove is limited to
- * that value. If not all extents in the block range can be removed then
- * *done is set.
- */
-int /* error */
-xfs_bunmapi(
- struct xfs_trans *tp, /* transaction pointer */
- struct xfs_inode *ip, /* incore inode */
- xfs_fileoff_t bno, /* starting offset to unmap */
- xfs_filblks_t len, /* length to unmap in file */
- int flags, /* XFS_BMAPI_... */
- xfs_extnum_t nexts, /* number of extents max */
- xfs_fsblock_t *firstblock, /* first allocated block
- controls a.g. for allocs */
- xfs_bmap_free_t *flist, /* i/o: list extents to free */
- xfs_extdelta_t *delta, /* o: change made to incore
- extents */
- int *done); /* set if not done yet */
-
-/*
- * Fcntl interface to xfs_bmapi.
- */
-int /* error code */
-xfs_getbmap(
- xfs_inode_t *ip,
- struct getbmap *bmv, /* user bmap structure */
- void __user *ap, /* pointer to user's array */
- int iflags); /* interface flags */
-
-/*
- * Check if the endoff is outside the last extent. If so the caller will grow
- * the allocation to a stripe unit boundary
- */
-int
-xfs_bmap_eof(
- struct xfs_inode *ip,
- xfs_fileoff_t endoff,
- int whichfork,
- int *eof);
-
-/*
- * Count fsblocks of the given fork.
- */
-int
-xfs_bmap_count_blocks(
- xfs_trans_t *tp,
- struct xfs_inode *ip,
- int whichfork,
- int *count);
-
-/*
- * Check an extent list, which has just been read, for
- * any bit in the extent flag field.
- */
-int
-xfs_check_nostate_extents(
- struct xfs_ifork *ifp,
- xfs_extnum_t idx,
- xfs_extnum_t num);
-
-/*
- * Search the extent records for the entry containing block bno.
- * If bno lies in a hole, point to the next entry. If bno lies
- * past eof, *eofp will be set, and *prevp will contain the last
- * entry (null if none). Else, *lastxp will be set to the index
- * of the found entry; *gotp will contain the entry.
- */
-xfs_bmbt_rec_host_t *
-xfs_bmap_search_multi_extents(struct xfs_ifork *, xfs_fileoff_t, int *,
- xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *);
-
-#endif /* __KERNEL__ */
+int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
+void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
+void xfs_bmap_add_free(xfs_fsblock_t bno, xfs_filblks_t len,
+ struct xfs_bmap_free *flist, struct xfs_mount *mp);
+void xfs_bmap_cancel(struct xfs_bmap_free *flist);
+void xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork);
+int xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
+ xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork);
+int xfs_bmap_last_before(struct xfs_trans *tp, struct xfs_inode *ip,
+ xfs_fileoff_t *last_block, int whichfork);
+int xfs_bmap_last_offset(struct xfs_inode *ip, xfs_fileoff_t *unused,
+ int whichfork);
+int xfs_bmap_one_block(struct xfs_inode *ip, int whichfork);
+int xfs_bmap_read_extents(struct xfs_trans *tp, struct xfs_inode *ip,
+ int whichfork);
+int xfs_bmapi_read(struct xfs_inode *ip, xfs_fileoff_t bno,
+ xfs_filblks_t len, struct xfs_bmbt_irec *mval,
+ int *nmap, int flags);
+int xfs_bmapi_delay(struct xfs_inode *ip, xfs_fileoff_t bno,
+ xfs_filblks_t len, struct xfs_bmbt_irec *mval,
+ int *nmap, int flags);
+int xfs_bmapi_write(struct xfs_trans *tp, struct xfs_inode *ip,
+ xfs_fileoff_t bno, xfs_filblks_t len, int flags,
+ xfs_fsblock_t *firstblock, xfs_extlen_t total,
+ struct xfs_bmbt_irec *mval, int *nmap,
+ struct xfs_bmap_free *flist);
+int xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
+ xfs_fileoff_t bno, xfs_filblks_t len, int flags,
+ xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
+ struct xfs_bmap_free *flist, int *done);
+int xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
+ xfs_extnum_t num);
+uint xfs_default_attroffset(struct xfs_inode *ip);
+int xfs_bmap_shift_extents(struct xfs_trans *tp, struct xfs_inode *ip,
+ int *done, xfs_fileoff_t start_fsb,
+ xfs_fileoff_t offset_shift_fsb, xfs_extnum_t *current_ext,
+ xfs_fsblock_t *firstblock, struct xfs_bmap_free *flist,
+ int num_exts);
#endif /* __XFS_BMAP_H__ */
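
The header now exposes the split xfs_bmapi_read/xfs_bmapi_delay/xfs_bmapi_write interfaces in place of the old multiplexed xfs_bmapi(). As a hedged illustration only, not part of the patch, the sketch below shows how a caller might use the read-only variant declared above to map a single file block; the helper name, the single-mapping buffer and the assumption that the caller already holds the appropriate inode lock are all illustrative.

/*
 * Illustrative sketch: look up the mapping covering one file block with
 * the read-only interface. No allocation and no transaction are involved.
 */
STATIC int
example_lookup_block(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,	/* file offset, in fsblocks */
	struct xfs_bmbt_irec	*imap)		/* out: the mapping found */
{
	int			nmap = 1;	/* room for one mapping */
	int			error;

	/* caller is assumed to hold the inode lock */
	error = xfs_bmapi_read(ip, offset_fsb, 1, imap, &nmap, 0);
	if (error)
		return error;

	if (!nmap || imap->br_startblock == HOLESTARTBLOCK) {
		/* the offset falls in a hole: nothing is mapped here */
		return 0;
	}

	/* imap->br_startblock and br_blockcount now describe the extent */
	return 0;
}

Passing XFS_BMAPI_ATTRFORK in the flags argument would direct the same lookup at the attribute fork instead of the data fork.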
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 4f0e849d973..948836c4fd9 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -17,1425 +17,26 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
-#include "xfs_ialloc.h"
-#include "xfs_itable.h"
+#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
-
-#if defined(XFS_BMBT_TRACE)
-ktrace_t *xfs_bmbt_trace_buf;
-#endif
-
-/*
- * Prototypes for internal btree functions.
- */
-
-
-STATIC int xfs_bmbt_killroot(xfs_btree_cur_t *);
-STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
-STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
-STATIC int xfs_bmbt_lshift(xfs_btree_cur_t *, int, int *);
-STATIC int xfs_bmbt_rshift(xfs_btree_cur_t *, int, int *);
-STATIC int xfs_bmbt_split(xfs_btree_cur_t *, int, xfs_fsblock_t *,
- __uint64_t *, xfs_btree_cur_t **, int *);
-STATIC int xfs_bmbt_updkey(xfs_btree_cur_t *, xfs_bmbt_key_t *, int);
-
-
-#if defined(XFS_BMBT_TRACE)
-
-static char ARGS[] = "args";
-static char ENTRY[] = "entry";
-static char ERROR[] = "error";
-#undef EXIT
-static char EXIT[] = "exit";
-
-/*
- * Add a trace buffer entry for the arguments given to the routine,
- * generic form.
- */
-STATIC void
-xfs_bmbt_trace_enter(
- const char *func,
- xfs_btree_cur_t *cur,
- char *s,
- int type,
- int line,
- __psunsigned_t a0,
- __psunsigned_t a1,
- __psunsigned_t a2,
- __psunsigned_t a3,
- __psunsigned_t a4,
- __psunsigned_t a5,
- __psunsigned_t a6,
- __psunsigned_t a7,
- __psunsigned_t a8,
- __psunsigned_t a9,
- __psunsigned_t a10)
-{
- xfs_inode_t *ip;
- int whichfork;
-
- ip = cur->bc_private.b.ip;
- whichfork = cur->bc_private.b.whichfork;
- ktrace_enter(xfs_bmbt_trace_buf,
- (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
- (void *)func, (void *)s, (void *)ip, (void *)cur,
- (void *)a0, (void *)a1, (void *)a2, (void *)a3,
- (void *)a4, (void *)a5, (void *)a6, (void *)a7,
- (void *)a8, (void *)a9, (void *)a10);
- ASSERT(ip->i_btrace);
- ktrace_enter(ip->i_btrace,
- (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
- (void *)func, (void *)s, (void *)ip, (void *)cur,
- (void *)a0, (void *)a1, (void *)a2, (void *)a3,
- (void *)a4, (void *)a5, (void *)a6, (void *)a7,
- (void *)a8, (void *)a9, (void *)a10);
-}
-/*
- * Add a trace buffer entry for arguments, for a buffer & 1 integer arg.
- */
-STATIC void
-xfs_bmbt_trace_argbi(
- const char *func,
- xfs_btree_cur_t *cur,
- xfs_buf_t *b,
- int i,
- int line)
-{
- xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGBI, line,
- (__psunsigned_t)b, i, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0);
-}
-
-/*
- * Add a trace buffer entry for arguments, for a buffer & 2 integer args.
- */
-STATIC void
-xfs_bmbt_trace_argbii(
- const char *func,
- xfs_btree_cur_t *cur,
- xfs_buf_t *b,
- int i0,
- int i1,
- int line)
-{
- xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGBII, line,
- (__psunsigned_t)b, i0, i1, 0,
- 0, 0, 0, 0,
- 0, 0, 0);
-}
-
-/*
- * Add a trace buffer entry for arguments, for 3 block-length args
- * and an integer arg.
- */
-STATIC void
-xfs_bmbt_trace_argfffi(
- const char *func,
- xfs_btree_cur_t *cur,
- xfs_dfiloff_t o,
- xfs_dfsbno_t b,
- xfs_dfilblks_t i,
- int j,
- int line)
-{
- xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGFFFI, line,
- o >> 32, (int)o, b >> 32, (int)b,
- i >> 32, (int)i, (int)j, 0,
- 0, 0, 0);
-}
-
-/*
- * Add a trace buffer entry for arguments, for one integer arg.
- */
-STATIC void
-xfs_bmbt_trace_argi(
- const char *func,
- xfs_btree_cur_t *cur,
- int i,
- int line)
-{
- xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGI, line,
- i, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0);
-}
-
-/*
- * Add a trace buffer entry for arguments, for int, fsblock, key.
- */
-STATIC void
-xfs_bmbt_trace_argifk(
- const char *func,
- xfs_btree_cur_t *cur,
- int i,
- xfs_fsblock_t f,
- xfs_dfiloff_t o,
- int line)
-{
- xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line,
- i, (xfs_dfsbno_t)f >> 32, (int)f, o >> 32,
- (int)o, 0, 0, 0,
- 0, 0, 0);
-}
-
-/*
- * Add a trace buffer entry for arguments, for int, fsblock, rec.
- */
-STATIC void
-xfs_bmbt_trace_argifr(
- const char *func,
- xfs_btree_cur_t *cur,
- int i,
- xfs_fsblock_t f,
- xfs_bmbt_rec_t *r,
- int line)
-{
- xfs_dfsbno_t b;
- xfs_dfilblks_t c;
- xfs_dfsbno_t d;
- xfs_dfiloff_t o;
- xfs_bmbt_irec_t s;
-
- d = (xfs_dfsbno_t)f;
- xfs_bmbt_disk_get_all(r, &s);
- o = (xfs_dfiloff_t)s.br_startoff;
- b = (xfs_dfsbno_t)s.br_startblock;
- c = s.br_blockcount;
- xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFR, line,
- i, d >> 32, (int)d, o >> 32,
- (int)o, b >> 32, (int)b, c >> 32,
- (int)c, 0, 0);
-}
-
-/*
- * Add a trace buffer entry for arguments, for int, key.
- */
-STATIC void
-xfs_bmbt_trace_argik(
- const char *func,
- xfs_btree_cur_t *cur,
- int i,
- xfs_bmbt_key_t *k,
- int line)
-{
- xfs_dfiloff_t o;
-
- o = be64_to_cpu(k->br_startoff);
- xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line,
- i, o >> 32, (int)o, 0,
- 0, 0, 0, 0,
- 0, 0, 0);
-}
-
-/*
- * Add a trace buffer entry for the cursor/operation.
- */
-STATIC void
-xfs_bmbt_trace_cursor(
- const char *func,
- xfs_btree_cur_t *cur,
- char *s,
- int line)
-{
- xfs_bmbt_rec_host_t r;
-
- xfs_bmbt_set_all(&r, &cur->bc_rec.b);
- xfs_bmbt_trace_enter(func, cur, s, XFS_BMBT_KTRACE_CUR, line,
- (cur->bc_nlevels << 24) | (cur->bc_private.b.flags << 16) |
- cur->bc_private.b.allocated,
- r.l0 >> 32, (int)r.l0,
- r.l1 >> 32, (int)r.l1,
- (unsigned long)cur->bc_bufs[0], (unsigned long)cur->bc_bufs[1],
- (unsigned long)cur->bc_bufs[2], (unsigned long)cur->bc_bufs[3],
- (cur->bc_ptrs[0] << 16) | cur->bc_ptrs[1],
- (cur->bc_ptrs[2] << 16) | cur->bc_ptrs[3]);
-}
-
-#define XFS_BMBT_TRACE_ARGBI(c,b,i) \
- xfs_bmbt_trace_argbi(__func__, c, b, i, __LINE__)
-#define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \
- xfs_bmbt_trace_argbii(__func__, c, b, i, j, __LINE__)
-#define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \
- xfs_bmbt_trace_argfffi(__func__, c, o, b, i, j, __LINE__)
-#define XFS_BMBT_TRACE_ARGI(c,i) \
- xfs_bmbt_trace_argi(__func__, c, i, __LINE__)
-#define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) \
- xfs_bmbt_trace_argifk(__func__, c, i, f, s, __LINE__)
-#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \
- xfs_bmbt_trace_argifr(__func__, c, i, f, r, __LINE__)
-#define XFS_BMBT_TRACE_ARGIK(c,i,k) \
- xfs_bmbt_trace_argik(__func__, c, i, k, __LINE__)
-#define XFS_BMBT_TRACE_CURSOR(c,s) \
- xfs_bmbt_trace_cursor(__func__, c, s, __LINE__)
-#else
-#define XFS_BMBT_TRACE_ARGBI(c,b,i)
-#define XFS_BMBT_TRACE_ARGBII(c,b,i,j)
-#define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j)
-#define XFS_BMBT_TRACE_ARGI(c,i)
-#define XFS_BMBT_TRACE_ARGIFK(c,i,f,s)
-#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r)
-#define XFS_BMBT_TRACE_ARGIK(c,i,k)
-#define XFS_BMBT_TRACE_CURSOR(c,s)
-#endif /* XFS_BMBT_TRACE */
-
-
-/*
- * Internal functions.
- */
-
-/*
- * Delete record pointed to by cur/level.
- */
-STATIC int /* error */
-xfs_bmbt_delrec(
- xfs_btree_cur_t *cur,
- int level,
- int *stat) /* success/failure */
-{
- xfs_bmbt_block_t *block; /* bmap btree block */
- xfs_fsblock_t bno; /* fs-relative block number */
- xfs_buf_t *bp; /* buffer for block */
- int error; /* error return value */
- int i; /* loop counter */
- int j; /* temp state */
- xfs_bmbt_key_t key; /* bmap btree key */
- xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */
- xfs_fsblock_t lbno; /* left sibling block number */
- xfs_buf_t *lbp; /* left buffer pointer */
- xfs_bmbt_block_t *left; /* left btree block */
- xfs_bmbt_key_t *lkp; /* left btree key */
- xfs_bmbt_ptr_t *lpp; /* left address pointer */
- int lrecs=0; /* left record count */
- xfs_bmbt_rec_t *lrp; /* left record pointer */
- xfs_mount_t *mp; /* file system mount point */
- xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
- int ptr; /* key/record index */
- xfs_fsblock_t rbno; /* right sibling block number */
- xfs_buf_t *rbp; /* right buffer pointer */
- xfs_bmbt_block_t *right; /* right btree block */
- xfs_bmbt_key_t *rkp; /* right btree key */
- xfs_bmbt_rec_t *rp; /* pointer to bmap btree rec */
- xfs_bmbt_ptr_t *rpp; /* right address pointer */
- xfs_bmbt_block_t *rrblock; /* right-right btree block */
- xfs_buf_t *rrbp; /* right-right buffer pointer */
- int rrecs=0; /* right record count */
- xfs_bmbt_rec_t *rrp; /* right record pointer */
- xfs_btree_cur_t *tcur; /* temporary btree cursor */
- int numrecs; /* temporary numrec count */
- int numlrecs, numrrecs;
-
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- XFS_BMBT_TRACE_ARGI(cur, level);
- ptr = cur->bc_ptrs[level];
- tcur = NULL;
- if (ptr == 0) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- block = xfs_bmbt_get_block(cur, level, &bp);
- numrecs = be16_to_cpu(block->bb_numrecs);
-#ifdef DEBUG
- if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
-#endif
- if (ptr > numrecs) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- XFS_STATS_INC(xs_bmbt_delrec);
- if (level > 0) {
- kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
- pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
-#ifdef DEBUG
- for (i = ptr; i < numrecs; i++) {
- if ((error = xfs_btree_check_lptr_disk(cur, pp[i], level))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- }
-#endif
- if (ptr < numrecs) {
- memmove(&kp[ptr - 1], &kp[ptr],
- (numrecs - ptr) * sizeof(*kp));
- memmove(&pp[ptr - 1], &pp[ptr],
- (numrecs - ptr) * sizeof(*pp));
- xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs - 1);
- xfs_bmbt_log_keys(cur, bp, ptr, numrecs - 1);
- }
- } else {
- rp = XFS_BMAP_REC_IADDR(block, 1, cur);
- if (ptr < numrecs) {
- memmove(&rp[ptr - 1], &rp[ptr],
- (numrecs - ptr) * sizeof(*rp));
- xfs_bmbt_log_recs(cur, bp, ptr, numrecs - 1);
- }
- if (ptr == 1) {
- key.br_startoff =
- cpu_to_be64(xfs_bmbt_disk_get_startoff(rp));
- kp = &key;
- }
- }
- numrecs--;
- block->bb_numrecs = cpu_to_be16(numrecs);
- xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
- /*
- * We're at the root level.
- * First, shrink the root block in-memory.
- * Try to get rid of the next level down.
- * If we can't then there's nothing left to do.
- */
- if (level == cur->bc_nlevels - 1) {
- xfs_iroot_realloc(cur->bc_private.b.ip, -1,
- cur->bc_private.b.whichfork);
- if ((error = xfs_bmbt_killroot(cur))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &j))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 1;
- return 0;
- }
- if (ptr == 1 && (error = xfs_bmbt_updkey(cur, kp, level + 1))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- if (numrecs >= XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
- if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &j))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 1;
- return 0;
- }
- rbno = be64_to_cpu(block->bb_rightsib);
- lbno = be64_to_cpu(block->bb_leftsib);
- /*
- * One child of root, need to get a chance to copy its contents
- * into the root and delete it. Can't go up to next level,
- * there's nothing to delete there.
- */
- if (lbno == NULLFSBLOCK && rbno == NULLFSBLOCK &&
- level == cur->bc_nlevels - 2) {
- if ((error = xfs_bmbt_killroot(cur))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 1;
- return 0;
- }
- ASSERT(rbno != NULLFSBLOCK || lbno != NULLFSBLOCK);
- if ((error = xfs_btree_dup_cursor(cur, &tcur))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- bno = NULLFSBLOCK;
- if (rbno != NULLFSBLOCK) {
- i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- if ((error = xfs_bmbt_increment(tcur, level, &i))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- rbp = tcur->bc_bufs[level];
- right = XFS_BUF_TO_BMBT_BLOCK(rbp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
-#endif
- bno = be64_to_cpu(right->bb_leftsib);
- if (be16_to_cpu(right->bb_numrecs) - 1 >=
- XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
- if ((error = xfs_bmbt_lshift(tcur, level, &i))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- if (i) {
- ASSERT(be16_to_cpu(block->bb_numrecs) >=
- XFS_BMAP_BLOCK_IMINRECS(level, tcur));
- xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
- tcur = NULL;
- if (level > 0) {
- if ((error = xfs_bmbt_decrement(cur,
- level, &i))) {
- XFS_BMBT_TRACE_CURSOR(cur,
- ERROR);
- goto error0;
- }
- }
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 1;
- return 0;
- }
- }
- rrecs = be16_to_cpu(right->bb_numrecs);
- if (lbno != NULLFSBLOCK) {
- i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- if ((error = xfs_bmbt_decrement(tcur, level, &i))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- }
- }
- if (lbno != NULLFSBLOCK) {
- i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- /*
- * decrement to last in block
- */
- if ((error = xfs_bmbt_decrement(tcur, level, &i))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- lbp = tcur->bc_bufs[level];
- left = XFS_BUF_TO_BMBT_BLOCK(lbp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
-#endif
- bno = be64_to_cpu(left->bb_rightsib);
- if (be16_to_cpu(left->bb_numrecs) - 1 >=
- XFS_BMAP_BLOCK_IMINRECS(level, cur)) {
- if ((error = xfs_bmbt_rshift(tcur, level, &i))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- if (i) {
- ASSERT(be16_to_cpu(block->bb_numrecs) >=
- XFS_BMAP_BLOCK_IMINRECS(level, tcur));
- xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
- tcur = NULL;
- if (level == 0)
- cur->bc_ptrs[0]++;
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 1;
- return 0;
- }
- }
- lrecs = be16_to_cpu(left->bb_numrecs);
- }
- xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
- tcur = NULL;
- mp = cur->bc_mp;
- ASSERT(bno != NULLFSBLOCK);
- if (lbno != NULLFSBLOCK &&
- lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
- rbno = bno;
- right = block;
- rbp = bp;
- if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, lbno, 0, &lbp,
- XFS_BMAP_BTREE_REF))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- left = XFS_BUF_TO_BMBT_BLOCK(lbp);
- if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- } else if (rbno != NULLFSBLOCK &&
- rrecs + be16_to_cpu(block->bb_numrecs) <=
- XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
- lbno = bno;
- left = block;
- lbp = bp;
- if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, rbno, 0, &rbp,
- XFS_BMAP_BTREE_REF))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- right = XFS_BUF_TO_BMBT_BLOCK(rbp);
- if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- lrecs = be16_to_cpu(left->bb_numrecs);
- } else {
- if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 1;
- return 0;
- }
- numlrecs = be16_to_cpu(left->bb_numrecs);
- numrrecs = be16_to_cpu(right->bb_numrecs);
- if (level > 0) {
- lkp = XFS_BMAP_KEY_IADDR(left, numlrecs + 1, cur);
- lpp = XFS_BMAP_PTR_IADDR(left, numlrecs + 1, cur);
- rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
- rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
-#ifdef DEBUG
- for (i = 0; i < numrrecs; i++) {
- if ((error = xfs_btree_check_lptr_disk(cur, rpp[i], level))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- }
-#endif
- memcpy(lkp, rkp, numrrecs * sizeof(*lkp));
- memcpy(lpp, rpp, numrrecs * sizeof(*lpp));
- xfs_bmbt_log_keys(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
- xfs_bmbt_log_ptrs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
- } else {
- lrp = XFS_BMAP_REC_IADDR(left, numlrecs + 1, cur);
- rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
- memcpy(lrp, rrp, numrrecs * sizeof(*lrp));
- xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
- }
- be16_add_cpu(&left->bb_numrecs, numrrecs);
- left->bb_rightsib = right->bb_rightsib;
- xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS);
- if (be64_to_cpu(left->bb_rightsib) != NULLDFSBNO) {
- if ((error = xfs_btree_read_bufl(mp, cur->bc_tp,
- be64_to_cpu(left->bb_rightsib),
- 0, &rrbp, XFS_BMAP_BTREE_REF))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp);
- if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- rrblock->bb_leftsib = cpu_to_be64(lbno);
- xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB);
- }
- xfs_bmap_add_free(XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(rbp)), 1,
- cur->bc_private.b.flist, mp);
- cur->bc_private.b.ip->i_d.di_nblocks--;
- xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
- XFS_TRANS_MOD_DQUOT_BYINO(mp, cur->bc_tp, cur->bc_private.b.ip,
- XFS_TRANS_DQ_BCOUNT, -1L);
- xfs_trans_binval(cur->bc_tp, rbp);
- if (bp != lbp) {
- cur->bc_bufs[level] = lbp;
- cur->bc_ptrs[level] += lrecs;
- cur->bc_ra[level] = 0;
- } else if ((error = xfs_bmbt_increment(cur, level + 1, &i))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- goto error0;
- }
- if (level > 0)
- cur->bc_ptrs[level]--;
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 2;
- return 0;
-
-error0:
- if (tcur)
- xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
- return error;
-}
-
-/*
- * Insert one record/level. Return information to the caller
- * allowing the next level up to proceed if necessary.
- */
-STATIC int /* error */
-xfs_bmbt_insrec(
- xfs_btree_cur_t *cur,
- int level,
- xfs_fsblock_t *bnop,
- xfs_bmbt_rec_t *recp,
- xfs_btree_cur_t **curp,
- int *stat) /* no-go/done/continue */
-{
- xfs_bmbt_block_t *block; /* bmap btree block */
- xfs_buf_t *bp; /* buffer for block */
- int error; /* error return value */
- int i; /* loop index */
- xfs_bmbt_key_t key; /* bmap btree key */
- xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */
- int logflags; /* inode logging flags */
- xfs_fsblock_t nbno; /* new block number */
- struct xfs_btree_cur *ncur; /* new btree cursor */
- __uint64_t startoff; /* new btree key value */
- xfs_bmbt_rec_t nrec; /* new record count */
- int optr; /* old key/record index */
- xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
- int ptr; /* key/record index */
- xfs_bmbt_rec_t *rp=NULL; /* pointer to bmap btree rec */
- int numrecs;
-
- ASSERT(level < cur->bc_nlevels);
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- XFS_BMBT_TRACE_ARGIFR(cur, level, *bnop, recp);
- ncur = NULL;
- key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(recp));
- optr = ptr = cur->bc_ptrs[level];
- if (ptr == 0) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- XFS_STATS_INC(xs_bmbt_insrec);
- block = xfs_bmbt_get_block(cur, level, &bp);
- numrecs = be16_to_cpu(block->bb_numrecs);
-#ifdef DEBUG
- if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- if (ptr <= numrecs) {
- if (level == 0) {
- rp = XFS_BMAP_REC_IADDR(block, ptr, cur);
- xfs_btree_check_rec(XFS_BTNUM_BMAP, recp, rp);
- } else {
- kp = XFS_BMAP_KEY_IADDR(block, ptr, cur);
- xfs_btree_check_key(XFS_BTNUM_BMAP, &key, kp);
- }
- }
-#endif
- nbno = NULLFSBLOCK;
- if (numrecs == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
- if (numrecs < XFS_BMAP_BLOCK_DMAXRECS(level, cur)) {
- /*
- * A root block, that can be made bigger.
- */
- xfs_iroot_realloc(cur->bc_private.b.ip, 1,
- cur->bc_private.b.whichfork);
- block = xfs_bmbt_get_block(cur, level, &bp);
- } else if (level == cur->bc_nlevels - 1) {
- if ((error = xfs_bmbt_newroot(cur, &logflags, stat)) ||
- *stat == 0) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
- logflags);
- block = xfs_bmbt_get_block(cur, level, &bp);
- } else {
- if ((error = xfs_bmbt_rshift(cur, level, &i))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- if (i) {
- /* nothing */
- } else {
- if ((error = xfs_bmbt_lshift(cur, level, &i))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- if (i) {
- optr = ptr = cur->bc_ptrs[level];
- } else {
- if ((error = xfs_bmbt_split(cur, level,
- &nbno, &startoff, &ncur,
- &i))) {
- XFS_BMBT_TRACE_CURSOR(cur,
- ERROR);
- return error;
- }
- if (i) {
- block = xfs_bmbt_get_block(
- cur, level, &bp);
-#ifdef DEBUG
- if ((error =
- xfs_btree_check_lblock(cur,
- block, level, bp))) {
- XFS_BMBT_TRACE_CURSOR(
- cur, ERROR);
- return error;
- }
-#endif
- ptr = cur->bc_ptrs[level];
- xfs_bmbt_disk_set_allf(&nrec,
- startoff, 0, 0,
- XFS_EXT_NORM);
- } else {
- XFS_BMBT_TRACE_CURSOR(cur,
- EXIT);
- *stat = 0;
- return 0;
- }
- }
- }
- }
- }
- numrecs = be16_to_cpu(block->bb_numrecs);
- if (level > 0) {
- kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
- pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
-#ifdef DEBUG
- for (i = numrecs; i >= ptr; i--) {
- if ((error = xfs_btree_check_lptr_disk(cur, pp[i - 1],
- level))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- }
-#endif
- memmove(&kp[ptr], &kp[ptr - 1],
- (numrecs - ptr + 1) * sizeof(*kp));
- memmove(&pp[ptr], &pp[ptr - 1],
- (numrecs - ptr + 1) * sizeof(*pp));
-#ifdef DEBUG
- if ((error = xfs_btree_check_lptr(cur, *bnop, level))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
-#endif
- kp[ptr - 1] = key;
- pp[ptr - 1] = cpu_to_be64(*bnop);
- numrecs++;
- block->bb_numrecs = cpu_to_be16(numrecs);
- xfs_bmbt_log_keys(cur, bp, ptr, numrecs);
- xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs);
- } else {
- rp = XFS_BMAP_REC_IADDR(block, 1, cur);
- memmove(&rp[ptr], &rp[ptr - 1],
- (numrecs - ptr + 1) * sizeof(*rp));
- rp[ptr - 1] = *recp;
- numrecs++;
- block->bb_numrecs = cpu_to_be16(numrecs);
- xfs_bmbt_log_recs(cur, bp, ptr, numrecs);
- }
- xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
-#ifdef DEBUG
- if (ptr < numrecs) {
- if (level == 0)
- xfs_btree_check_rec(XFS_BTNUM_BMAP, rp + ptr - 1,
- rp + ptr);
- else
- xfs_btree_check_key(XFS_BTNUM_BMAP, kp + ptr - 1,
- kp + ptr);
- }
-#endif
- if (optr == 1 && (error = xfs_bmbt_updkey(cur, &key, level + 1))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- *bnop = nbno;
- if (nbno != NULLFSBLOCK) {
- *recp = nrec;
- *curp = ncur;
- }
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 1;
- return 0;
-}
-
-STATIC int
-xfs_bmbt_killroot(
- xfs_btree_cur_t *cur)
-{
- xfs_bmbt_block_t *block;
- xfs_bmbt_block_t *cblock;
- xfs_buf_t *cbp;
- xfs_bmbt_key_t *ckp;
- xfs_bmbt_ptr_t *cpp;
-#ifdef DEBUG
- int error;
-#endif
- int i;
- xfs_bmbt_key_t *kp;
- xfs_inode_t *ip;
- xfs_ifork_t *ifp;
- int level;
- xfs_bmbt_ptr_t *pp;
-
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- level = cur->bc_nlevels - 1;
- ASSERT(level >= 1);
- /*
-	 * Don't deal with the case where the root block needs to be a leaf.
- * We're just going to turn the thing back into extents anyway.
- */
- if (level == 1) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- return 0;
- }
- block = xfs_bmbt_get_block(cur, level, &cbp);
- /*
- * Give up if the root has multiple children.
- */
- if (be16_to_cpu(block->bb_numrecs) != 1) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- return 0;
- }
- /*
- * Only do this if the next level will fit.
-	 * Then the data must be copied up to the inode;
-	 * instead of freeing the root, you free the next level.
- */
- cbp = cur->bc_bufs[level - 1];
- cblock = XFS_BUF_TO_BMBT_BLOCK(cbp);
- if (be16_to_cpu(cblock->bb_numrecs) > XFS_BMAP_BLOCK_DMAXRECS(level, cur)) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- return 0;
- }
- ASSERT(be64_to_cpu(cblock->bb_leftsib) == NULLDFSBNO);
- ASSERT(be64_to_cpu(cblock->bb_rightsib) == NULLDFSBNO);
- ip = cur->bc_private.b.ip;
- ifp = XFS_IFORK_PTR(ip, cur->bc_private.b.whichfork);
- ASSERT(XFS_BMAP_BLOCK_IMAXRECS(level, cur) ==
- XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes));
- i = (int)(be16_to_cpu(cblock->bb_numrecs) - XFS_BMAP_BLOCK_IMAXRECS(level, cur));
- if (i) {
- xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork);
- block = ifp->if_broot;
- }
- be16_add_cpu(&block->bb_numrecs, i);
- ASSERT(block->bb_numrecs == cblock->bb_numrecs);
- kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
- ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
- memcpy(kp, ckp, be16_to_cpu(block->bb_numrecs) * sizeof(*kp));
- pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
- cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
-#ifdef DEBUG
- for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
- if ((error = xfs_btree_check_lptr_disk(cur, cpp[i], level - 1))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- }
-#endif
- memcpy(pp, cpp, be16_to_cpu(block->bb_numrecs) * sizeof(*pp));
- xfs_bmap_add_free(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(cbp)), 1,
- cur->bc_private.b.flist, cur->bc_mp);
- ip->i_d.di_nblocks--;
- XFS_TRANS_MOD_DQUOT_BYINO(cur->bc_mp, cur->bc_tp, ip,
- XFS_TRANS_DQ_BCOUNT, -1L);
- xfs_trans_binval(cur->bc_tp, cbp);
- cur->bc_bufs[level - 1] = NULL;
- be16_add_cpu(&block->bb_level, -1);
- xfs_trans_log_inode(cur->bc_tp, ip,
- XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
- cur->bc_nlevels--;
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- return 0;
-}
-
-/*
- * Log key values from the btree block.
- */
-STATIC void
-xfs_bmbt_log_keys(
- xfs_btree_cur_t *cur,
- xfs_buf_t *bp,
- int kfirst,
- int klast)
-{
- xfs_trans_t *tp;
-
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- XFS_BMBT_TRACE_ARGBII(cur, bp, kfirst, klast);
- tp = cur->bc_tp;
- if (bp) {
- xfs_bmbt_block_t *block;
- int first;
- xfs_bmbt_key_t *kp;
- int last;
-
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
- kp = XFS_BMAP_KEY_DADDR(block, 1, cur);
- first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block);
- last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block);
- xfs_trans_log_buf(tp, bp, first, last);
- } else {
- xfs_inode_t *ip;
-
- ip = cur->bc_private.b.ip;
- xfs_trans_log_inode(tp, ip,
- XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
- }
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
-}
-
-/*
- * Log pointer values from the btree block.
- */
-STATIC void
-xfs_bmbt_log_ptrs(
- xfs_btree_cur_t *cur,
- xfs_buf_t *bp,
- int pfirst,
- int plast)
-{
- xfs_trans_t *tp;
-
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- XFS_BMBT_TRACE_ARGBII(cur, bp, pfirst, plast);
- tp = cur->bc_tp;
- if (bp) {
- xfs_bmbt_block_t *block;
- int first;
- int last;
- xfs_bmbt_ptr_t *pp;
-
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
- pp = XFS_BMAP_PTR_DADDR(block, 1, cur);
- first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block);
- last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block);
- xfs_trans_log_buf(tp, bp, first, last);
- } else {
- xfs_inode_t *ip;
-
- ip = cur->bc_private.b.ip;
- xfs_trans_log_inode(tp, ip,
- XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
- }
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
-}
-
-/*
- * Look up the record. The cursor is made to point to it, based on dir.
- */
-STATIC int /* error */
-xfs_bmbt_lookup(
- xfs_btree_cur_t *cur,
- xfs_lookup_t dir,
- int *stat) /* success/failure */
-{
- xfs_bmbt_block_t *block=NULL;
- xfs_buf_t *bp;
- xfs_daddr_t d;
- xfs_sfiloff_t diff;
- int error; /* error return value */
- xfs_fsblock_t fsbno=0;
- int high;
- int i;
- int keyno=0;
- xfs_bmbt_key_t *kkbase=NULL;
- xfs_bmbt_key_t *kkp;
- xfs_bmbt_rec_t *krbase=NULL;
- xfs_bmbt_rec_t *krp;
- int level;
- int low;
- xfs_mount_t *mp;
- xfs_bmbt_ptr_t *pp;
- xfs_bmbt_irec_t *rp;
- xfs_fileoff_t startoff;
- xfs_trans_t *tp;
-
- XFS_STATS_INC(xs_bmbt_lookup);
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- XFS_BMBT_TRACE_ARGI(cur, (int)dir);
- tp = cur->bc_tp;
- mp = cur->bc_mp;
- rp = &cur->bc_rec.b;
- for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
- if (level < cur->bc_nlevels - 1) {
- d = XFS_FSB_TO_DADDR(mp, fsbno);
- bp = cur->bc_bufs[level];
- if (bp && XFS_BUF_ADDR(bp) != d)
- bp = NULL;
- if (!bp) {
- if ((error = xfs_btree_read_bufl(mp, tp, fsbno,
- 0, &bp, XFS_BMAP_BTREE_REF))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- xfs_btree_setbuf(cur, level, bp);
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
- if ((error = xfs_btree_check_lblock(cur, block,
- level, bp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- } else
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
- } else
- block = xfs_bmbt_get_block(cur, level, &bp);
- if (diff == 0)
- keyno = 1;
- else {
- if (level > 0)
- kkbase = XFS_BMAP_KEY_IADDR(block, 1, cur);
- else
- krbase = XFS_BMAP_REC_IADDR(block, 1, cur);
- low = 1;
- if (!(high = be16_to_cpu(block->bb_numrecs))) {
- ASSERT(level == 0);
- cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- while (low <= high) {
- XFS_STATS_INC(xs_bmbt_compare);
- keyno = (low + high) >> 1;
- if (level > 0) {
- kkp = kkbase + keyno - 1;
- startoff = be64_to_cpu(kkp->br_startoff);
- } else {
- krp = krbase + keyno - 1;
- startoff = xfs_bmbt_disk_get_startoff(krp);
- }
- diff = (xfs_sfiloff_t)
- (startoff - rp->br_startoff);
- if (diff < 0)
- low = keyno + 1;
- else if (diff > 0)
- high = keyno - 1;
- else
- break;
- }
- }
- if (level > 0) {
- if (diff > 0 && --keyno < 1)
- keyno = 1;
- pp = XFS_BMAP_PTR_IADDR(block, keyno, cur);
- fsbno = be64_to_cpu(*pp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_lptr(cur, fsbno, level))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
-#endif
- cur->bc_ptrs[level] = keyno;
- }
- }
- if (dir != XFS_LOOKUP_LE && diff < 0) {
- keyno++;
- /*
-		 * If this is a GE search and we went off the end of the block,
-		 * but it's not the last block, we're in the wrong block.
- */
- if (dir == XFS_LOOKUP_GE && keyno > be16_to_cpu(block->bb_numrecs) &&
- be64_to_cpu(block->bb_rightsib) != NULLDFSBNO) {
- cur->bc_ptrs[0] = keyno;
- if ((error = xfs_bmbt_increment(cur, 0, &i))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- XFS_WANT_CORRUPTED_RETURN(i == 1);
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 1;
- return 0;
- }
- }
- else if (dir == XFS_LOOKUP_LE && diff > 0)
- keyno--;
- cur->bc_ptrs[0] = keyno;
- if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs)) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- } else {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0));
- }
- return 0;
-}
-
-/*
- * Move 1 record left from cur/level if possible.
- * Update cur to reflect the new path.
- */
-STATIC int /* error */
-xfs_bmbt_lshift(
- xfs_btree_cur_t *cur,
- int level,
- int *stat) /* success/failure */
-{
- int error; /* error return value */
-#ifdef DEBUG
- int i; /* loop counter */
-#endif
- xfs_bmbt_key_t key; /* bmap btree key */
- xfs_buf_t *lbp; /* left buffer pointer */
- xfs_bmbt_block_t *left; /* left btree block */
- xfs_bmbt_key_t *lkp=NULL; /* left btree key */
- xfs_bmbt_ptr_t *lpp; /* left address pointer */
- int lrecs; /* left record count */
- xfs_bmbt_rec_t *lrp=NULL; /* left record pointer */
- xfs_mount_t *mp; /* file system mount point */
- xfs_buf_t *rbp; /* right buffer pointer */
- xfs_bmbt_block_t *right; /* right btree block */
- xfs_bmbt_key_t *rkp=NULL; /* right btree key */
- xfs_bmbt_ptr_t *rpp=NULL; /* right address pointer */
- xfs_bmbt_rec_t *rrp=NULL; /* right record pointer */
- int rrecs; /* right record count */
-
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- XFS_BMBT_TRACE_ARGI(cur, level);
- if (level == cur->bc_nlevels - 1) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- rbp = cur->bc_bufs[level];
- right = XFS_BUF_TO_BMBT_BLOCK(rbp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
-#endif
- if (be64_to_cpu(right->bb_leftsib) == NULLDFSBNO) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- if (cur->bc_ptrs[level] <= 1) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- mp = cur->bc_mp;
- if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, be64_to_cpu(right->bb_leftsib), 0,
- &lbp, XFS_BMAP_BTREE_REF))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- left = XFS_BUF_TO_BMBT_BLOCK(lbp);
- if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- if (be16_to_cpu(left->bb_numrecs) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- lrecs = be16_to_cpu(left->bb_numrecs) + 1;
- if (level > 0) {
- lkp = XFS_BMAP_KEY_IADDR(left, lrecs, cur);
- rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
- *lkp = *rkp;
- xfs_bmbt_log_keys(cur, lbp, lrecs, lrecs);
- lpp = XFS_BMAP_PTR_IADDR(left, lrecs, cur);
- rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
-#ifdef DEBUG
- if ((error = xfs_btree_check_lptr_disk(cur, *rpp, level))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
-#endif
- *lpp = *rpp;
- xfs_bmbt_log_ptrs(cur, lbp, lrecs, lrecs);
- } else {
- lrp = XFS_BMAP_REC_IADDR(left, lrecs, cur);
- rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
- *lrp = *rrp;
- xfs_bmbt_log_recs(cur, lbp, lrecs, lrecs);
- }
- left->bb_numrecs = cpu_to_be16(lrecs);
- xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS);
-#ifdef DEBUG
- if (level > 0)
- xfs_btree_check_key(XFS_BTNUM_BMAP, lkp - 1, lkp);
- else
- xfs_btree_check_rec(XFS_BTNUM_BMAP, lrp - 1, lrp);
-#endif
- rrecs = be16_to_cpu(right->bb_numrecs) - 1;
- right->bb_numrecs = cpu_to_be16(rrecs);
- xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS);
- if (level > 0) {
-#ifdef DEBUG
- for (i = 0; i < rrecs; i++) {
- if ((error = xfs_btree_check_lptr_disk(cur, rpp[i + 1],
- level))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- }
-#endif
- memmove(rkp, rkp + 1, rrecs * sizeof(*rkp));
- memmove(rpp, rpp + 1, rrecs * sizeof(*rpp));
- xfs_bmbt_log_keys(cur, rbp, 1, rrecs);
- xfs_bmbt_log_ptrs(cur, rbp, 1, rrecs);
- } else {
- memmove(rrp, rrp + 1, rrecs * sizeof(*rrp));
- xfs_bmbt_log_recs(cur, rbp, 1, rrecs);
- key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp));
- rkp = &key;
- }
- if ((error = xfs_bmbt_updkey(cur, rkp, level + 1))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- cur->bc_ptrs[level]--;
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 1;
- return 0;
-}
-
-/*
- * Move 1 record right from cur/level if possible.
- * Update cur to reflect the new path.
- */
-STATIC int /* error */
-xfs_bmbt_rshift(
- xfs_btree_cur_t *cur,
- int level,
- int *stat) /* success/failure */
-{
- int error; /* error return value */
- int i; /* loop counter */
- xfs_bmbt_key_t key; /* bmap btree key */
- xfs_buf_t *lbp; /* left buffer pointer */
- xfs_bmbt_block_t *left; /* left btree block */
- xfs_bmbt_key_t *lkp; /* left btree key */
- xfs_bmbt_ptr_t *lpp; /* left address pointer */
- xfs_bmbt_rec_t *lrp; /* left record pointer */
- xfs_mount_t *mp; /* file system mount point */
- xfs_buf_t *rbp; /* right buffer pointer */
- xfs_bmbt_block_t *right; /* right btree block */
- xfs_bmbt_key_t *rkp; /* right btree key */
- xfs_bmbt_ptr_t *rpp; /* right address pointer */
- xfs_bmbt_rec_t *rrp=NULL; /* right record pointer */
- struct xfs_btree_cur *tcur; /* temporary btree cursor */
-
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- XFS_BMBT_TRACE_ARGI(cur, level);
- if (level == cur->bc_nlevels - 1) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- lbp = cur->bc_bufs[level];
- left = XFS_BUF_TO_BMBT_BLOCK(lbp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
-#endif
- if (be64_to_cpu(left->bb_rightsib) == NULLDFSBNO) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- mp = cur->bc_mp;
- if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, be64_to_cpu(left->bb_rightsib), 0,
- &rbp, XFS_BMAP_BTREE_REF))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- right = XFS_BUF_TO_BMBT_BLOCK(rbp);
- if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- if (be16_to_cpu(right->bb_numrecs) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- if (level > 0) {
- lkp = XFS_BMAP_KEY_IADDR(left, be16_to_cpu(left->bb_numrecs), cur);
- lpp = XFS_BMAP_PTR_IADDR(left, be16_to_cpu(left->bb_numrecs), cur);
- rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
- rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
-#ifdef DEBUG
- for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) {
- if ((error = xfs_btree_check_lptr_disk(cur, rpp[i], level))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- }
-#endif
- memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
- memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
-#ifdef DEBUG
- if ((error = xfs_btree_check_lptr_disk(cur, *lpp, level))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
-#endif
- *rkp = *lkp;
- *rpp = *lpp;
- xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
- xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
- } else {
- lrp = XFS_BMAP_REC_IADDR(left, be16_to_cpu(left->bb_numrecs), cur);
- rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
- memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
- *rrp = *lrp;
- xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1);
- key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp));
- rkp = &key;
- }
- be16_add_cpu(&left->bb_numrecs, -1);
- xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS);
- be16_add_cpu(&right->bb_numrecs, 1);
-#ifdef DEBUG
- if (level > 0)
- xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1);
- else
- xfs_btree_check_rec(XFS_BTNUM_BMAP, rrp, rrp + 1);
-#endif
- xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS);
- if ((error = xfs_btree_dup_cursor(cur, &tcur))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- if ((error = xfs_bmbt_increment(tcur, level, &i))) {
- XFS_BMBT_TRACE_CURSOR(tcur, ERROR);
- goto error1;
- }
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- if ((error = xfs_bmbt_updkey(tcur, rkp, level + 1))) {
- XFS_BMBT_TRACE_CURSOR(tcur, ERROR);
- goto error1;
- }
- xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 1;
- return 0;
-error0:
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
-error1:
- xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
- return error;
-}
+#include "xfs_trace.h"
+#include "xfs_cksum.h"
+#include "xfs_dinode.h"
/*
* Determine the extent state.
@@ -1453,340 +54,52 @@ xfs_extent_state(
return XFS_EXT_NORM;
}
-
-/*
- * Split cur/level block in half.
- * Return new block number and its first record (to be inserted into parent).
- */
-STATIC int /* error */
-xfs_bmbt_split(
- xfs_btree_cur_t *cur,
- int level,
- xfs_fsblock_t *bnop,
- __uint64_t *startoff,
- xfs_btree_cur_t **curp,
- int *stat) /* success/failure */
-{
- xfs_alloc_arg_t args; /* block allocation args */
- int error; /* error return value */
- int i; /* loop counter */
- xfs_fsblock_t lbno; /* left sibling block number */
- xfs_buf_t *lbp; /* left buffer pointer */
- xfs_bmbt_block_t *left; /* left btree block */
- xfs_bmbt_key_t *lkp; /* left btree key */
- xfs_bmbt_ptr_t *lpp; /* left address pointer */
- xfs_bmbt_rec_t *lrp; /* left record pointer */
- xfs_buf_t *rbp; /* right buffer pointer */
- xfs_bmbt_block_t *right; /* right btree block */
- xfs_bmbt_key_t *rkp; /* right btree key */
- xfs_bmbt_ptr_t *rpp; /* right address pointer */
- xfs_bmbt_block_t *rrblock; /* right-right btree block */
- xfs_buf_t *rrbp; /* right-right buffer pointer */
- xfs_bmbt_rec_t *rrp; /* right record pointer */
-
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- XFS_BMBT_TRACE_ARGIFK(cur, level, *bnop, *startoff);
- args.tp = cur->bc_tp;
- args.mp = cur->bc_mp;
- lbp = cur->bc_bufs[level];
- lbno = XFS_DADDR_TO_FSB(args.mp, XFS_BUF_ADDR(lbp));
- left = XFS_BUF_TO_BMBT_BLOCK(lbp);
- args.fsbno = cur->bc_private.b.firstblock;
- args.firstblock = args.fsbno;
- if (args.fsbno == NULLFSBLOCK) {
- args.fsbno = lbno;
- args.type = XFS_ALLOCTYPE_START_BNO;
- } else
- args.type = XFS_ALLOCTYPE_NEAR_BNO;
- args.mod = args.minleft = args.alignment = args.total = args.isfl =
- args.userdata = args.minalignslop = 0;
- args.minlen = args.maxlen = args.prod = 1;
- args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
- if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return XFS_ERROR(ENOSPC);
- }
- if ((error = xfs_alloc_vextent(&args))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- if (args.fsbno == NULLFSBLOCK) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- ASSERT(args.len == 1);
- cur->bc_private.b.firstblock = args.fsbno;
- cur->bc_private.b.allocated++;
- cur->bc_private.b.ip->i_d.di_nblocks++;
- xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
- XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
- XFS_TRANS_DQ_BCOUNT, 1L);
- rbp = xfs_btree_get_bufl(args.mp, args.tp, args.fsbno, 0);
- right = XFS_BUF_TO_BMBT_BLOCK(rbp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_lblock(cur, left, level, rbp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
-#endif
- right->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
- right->bb_level = left->bb_level;
- right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
- if ((be16_to_cpu(left->bb_numrecs) & 1) &&
- cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
- be16_add_cpu(&right->bb_numrecs, 1);
- i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
- if (level > 0) {
- lkp = XFS_BMAP_KEY_IADDR(left, i, cur);
- lpp = XFS_BMAP_PTR_IADDR(left, i, cur);
- rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
- rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
-#ifdef DEBUG
- for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
- if ((error = xfs_btree_check_lptr_disk(cur, lpp[i], level))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- }
-#endif
- memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
- memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
- xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
- xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
- *startoff = be64_to_cpu(rkp->br_startoff);
- } else {
- lrp = XFS_BMAP_REC_IADDR(left, i, cur);
- rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
- memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
- xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
- *startoff = xfs_bmbt_disk_get_startoff(rrp);
- }
- be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
- right->bb_rightsib = left->bb_rightsib;
- left->bb_rightsib = cpu_to_be64(args.fsbno);
- right->bb_leftsib = cpu_to_be64(lbno);
- xfs_bmbt_log_block(cur, rbp, XFS_BB_ALL_BITS);
- xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
- if (be64_to_cpu(right->bb_rightsib) != NULLDFSBNO) {
- if ((error = xfs_btree_read_bufl(args.mp, args.tp,
- be64_to_cpu(right->bb_rightsib), 0, &rrbp,
- XFS_BMAP_BTREE_REF))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp);
- if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- rrblock->bb_leftsib = cpu_to_be64(args.fsbno);
- xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB);
- }
- if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) {
- xfs_btree_setbuf(cur, level, rbp);
- cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs);
- }
- if (level + 1 < cur->bc_nlevels) {
- if ((error = xfs_btree_dup_cursor(cur, curp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- (*curp)->bc_ptrs[level + 1]++;
- }
- *bnop = args.fsbno;
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 1;
- return 0;
-}
-
-
-/*
- * Update keys for the record.
- */
-STATIC int
-xfs_bmbt_updkey(
- xfs_btree_cur_t *cur,
- xfs_bmbt_key_t *keyp, /* on-disk format */
- int level)
-{
- xfs_bmbt_block_t *block;
- xfs_buf_t *bp;
-#ifdef DEBUG
- int error;
-#endif
- xfs_bmbt_key_t *kp;
- int ptr;
-
- ASSERT(level >= 1);
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- XFS_BMBT_TRACE_ARGIK(cur, level, keyp);
- for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
- block = xfs_bmbt_get_block(cur, level, &bp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
-#endif
- ptr = cur->bc_ptrs[level];
- kp = XFS_BMAP_KEY_IADDR(block, ptr, cur);
- *kp = *keyp;
- xfs_bmbt_log_keys(cur, bp, ptr, ptr);
- }
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- return 0;
-}
-
/*
* Convert on-disk form of btree root to in-memory form.
*/
void
xfs_bmdr_to_bmbt(
+ struct xfs_inode *ip,
xfs_bmdr_block_t *dblock,
int dblocklen,
- xfs_bmbt_block_t *rblock,
+ struct xfs_btree_block *rblock,
int rblocklen)
{
+ struct xfs_mount *mp = ip->i_mount;
int dmxr;
xfs_bmbt_key_t *fkp;
__be64 *fpp;
xfs_bmbt_key_t *tkp;
__be64 *tpp;
- rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
+ XFS_BMAP_CRC_MAGIC, 0, 0, ip->i_ino,
+ XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
+ else
+ xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
+ XFS_BMAP_MAGIC, 0, 0, ip->i_ino,
+ XFS_BTREE_LONG_PTRS);
+
rblock->bb_level = dblock->bb_level;
ASSERT(be16_to_cpu(rblock->bb_level) > 0);
rblock->bb_numrecs = dblock->bb_numrecs;
- rblock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
- rblock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
- dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
- fkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
- tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
- fpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
- tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
+ dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
+ fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
+ tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
+ fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
+ tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
dmxr = be16_to_cpu(dblock->bb_numrecs);
memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}
/*
- * Decrement cursor by one record at the level.
- * For nonzero levels the leaf-ward information is untouched.
- */
-int /* error */
-xfs_bmbt_decrement(
- xfs_btree_cur_t *cur,
- int level,
- int *stat) /* success/failure */
-{
- xfs_bmbt_block_t *block;
- xfs_buf_t *bp;
- int error; /* error return value */
- xfs_fsblock_t fsbno;
- int lev;
- xfs_mount_t *mp;
- xfs_trans_t *tp;
-
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- XFS_BMBT_TRACE_ARGI(cur, level);
- ASSERT(level < cur->bc_nlevels);
- if (level < cur->bc_nlevels - 1)
- xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);
- if (--cur->bc_ptrs[level] > 0) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 1;
- return 0;
- }
- block = xfs_bmbt_get_block(cur, level, &bp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
-#endif
- if (be64_to_cpu(block->bb_leftsib) == NULLDFSBNO) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
- if (--cur->bc_ptrs[lev] > 0)
- break;
- if (lev < cur->bc_nlevels - 1)
- xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
- }
- if (lev == cur->bc_nlevels) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- tp = cur->bc_tp;
- mp = cur->bc_mp;
- for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) {
- fsbno = be64_to_cpu(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur));
- if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp,
- XFS_BMAP_BTREE_REF))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- lev--;
- xfs_btree_setbuf(cur, lev, bp);
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
- if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs);
- }
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 1;
- return 0;
-}
-
-/*
- * Delete the record pointed to by cur.
- */
-int /* error */
-xfs_bmbt_delete(
- xfs_btree_cur_t *cur,
- int *stat) /* success/failure */
-{
- int error; /* error return value */
- int i;
- int level;
-
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- for (level = 0, i = 2; i == 2; level++) {
- if ((error = xfs_bmbt_delrec(cur, level, &i))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- }
- if (i == 0) {
- for (level = 1; level < cur->bc_nlevels; level++) {
- if (cur->bc_ptrs[level] == 0) {
- if ((error = xfs_bmbt_decrement(cur, level,
- &i))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- break;
- }
- }
- }
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = i;
- return 0;
-}
-
-/*
* Convert a compressed bmap extent record to an uncompressed form.
* This code must be in sync with the routines xfs_bmbt_get_startoff,
* xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
*/
-
-STATIC_INLINE void
+STATIC void
__xfs_bmbt_get_all(
__uint64_t l0,
__uint64_t l1,
@@ -1797,25 +110,25 @@ __xfs_bmbt_get_all(
ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
s->br_startoff = ((xfs_fileoff_t)l0 &
- XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
+ xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
#if XFS_BIG_BLKNOS
- s->br_startblock = (((xfs_fsblock_t)l0 & XFS_MASK64LO(9)) << 43) |
+ s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) |
(((xfs_fsblock_t)l1) >> 21);
#else
#ifdef DEBUG
{
xfs_dfsbno_t b;
- b = (((xfs_dfsbno_t)l0 & XFS_MASK64LO(9)) << 43) |
+ b = (((xfs_dfsbno_t)l0 & xfs_mask64lo(9)) << 43) |
(((xfs_dfsbno_t)l1) >> 21);
- ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
+ ASSERT((b >> 32) == 0 || isnulldstartblock(b));
s->br_startblock = (xfs_fsblock_t)b;
}
#else /* !DEBUG */
s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21);
#endif /* DEBUG */
#endif /* XFS_BIG_BLKNOS */
- s->br_blockcount = (xfs_filblks_t)(l1 & XFS_MASK64LO(21));
+ s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21));
/* This is xfs_extent_state() in-line */
if (ext_flag) {
ASSERT(s->br_blockcount != 0); /* saved for DMIG */
@@ -1834,38 +147,13 @@ xfs_bmbt_get_all(
}
/*
- * Get the block pointer for the given level of the cursor.
- * Fill in the buffer pointer, if applicable.
- */
-xfs_bmbt_block_t *
-xfs_bmbt_get_block(
- xfs_btree_cur_t *cur,
- int level,
- xfs_buf_t **bpp)
-{
- xfs_ifork_t *ifp;
- xfs_bmbt_block_t *rval;
-
- if (level < cur->bc_nlevels - 1) {
- *bpp = cur->bc_bufs[level];
- rval = XFS_BUF_TO_BMBT_BLOCK(*bpp);
- } else {
- *bpp = NULL;
- ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
- cur->bc_private.b.whichfork);
- rval = ifp->if_broot;
- }
- return rval;
-}
-
-/*
* Extract the blockcount field from an in memory bmap extent record.
*/
xfs_filblks_t
xfs_bmbt_get_blockcount(
xfs_bmbt_rec_host_t *r)
{
- return (xfs_filblks_t)(r->l1 & XFS_MASK64LO(21));
+ return (xfs_filblks_t)(r->l1 & xfs_mask64lo(21));
}
/*
@@ -1876,15 +164,15 @@ xfs_bmbt_get_startblock(
xfs_bmbt_rec_host_t *r)
{
#if XFS_BIG_BLKNOS
- return (((xfs_fsblock_t)r->l0 & XFS_MASK64LO(9)) << 43) |
+ return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) |
(((xfs_fsblock_t)r->l1) >> 21);
#else
#ifdef DEBUG
xfs_dfsbno_t b;
- b = (((xfs_dfsbno_t)r->l0 & XFS_MASK64LO(9)) << 43) |
+ b = (((xfs_dfsbno_t)r->l0 & xfs_mask64lo(9)) << 43) |
(((xfs_dfsbno_t)r->l1) >> 21);
- ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
+ ASSERT((b >> 32) == 0 || isnulldstartblock(b));
return (xfs_fsblock_t)b;
#else /* !DEBUG */
return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21);
@@ -1900,7 +188,7 @@ xfs_bmbt_get_startoff(
xfs_bmbt_rec_host_t *r)
{
return ((xfs_fileoff_t)r->l0 &
- XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
+ xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}
xfs_exntst_t
@@ -1914,15 +202,6 @@ xfs_bmbt_get_state(
ext_flag);
}
-/* Endian flipping versions of the bmbt extraction functions */
-void
-xfs_bmbt_disk_get_all(
- xfs_bmbt_rec_t *r,
- xfs_bmbt_irec_t *s)
-{
- __xfs_bmbt_get_all(be64_to_cpu(r->l0), be64_to_cpu(r->l1), s);
-}
-
/*
* Extract the blockcount field from an on disk bmap extent record.
*/
@@ -1930,7 +209,7 @@ xfs_filblks_t
xfs_bmbt_disk_get_blockcount(
xfs_bmbt_rec_t *r)
{
- return (xfs_filblks_t)(be64_to_cpu(r->l1) & XFS_MASK64LO(21));
+ return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
}
/*
@@ -1941,379 +220,9 @@ xfs_bmbt_disk_get_startoff(
xfs_bmbt_rec_t *r)
{
return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
- XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
+ xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}
-/*
- * Increment cursor by one record at the level.
- * For nonzero levels the leaf-ward information is untouched.
- */
-int /* error */
-xfs_bmbt_increment(
- xfs_btree_cur_t *cur,
- int level,
- int *stat) /* success/failure */
-{
- xfs_bmbt_block_t *block;
- xfs_buf_t *bp;
- int error; /* error return value */
- xfs_fsblock_t fsbno;
- int lev;
- xfs_mount_t *mp;
- xfs_trans_t *tp;
-
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- XFS_BMBT_TRACE_ARGI(cur, level);
- ASSERT(level < cur->bc_nlevels);
- if (level < cur->bc_nlevels - 1)
- xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
- block = xfs_bmbt_get_block(cur, level, &bp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
-#endif
- if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 1;
- return 0;
- }
- if (be64_to_cpu(block->bb_rightsib) == NULLDFSBNO) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
- block = xfs_bmbt_get_block(cur, lev, &bp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
-#endif
- if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs))
- break;
- if (lev < cur->bc_nlevels - 1)
- xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
- }
- if (lev == cur->bc_nlevels) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- tp = cur->bc_tp;
- mp = cur->bc_mp;
- for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) {
- fsbno = be64_to_cpu(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur));
- if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp,
- XFS_BMAP_BTREE_REF))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- lev--;
- xfs_btree_setbuf(cur, lev, bp);
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
- if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- cur->bc_ptrs[lev] = 1;
- }
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 1;
- return 0;
-}
-
-/*
- * Insert the current record at the point referenced by cur.
- *
- * A multi-level split of the tree on insert will invalidate the original
- * cursor. It appears, however, that some callers assume that the cursor is
- * always valid. Hence if we do a multi-level split we need to revalidate the
- * cursor.
- *
- * When a split occurs, we will see a new cursor returned. Use that as a
- * trigger to determine if we need to revalidate the original cursor. If we get
- * a split, then use the original irec to look up the path of the record we
- * just inserted.
- *
- * Note that the fact that the btree root is in the inode means that we can
- * have the level of the tree change without a "split" occurring at the root
- * level. What happens is that the root is migrated to an allocated block and
- * the inode root is pointed to it. This means a single split can change the
- * level of the tree (level 2 -> level 3) and invalidate the old cursor. Hence
- * the level change should be accounted as a split so as to correctly trigger a
- * revalidation of the old cursor.
- */
-int /* error */
-xfs_bmbt_insert(
- xfs_btree_cur_t *cur,
- int *stat) /* success/failure */
-{
- int error; /* error return value */
- int i;
- int level;
- xfs_fsblock_t nbno;
- xfs_btree_cur_t *ncur;
- xfs_bmbt_rec_t nrec;
- xfs_bmbt_irec_t oirec; /* original irec */
- xfs_btree_cur_t *pcur;
- int splits = 0;
-
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- level = 0;
- nbno = NULLFSBLOCK;
- oirec = cur->bc_rec.b;
- xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b);
- ncur = NULL;
- pcur = cur;
- do {
- if ((error = xfs_bmbt_insrec(pcur, level++, &nbno, &nrec, &ncur,
- &i))) {
- if (pcur != cur)
- xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
- goto error0;
- }
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- if (pcur != cur && (ncur || nbno == NULLFSBLOCK)) {
- /* allocating a new root is effectively a split */
- if (cur->bc_nlevels != pcur->bc_nlevels)
- splits++;
- cur->bc_nlevels = pcur->bc_nlevels;
- cur->bc_private.b.allocated +=
- pcur->bc_private.b.allocated;
- pcur->bc_private.b.allocated = 0;
- ASSERT((cur->bc_private.b.firstblock != NULLFSBLOCK) ||
- XFS_IS_REALTIME_INODE(cur->bc_private.b.ip));
- cur->bc_private.b.firstblock =
- pcur->bc_private.b.firstblock;
- ASSERT(cur->bc_private.b.flist ==
- pcur->bc_private.b.flist);
- xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
- }
- if (ncur) {
- splits++;
- pcur = ncur;
- ncur = NULL;
- }
- } while (nbno != NULLFSBLOCK);
-
- if (splits > 1) {
- /* revalidate the old cursor as we had a multi-level split */
- error = xfs_bmbt_lookup_eq(cur, oirec.br_startoff,
- oirec.br_startblock, oirec.br_blockcount, &i);
- if (error)
- goto error0;
- ASSERT(i == 1);
- }
-
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = i;
- return 0;
-error0:
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
-}
-
-/*
- * Log fields from the btree block header.
- */
-void
-xfs_bmbt_log_block(
- xfs_btree_cur_t *cur,
- xfs_buf_t *bp,
- int fields)
-{
- int first;
- int last;
- xfs_trans_t *tp;
- static const short offsets[] = {
- offsetof(xfs_bmbt_block_t, bb_magic),
- offsetof(xfs_bmbt_block_t, bb_level),
- offsetof(xfs_bmbt_block_t, bb_numrecs),
- offsetof(xfs_bmbt_block_t, bb_leftsib),
- offsetof(xfs_bmbt_block_t, bb_rightsib),
- sizeof(xfs_bmbt_block_t)
- };
-
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- XFS_BMBT_TRACE_ARGBI(cur, bp, fields);
- tp = cur->bc_tp;
- if (bp) {
- xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first,
- &last);
- xfs_trans_log_buf(tp, bp, first, last);
- } else
- xfs_trans_log_inode(tp, cur->bc_private.b.ip,
- XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
-}
-
-/*
- * Log record values from the btree block.
- */
-void
-xfs_bmbt_log_recs(
- xfs_btree_cur_t *cur,
- xfs_buf_t *bp,
- int rfirst,
- int rlast)
-{
- xfs_bmbt_block_t *block;
- int first;
- int last;
- xfs_bmbt_rec_t *rp;
- xfs_trans_t *tp;
-
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- XFS_BMBT_TRACE_ARGBII(cur, bp, rfirst, rlast);
- ASSERT(bp);
- tp = cur->bc_tp;
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
- rp = XFS_BMAP_REC_DADDR(block, 1, cur);
- first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block);
- last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block);
- xfs_trans_log_buf(tp, bp, first, last);
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
-}
-
-int /* error */
-xfs_bmbt_lookup_eq(
- xfs_btree_cur_t *cur,
- xfs_fileoff_t off,
- xfs_fsblock_t bno,
- xfs_filblks_t len,
- int *stat) /* success/failure */
-{
- cur->bc_rec.b.br_startoff = off;
- cur->bc_rec.b.br_startblock = bno;
- cur->bc_rec.b.br_blockcount = len;
- return xfs_bmbt_lookup(cur, XFS_LOOKUP_EQ, stat);
-}
-
-int /* error */
-xfs_bmbt_lookup_ge(
- xfs_btree_cur_t *cur,
- xfs_fileoff_t off,
- xfs_fsblock_t bno,
- xfs_filblks_t len,
- int *stat) /* success/failure */
-{
- cur->bc_rec.b.br_startoff = off;
- cur->bc_rec.b.br_startblock = bno;
- cur->bc_rec.b.br_blockcount = len;
- return xfs_bmbt_lookup(cur, XFS_LOOKUP_GE, stat);
-}
-
-/*
- * Give the bmap btree a new root block. Copy the old broot contents
- * down into a real block and make the broot point to it.
- */
-int /* error */
-xfs_bmbt_newroot(
- xfs_btree_cur_t *cur, /* btree cursor */
- int *logflags, /* logging flags for inode */
- int *stat) /* return status - 0 fail */
-{
- xfs_alloc_arg_t args; /* allocation arguments */
- xfs_bmbt_block_t *block; /* bmap btree block */
- xfs_buf_t *bp; /* buffer for block */
- xfs_bmbt_block_t *cblock; /* child btree block */
- xfs_bmbt_key_t *ckp; /* child key pointer */
- xfs_bmbt_ptr_t *cpp; /* child ptr pointer */
- int error; /* error return code */
-#ifdef DEBUG
- int i; /* loop counter */
-#endif
- xfs_bmbt_key_t *kp; /* pointer to bmap btree key */
- int level; /* btree level */
- xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
-
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- level = cur->bc_nlevels - 1;
- block = xfs_bmbt_get_block(cur, level, &bp);
- /*
- * Copy the root into a real block.
- */
- args.mp = cur->bc_mp;
- pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
- args.tp = cur->bc_tp;
- args.fsbno = cur->bc_private.b.firstblock;
- args.mod = args.minleft = args.alignment = args.total = args.isfl =
- args.userdata = args.minalignslop = 0;
- args.minlen = args.maxlen = args.prod = 1;
- args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
- args.firstblock = args.fsbno;
- if (args.fsbno == NULLFSBLOCK) {
-#ifdef DEBUG
- if ((error = xfs_btree_check_lptr_disk(cur, *pp, level))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
-#endif
- args.fsbno = be64_to_cpu(*pp);
- args.type = XFS_ALLOCTYPE_START_BNO;
- } else
- args.type = XFS_ALLOCTYPE_NEAR_BNO;
- if ((error = xfs_alloc_vextent(&args))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- if (args.fsbno == NULLFSBLOCK) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *stat = 0;
- return 0;
- }
- ASSERT(args.len == 1);
- cur->bc_private.b.firstblock = args.fsbno;
- cur->bc_private.b.allocated++;
- cur->bc_private.b.ip->i_d.di_nblocks++;
- XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
- XFS_TRANS_DQ_BCOUNT, 1L);
- bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0);
- cblock = XFS_BUF_TO_BMBT_BLOCK(bp);
- *cblock = *block;
- be16_add_cpu(&block->bb_level, 1);
- block->bb_numrecs = cpu_to_be16(1);
- cur->bc_nlevels++;
- cur->bc_ptrs[level + 1] = 1;
- kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
- ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
- memcpy(ckp, kp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*kp));
- cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur);
-#ifdef DEBUG
- for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
- if ((error = xfs_btree_check_lptr_disk(cur, pp[i], level))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- }
-#endif
- memcpy(cpp, pp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*pp));
-#ifdef DEBUG
- if ((error = xfs_btree_check_lptr(cur, args.fsbno, level))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
-#endif
- *pp = cpu_to_be64(args.fsbno);
- xfs_iroot_realloc(cur->bc_private.b.ip, 1 - be16_to_cpu(cblock->bb_numrecs),
- cur->bc_private.b.whichfork);
- xfs_btree_setbuf(cur, level, bp);
- /*
- * Do all this logging at the end so that
- * the root is at the right level.
- */
- xfs_bmbt_log_block(cur, bp, XFS_BB_ALL_BITS);
- xfs_bmbt_log_keys(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs));
- xfs_bmbt_log_ptrs(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs));
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- *logflags |=
- XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork);
- *stat = 1;
- return 0;
-}
/*
* Set all the fields in a bmap extent record from the arguments.
@@ -2329,33 +238,33 @@ xfs_bmbt_set_allf(
int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
- ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
- ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
+ ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
+ ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
#if XFS_BIG_BLKNOS
- ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
+ ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);
r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
((xfs_bmbt_rec_base_t)startoff << 9) |
((xfs_bmbt_rec_base_t)startblock >> 43);
r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
((xfs_bmbt_rec_base_t)blockcount &
- (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+ (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
#else /* !XFS_BIG_BLKNOS */
- if (ISNULLSTARTBLOCK(startblock)) {
+ if (isnullstartblock(startblock)) {
r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
((xfs_bmbt_rec_base_t)startoff << 9) |
- (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
- r->l1 = XFS_MASK64HI(11) |
+ (xfs_bmbt_rec_base_t)xfs_mask64lo(9);
+ r->l1 = xfs_mask64hi(11) |
((xfs_bmbt_rec_base_t)startblock << 21) |
((xfs_bmbt_rec_base_t)blockcount &
- (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+ (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
} else {
r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
((xfs_bmbt_rec_base_t)startoff << 9);
r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
((xfs_bmbt_rec_base_t)blockcount &
- (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+ (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
}
#endif /* XFS_BIG_BLKNOS */
}
@@ -2387,11 +296,11 @@ xfs_bmbt_disk_set_allf(
int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
- ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
- ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
+ ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
+ ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
#if XFS_BIG_BLKNOS
- ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
+ ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);
r->l0 = cpu_to_be64(
((xfs_bmbt_rec_base_t)extent_flag << 63) |
@@ -2400,17 +309,17 @@ xfs_bmbt_disk_set_allf(
r->l1 = cpu_to_be64(
((xfs_bmbt_rec_base_t)startblock << 21) |
((xfs_bmbt_rec_base_t)blockcount &
- (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+ (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
#else /* !XFS_BIG_BLKNOS */
- if (ISNULLSTARTBLOCK(startblock)) {
+ if (isnullstartblock(startblock)) {
r->l0 = cpu_to_be64(
((xfs_bmbt_rec_base_t)extent_flag << 63) |
((xfs_bmbt_rec_base_t)startoff << 9) |
- (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
- r->l1 = cpu_to_be64(XFS_MASK64HI(11) |
+ (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
+ r->l1 = cpu_to_be64(xfs_mask64hi(11) |
((xfs_bmbt_rec_base_t)startblock << 21) |
((xfs_bmbt_rec_base_t)blockcount &
- (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+ (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
} else {
r->l0 = cpu_to_be64(
((xfs_bmbt_rec_base_t)extent_flag << 63) |
@@ -2418,7 +327,7 @@ xfs_bmbt_disk_set_allf(
r->l1 = cpu_to_be64(
((xfs_bmbt_rec_base_t)startblock << 21) |
((xfs_bmbt_rec_base_t)blockcount &
- (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+ (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
}
#endif /* XFS_BIG_BLKNOS */
}
@@ -2426,7 +335,7 @@ xfs_bmbt_disk_set_allf(
/*
* Set all the fields in a bmap extent record from the uncompressed form.
*/
-void
+STATIC void
xfs_bmbt_disk_set_all(
xfs_bmbt_rec_t *r,
xfs_bmbt_irec_t *s)
@@ -2443,9 +352,9 @@ xfs_bmbt_set_blockcount(
xfs_bmbt_rec_host_t *r,
xfs_filblks_t v)
{
- ASSERT((v & XFS_MASK64HI(43)) == 0);
- r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(43)) |
- (xfs_bmbt_rec_base_t)(v & XFS_MASK64LO(21));
+ ASSERT((v & xfs_mask64hi(43)) == 0);
+ r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64hi(43)) |
+ (xfs_bmbt_rec_base_t)(v & xfs_mask64lo(21));
}
/*
@@ -2457,21 +366,21 @@ xfs_bmbt_set_startblock(
xfs_fsblock_t v)
{
#if XFS_BIG_BLKNOS
- ASSERT((v & XFS_MASK64HI(12)) == 0);
- r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(55)) |
+ ASSERT((v & xfs_mask64hi(12)) == 0);
+ r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64hi(55)) |
(xfs_bmbt_rec_base_t)(v >> 43);
- r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)) |
+ r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) |
(xfs_bmbt_rec_base_t)(v << 21);
#else /* !XFS_BIG_BLKNOS */
- if (ISNULLSTARTBLOCK(v)) {
- r->l0 |= (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
- r->l1 = (xfs_bmbt_rec_base_t)XFS_MASK64HI(11) |
+ if (isnullstartblock(v)) {
+ r->l0 |= (xfs_bmbt_rec_base_t)xfs_mask64lo(9);
+ r->l1 = (xfs_bmbt_rec_base_t)xfs_mask64hi(11) |
((xfs_bmbt_rec_base_t)v << 21) |
- (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+ (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
} else {
- r->l0 &= ~(xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
+ r->l0 &= ~(xfs_bmbt_rec_base_t)xfs_mask64lo(9);
r->l1 = ((xfs_bmbt_rec_base_t)v << 21) |
- (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+ (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
}
#endif /* XFS_BIG_BLKNOS */
}
@@ -2484,10 +393,10 @@ xfs_bmbt_set_startoff(
xfs_bmbt_rec_host_t *r,
xfs_fileoff_t v)
{
- ASSERT((v & XFS_MASK64HI(9)) == 0);
- r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) XFS_MASK64HI(1)) |
+ ASSERT((v & xfs_mask64hi(9)) == 0);
+ r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) xfs_mask64hi(1)) |
((xfs_bmbt_rec_base_t)v << 9) |
- (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
+ (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
}
/*
@@ -2500,9 +409,9 @@ xfs_bmbt_set_state(
{
ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
if (v == XFS_EXT_NORM)
- r->l0 &= XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN);
+ r->l0 &= xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN);
else
- r->l0 |= XFS_MASK64HI(BMBT_EXNTFLAG_BITLEN);
+ r->l0 |= xfs_mask64hi(BMBT_EXNTFLAG_BITLEN);
}
/*
@@ -2510,7 +419,8 @@ xfs_bmbt_set_state(
*/
void
xfs_bmbt_to_bmdr(
- xfs_bmbt_block_t *rblock,
+ struct xfs_mount *mp,
+ struct xfs_btree_block *rblock,
int rblocklen,
xfs_bmdr_block_t *dblock,
int dblocklen)
@@ -2521,68 +431,29 @@ xfs_bmbt_to_bmdr(
xfs_bmbt_key_t *tkp;
__be64 *tpp;
- ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC);
- ASSERT(be64_to_cpu(rblock->bb_leftsib) == NULLDFSBNO);
- ASSERT(be64_to_cpu(rblock->bb_rightsib) == NULLDFSBNO);
- ASSERT(be16_to_cpu(rblock->bb_level) > 0);
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
+ ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid));
+ ASSERT(rblock->bb_u.l.bb_blkno ==
+ cpu_to_be64(XFS_BUF_DADDR_NULL));
+ } else
+ ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
+ ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO));
+ ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO));
+ ASSERT(rblock->bb_level != 0);
dblock->bb_level = rblock->bb_level;
dblock->bb_numrecs = rblock->bb_numrecs;
- dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
- fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
- tkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
- fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
- tpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
+ dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
+ fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
+ tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
+ fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
+ tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
dmxr = be16_to_cpu(dblock->bb_numrecs);
memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}
/*
- * Update the record to the passed values.
- */
-int
-xfs_bmbt_update(
- xfs_btree_cur_t *cur,
- xfs_fileoff_t off,
- xfs_fsblock_t bno,
- xfs_filblks_t len,
- xfs_exntst_t state)
-{
- xfs_bmbt_block_t *block;
- xfs_buf_t *bp;
- int error;
- xfs_bmbt_key_t key;
- int ptr;
- xfs_bmbt_rec_t *rp;
-
- XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
- XFS_BMBT_TRACE_ARGFFFI(cur, (xfs_dfiloff_t)off, (xfs_dfsbno_t)bno,
- (xfs_dfilblks_t)len, (int)state);
- block = xfs_bmbt_get_block(cur, 0, &bp);
-#ifdef DEBUG
- if ((error = xfs_btree_check_lblock(cur, block, 0, bp))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
-#endif
- ptr = cur->bc_ptrs[0];
- rp = XFS_BMAP_REC_IADDR(block, ptr, cur);
- xfs_bmbt_disk_set_allf(rp, off, bno, len, state);
- xfs_bmbt_log_recs(cur, bp, ptr, ptr);
- if (ptr > 1) {
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- return 0;
- }
- key.br_startoff = cpu_to_be64(off);
- if ((error = xfs_bmbt_updkey(cur, &key, 1))) {
- XFS_BMBT_TRACE_CURSOR(cur, ERROR);
- return error;
- }
- XFS_BMBT_TRACE_CURSOR(cur, EXIT);
- return 0;
-}
-
-/*
* Check extent records, which have just been read, for
* any bit in the extent flag field. ASSERT on debug
* kernels, as this condition should not occur.
@@ -2606,3 +477,491 @@ xfs_check_nostate_extents(
}
return 0;
}
+
+
+STATIC struct xfs_btree_cur *
+xfs_bmbt_dup_cursor(
+ struct xfs_btree_cur *cur)
+{
+ struct xfs_btree_cur *new;
+
+ new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
+ cur->bc_private.b.ip, cur->bc_private.b.whichfork);
+
+ /*
+ * Copy the firstblock, flist, and flags values,
+ * since init cursor doesn't get them.
+ */
+ new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
+ new->bc_private.b.flist = cur->bc_private.b.flist;
+ new->bc_private.b.flags = cur->bc_private.b.flags;
+
+ return new;
+}
+
+STATIC void
+xfs_bmbt_update_cursor(
+ struct xfs_btree_cur *src,
+ struct xfs_btree_cur *dst)
+{
+ ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
+ (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
+ ASSERT(dst->bc_private.b.flist == src->bc_private.b.flist);
+
+ dst->bc_private.b.allocated += src->bc_private.b.allocated;
+ dst->bc_private.b.firstblock = src->bc_private.b.firstblock;
+
+ src->bc_private.b.allocated = 0;
+}
+
+STATIC int
+xfs_bmbt_alloc_block(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *start,
+ union xfs_btree_ptr *new,
+ int *stat)
+{
+ xfs_alloc_arg_t args; /* block allocation args */
+ int error; /* error return value */
+
+ memset(&args, 0, sizeof(args));
+ args.tp = cur->bc_tp;
+ args.mp = cur->bc_mp;
+ args.fsbno = cur->bc_private.b.firstblock;
+ args.firstblock = args.fsbno;
+
+ if (args.fsbno == NULLFSBLOCK) {
+ args.fsbno = be64_to_cpu(start->l);
+ args.type = XFS_ALLOCTYPE_START_BNO;
+ /*
+ * Make sure there is sufficient room left in the AG to
+ * complete a full tree split for an extent insert. If
+ * we are converting the middle part of an extent then
+ * we may need space for two tree splits.
+ *
+ * We are relying on the caller to make the correct block
+ * reservation for this operation to succeed. If the
+ * reservation amount is insufficient then we may fail a
+ * block allocation here and corrupt the filesystem.
+ */
+ args.minleft = xfs_trans_get_block_res(args.tp);
+ } else if (cur->bc_private.b.flist->xbf_low) {
+ args.type = XFS_ALLOCTYPE_START_BNO;
+ } else {
+ args.type = XFS_ALLOCTYPE_NEAR_BNO;
+ }
+
+ args.minlen = args.maxlen = args.prod = 1;
+ args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
+ if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
+ error = XFS_ERROR(ENOSPC);
+ goto error0;
+ }
+ error = xfs_alloc_vextent(&args);
+ if (error)
+ goto error0;
+
+ if (args.fsbno == NULLFSBLOCK && args.minleft) {
+ /*
+ * Could not find an AG with enough free space to satisfy
+ * a full btree split. Try again without minleft and if
+ * successful activate the lowspace algorithm.
+ */
+ args.fsbno = 0;
+ args.type = XFS_ALLOCTYPE_FIRST_AG;
+ args.minleft = 0;
+ error = xfs_alloc_vextent(&args);
+ if (error)
+ goto error0;
+ cur->bc_private.b.flist->xbf_low = 1;
+ }
+ if (args.fsbno == NULLFSBLOCK) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 0;
+ return 0;
+ }
+ ASSERT(args.len == 1);
+ cur->bc_private.b.firstblock = args.fsbno;
+ cur->bc_private.b.allocated++;
+ cur->bc_private.b.ip->i_d.di_nblocks++;
+ xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
+ xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
+ XFS_TRANS_DQ_BCOUNT, 1L);
+
+ new->l = cpu_to_be64(args.fsbno);
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 1;
+ return 0;
+
+ error0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+}
+
+STATIC int
+xfs_bmbt_free_block(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_inode *ip = cur->bc_private.b.ip;
+ struct xfs_trans *tp = cur->bc_tp;
+ xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
+
+ xfs_bmap_add_free(fsbno, 1, cur->bc_private.b.flist, mp);
+ ip->i_d.di_nblocks--;
+
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
+ xfs_trans_binval(tp, bp);
+ return 0;
+}
+
+STATIC int
+xfs_bmbt_get_minrecs(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ if (level == cur->bc_nlevels - 1) {
+ struct xfs_ifork *ifp;
+
+ ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
+ cur->bc_private.b.whichfork);
+
+ return xfs_bmbt_maxrecs(cur->bc_mp,
+ ifp->if_broot_bytes, level == 0) / 2;
+ }
+
+ return cur->bc_mp->m_bmap_dmnr[level != 0];
+}
+
+int
+xfs_bmbt_get_maxrecs(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ if (level == cur->bc_nlevels - 1) {
+ struct xfs_ifork *ifp;
+
+ ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
+ cur->bc_private.b.whichfork);
+
+ return xfs_bmbt_maxrecs(cur->bc_mp,
+ ifp->if_broot_bytes, level == 0);
+ }
+
+ return cur->bc_mp->m_bmap_dmxr[level != 0];
+
+}
+
+/*
+ * Get the maximum records we could store in the on-disk format.
+ *
+ * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
+ * for the root node this checks the available space in the dinode fork
+ * so that we can resize the in-memory buffer to match it. After a
+ * resize to the maximum size this function returns the same value
+ * as xfs_bmbt_get_maxrecs for the root node, too.
+ */
+STATIC int
+xfs_bmbt_get_dmaxrecs(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ if (level != cur->bc_nlevels - 1)
+ return cur->bc_mp->m_bmap_dmxr[level != 0];
+ return xfs_bmdr_maxrecs(cur->bc_private.b.forksize, level == 0);
+}
+
+STATIC void
+xfs_bmbt_init_key_from_rec(
+ union xfs_btree_key *key,
+ union xfs_btree_rec *rec)
+{
+ key->bmbt.br_startoff =
+ cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
+}
+
+STATIC void
+xfs_bmbt_init_rec_from_key(
+ union xfs_btree_key *key,
+ union xfs_btree_rec *rec)
+{
+ ASSERT(key->bmbt.br_startoff != 0);
+
+ xfs_bmbt_disk_set_allf(&rec->bmbt, be64_to_cpu(key->bmbt.br_startoff),
+ 0, 0, XFS_EXT_NORM);
+}
+
+STATIC void
+xfs_bmbt_init_rec_from_cur(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec)
+{
+ xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
+}
+
+STATIC void
+xfs_bmbt_init_ptr_from_cur(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr)
+{
+ ptr->l = 0;
+}
+
+STATIC __int64_t
+xfs_bmbt_key_diff(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *key)
+{
+ return (__int64_t)be64_to_cpu(key->bmbt.br_startoff) -
+ cur->bc_rec.b.br_startoff;
+}
+
+static bool
+xfs_bmbt_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ unsigned int level;
+
+ switch (block->bb_magic) {
+ case cpu_to_be32(XFS_BMAP_CRC_MAGIC):
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return false;
+ if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid))
+ return false;
+ if (be64_to_cpu(block->bb_u.l.bb_blkno) != bp->b_bn)
+ return false;
+ /*
+ * XXX: need a better way of verifying the owner here. Right now
+ * just make sure there has been one set.
+ */
+ if (be64_to_cpu(block->bb_u.l.bb_owner) == 0)
+ return false;
+ /* fall through */
+ case cpu_to_be32(XFS_BMAP_MAGIC):
+ break;
+ default:
+ return false;
+ }
+
+ /*
+ * numrecs and level verification.
+ *
+ * We don't know what fork we belong to, so just verify that the level
+ * is less than the maximum of the two. Later checks will be more
+ * precise.
+ */
+ level = be16_to_cpu(block->bb_level);
+ if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
+ return false;
+ if (be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
+ return false;
+
+ /* sibling pointer verification */
+ if (!block->bb_u.l.bb_leftsib ||
+ (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLDFSBNO) &&
+ !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_leftsib))))
+ return false;
+ if (!block->bb_u.l.bb_rightsib ||
+ (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLDFSBNO) &&
+ !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_rightsib))))
+ return false;
+
+ return true;
+}
+
+static void
+xfs_bmbt_read_verify(
+ struct xfs_buf *bp)
+{
+ if (!xfs_btree_lblock_verify_crc(bp))
+ xfs_buf_ioerror(bp, EFSBADCRC);
+ else if (!xfs_bmbt_verify(bp))
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
+
+ if (bp->b_error) {
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
+ xfs_verifier_error(bp);
+ }
+}
+
+static void
+xfs_bmbt_write_verify(
+ struct xfs_buf *bp)
+{
+ if (!xfs_bmbt_verify(bp)) {
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
+ xfs_buf_ioerror(bp, EFSCORRUPTED);
+ xfs_verifier_error(bp);
+ return;
+ }
+ xfs_btree_lblock_calc_crc(bp);
+}
+
+const struct xfs_buf_ops xfs_bmbt_buf_ops = {
+ .verify_read = xfs_bmbt_read_verify,
+ .verify_write = xfs_bmbt_write_verify,
+};
+
+
+#if defined(DEBUG) || defined(XFS_WARN)
+STATIC int
+xfs_bmbt_keys_inorder(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *k1,
+ union xfs_btree_key *k2)
+{
+ return be64_to_cpu(k1->bmbt.br_startoff) <
+ be64_to_cpu(k2->bmbt.br_startoff);
+}
+
+STATIC int
+xfs_bmbt_recs_inorder(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *r1,
+ union xfs_btree_rec *r2)
+{
+ return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
+ xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
+ xfs_bmbt_disk_get_startoff(&r2->bmbt);
+}
+#endif /* DEBUG || XFS_WARN */
+
+static const struct xfs_btree_ops xfs_bmbt_ops = {
+ .rec_len = sizeof(xfs_bmbt_rec_t),
+ .key_len = sizeof(xfs_bmbt_key_t),
+
+ .dup_cursor = xfs_bmbt_dup_cursor,
+ .update_cursor = xfs_bmbt_update_cursor,
+ .alloc_block = xfs_bmbt_alloc_block,
+ .free_block = xfs_bmbt_free_block,
+ .get_maxrecs = xfs_bmbt_get_maxrecs,
+ .get_minrecs = xfs_bmbt_get_minrecs,
+ .get_dmaxrecs = xfs_bmbt_get_dmaxrecs,
+ .init_key_from_rec = xfs_bmbt_init_key_from_rec,
+ .init_rec_from_key = xfs_bmbt_init_rec_from_key,
+ .init_rec_from_cur = xfs_bmbt_init_rec_from_cur,
+ .init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur,
+ .key_diff = xfs_bmbt_key_diff,
+ .buf_ops = &xfs_bmbt_buf_ops,
+#if defined(DEBUG) || defined(XFS_WARN)
+ .keys_inorder = xfs_bmbt_keys_inorder,
+ .recs_inorder = xfs_bmbt_recs_inorder,
+#endif
+};
+
+/*
+ * Allocate a new bmap btree cursor.
+ */
+struct xfs_btree_cur * /* new bmap btree cursor */
+xfs_bmbt_init_cursor(
+ struct xfs_mount *mp, /* file system mount point */
+ struct xfs_trans *tp, /* transaction pointer */
+ struct xfs_inode *ip, /* inode owning the btree */
+ int whichfork) /* data or attr fork */
+{
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_btree_cur *cur;
+
+ cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
+
+ cur->bc_tp = tp;
+ cur->bc_mp = mp;
+ cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
+ cur->bc_btnum = XFS_BTNUM_BMAP;
+ cur->bc_blocklog = mp->m_sb.sb_blocklog;
+
+ cur->bc_ops = &xfs_bmbt_ops;
+ cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
+
+ cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
+ cur->bc_private.b.ip = ip;
+ cur->bc_private.b.firstblock = NULLFSBLOCK;
+ cur->bc_private.b.flist = NULL;
+ cur->bc_private.b.allocated = 0;
+ cur->bc_private.b.flags = 0;
+ cur->bc_private.b.whichfork = whichfork;
+
+ return cur;
+}
+
+/*
+ * Calculate number of records in a bmap btree block.
+ */
+int
+xfs_bmbt_maxrecs(
+ struct xfs_mount *mp,
+ int blocklen,
+ int leaf)
+{
+ blocklen -= XFS_BMBT_BLOCK_LEN(mp);
+
+ if (leaf)
+ return blocklen / sizeof(xfs_bmbt_rec_t);
+ return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
+}
+
+/*
+ * Calculate number of records in a bmap btree inode root.
+ */
+int
+xfs_bmdr_maxrecs(
+ int blocklen,
+ int leaf)
+{
+ blocklen -= sizeof(xfs_bmdr_block_t);
+
+ if (leaf)
+ return blocklen / sizeof(xfs_bmdr_rec_t);
+ return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
+}
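For illustration, the maxrecs arithmetic above is just "block size minus header, divided by entry size". A standalone sketch of that calculation, using assumed sizes (4096-byte blocks, a 24-byte non-CRC long-format block header, 16-byte records, 8-byte keys and pointers); the real constants live in the XFS headers, so treat these numbers as illustrative only:

#include <stdio.h>

/* Illustrative sizes only; the real values come from the XFS headers. */
#define BLOCK_HDR_LEN	24	/* assumed non-CRC long-format block header */
#define REC_SIZE	16	/* xfs_bmbt_rec_t: two 64-bit words */
#define KEY_SIZE	8	/* xfs_bmbt_key_t: one 64-bit word */
#define PTR_SIZE	8	/* xfs_bmbt_ptr_t: one 64-bit word */

/* Same shape as xfs_bmbt_maxrecs(): strip the header, divide by entry size. */
static int bmbt_maxrecs(int blocklen, int leaf)
{
	blocklen -= BLOCK_HDR_LEN;
	if (leaf)
		return blocklen / REC_SIZE;
	return blocklen / (KEY_SIZE + PTR_SIZE);
}

int main(void)
{
	/* (4096 - 24) / 16 = 254 leaf records,
	 * (4096 - 24) / (8 + 8) = 254 key/pointer pairs per node block. */
	printf("leaf maxrecs: %d\n", bmbt_maxrecs(4096, 1));
	printf("node maxrecs: %d\n", bmbt_maxrecs(4096, 0));
	return 0;
}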
+
+/*
+ * Change the owner of a btree format fork of the inode passed in to the new
+ * owner that is passed in, so that we can change owners before or after we
+ * switch forks between inodes. The operation that the caller is doing will
+ * determine whether it needs to change the owner before or after the switch.
+ *
+ * For demand paged transactional modification, the fork switch should be done
+ * after reading in all the blocks, modifying them and pinning them in the
+ * transaction. For modification when the buffers are already pinned in memory,
+ * the fork switch can be done before changing the owner as we won't need to
+ * validate the owner until the btree buffers are unpinned and writes can occur
+ * again.
+ *
+ * For recovery based ownership change, there is no transactional context and
+ * so a buffer list must be supplied so that we can record the buffers that we
+ * modified for the caller to issue IO on.
+ */
+int
+xfs_bmbt_change_owner(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ int whichfork,
+ xfs_ino_t new_owner,
+ struct list_head *buffer_list)
+{
+ struct xfs_btree_cur *cur;
+ int error;
+
+ ASSERT(tp || buffer_list);
+ ASSERT(!(tp && buffer_list));
+ if (whichfork == XFS_DATA_FORK)
+ ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_BTREE);
+ else
+ ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE);
+
+ cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
+ if (!cur)
+ return ENOMEM;
+
+ error = xfs_btree_change_owner(cur, new_owner, buffer_list);
+ xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ return error;
+}
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h
index cd0d4b4bb81..819a8a4dee9 100644
--- a/fs/xfs/xfs_bmap_btree.h
+++ b/fs/xfs/xfs_bmap_btree.h
@@ -18,103 +18,11 @@
#ifndef __XFS_BMAP_BTREE_H__
#define __XFS_BMAP_BTREE_H__
-#define XFS_BMAP_MAGIC 0x424d4150 /* 'BMAP' */
-
struct xfs_btree_cur;
-struct xfs_btree_lblock;
+struct xfs_btree_block;
struct xfs_mount;
struct xfs_inode;
-
-/*
- * Bmap root header, on-disk form only.
- */
-typedef struct xfs_bmdr_block {
- __be16 bb_level; /* 0 is a leaf */
- __be16 bb_numrecs; /* current # of data records */
-} xfs_bmdr_block_t;
-
-/*
- * Bmap btree record and extent descriptor.
- * l0:63 is an extent flag (value 1 indicates non-normal).
- * l0:9-62 are startoff.
- * l0:0-8 and l1:21-63 are startblock.
- * l1:0-20 are blockcount.
- */
-#define BMBT_EXNTFLAG_BITLEN 1
-#define BMBT_STARTOFF_BITLEN 54
-#define BMBT_STARTBLOCK_BITLEN 52
-#define BMBT_BLOCKCOUNT_BITLEN 21
-
-
-#define BMBT_USE_64 1
-
-typedef struct xfs_bmbt_rec_32
-{
- __uint32_t l0, l1, l2, l3;
-} xfs_bmbt_rec_32_t;
-typedef struct xfs_bmbt_rec_64
-{
- __be64 l0, l1;
-} xfs_bmbt_rec_64_t;
-
-typedef __uint64_t xfs_bmbt_rec_base_t; /* use this for casts */
-typedef xfs_bmbt_rec_64_t xfs_bmbt_rec_t, xfs_bmdr_rec_t;
-
-typedef struct xfs_bmbt_rec_host {
- __uint64_t l0, l1;
-} xfs_bmbt_rec_host_t;
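The bit layout described above packs a whole extent record into two 64-bit words. A standalone round-trip sketch of that packing (endian conversion and extent-flag handling are simplified; the field widths follow the comment above -- 54-bit startoff, 52-bit startblock, 21-bit blockcount):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct rec { uint64_t l0, l1; };

/* l0:63 flag, l0:9-62 startoff, l0:0-8 + l1:21-63 startblock, l1:0-20 count */
static struct rec pack(uint64_t startoff, uint64_t startblock,
		       uint64_t blockcount, int unwritten)
{
	struct rec r;

	r.l0 = ((uint64_t)unwritten << 63) | (startoff << 9) |
	       (startblock >> 43);
	r.l1 = (startblock << 21) | (blockcount & ((1ULL << 21) - 1));
	return r;
}

int main(void)
{
	struct rec r = pack(12345, 678901, 42, 0);

	/* Unpack each field and check the round trip. */
	assert(((r.l0 >> 9) & ((1ULL << 54) - 1)) == 12345);
	assert((((r.l0 & 0x1ffULL) << 43) | (r.l1 >> 21)) == 678901);
	assert((r.l1 & ((1ULL << 21) - 1)) == 42);
	printf("l0=%#llx l1=%#llx\n",
	       (unsigned long long)r.l0, (unsigned long long)r.l1);
	return 0;
}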
-
-/*
- * Values and macros for delayed-allocation startblock fields.
- */
-#define STARTBLOCKVALBITS 17
-#define STARTBLOCKMASKBITS (15 + XFS_BIG_BLKNOS * 20)
-#define DSTARTBLOCKMASKBITS (15 + 20)
-#define STARTBLOCKMASK \
- (((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
-#define DSTARTBLOCKMASK \
- (((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
-
-#define ISNULLSTARTBLOCK(x) isnullstartblock(x)
-static inline int isnullstartblock(xfs_fsblock_t x)
-{
- return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK;
-}
-
-#define ISNULLDSTARTBLOCK(x) isnulldstartblock(x)
-static inline int isnulldstartblock(xfs_dfsbno_t x)
-{
- return ((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK;
-}
-
-#define NULLSTARTBLOCK(k) nullstartblock(k)
-static inline xfs_fsblock_t nullstartblock(int k)
-{
- ASSERT(k < (1 << STARTBLOCKVALBITS));
- return STARTBLOCKMASK | (k);
-}
-
-#define STARTBLOCKVAL(x) startblockval(x)
-static inline xfs_filblks_t startblockval(xfs_fsblock_t x)
-{
- return (xfs_filblks_t)((x) & ~STARTBLOCKMASK);
-}
-
-/*
- * Possible extent formats.
- */
-typedef enum {
- XFS_EXTFMT_NOSTATE = 0,
- XFS_EXTFMT_HASSTATE
-} xfs_exntfmt_t;
-
-/*
- * Possible extent states.
- */
-typedef enum {
- XFS_EXT_NORM, XFS_EXT_UNWRITTEN,
- XFS_EXT_DMAPI_OFFLINE, XFS_EXT_INVALID
-} xfs_exntst_t;
+struct xfs_trans;
/*
* Extent state and extent format macros.
@@ -125,165 +33,88 @@ typedef enum {
#define ISUNWRITTEN(x) ((x)->br_state == XFS_EXT_UNWRITTEN)
/*
- * Incore version of above.
+ * Btree block header size depends on a superblock flag.
*/
-typedef struct xfs_bmbt_irec
-{
- xfs_fileoff_t br_startoff; /* starting file offset */
- xfs_fsblock_t br_startblock; /* starting block number */
- xfs_filblks_t br_blockcount; /* number of blocks */
- xfs_exntst_t br_state; /* extent state */
-} xfs_bmbt_irec_t;
-
-/*
- * Key structure for non-leaf levels of the tree.
- */
-typedef struct xfs_bmbt_key {
- __be64 br_startoff; /* starting file offset */
-} xfs_bmbt_key_t, xfs_bmdr_key_t;
-
-/* btree pointer type */
-typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t;
-
-/* btree block header type */
-typedef struct xfs_btree_lblock xfs_bmbt_block_t;
-
-#define XFS_BUF_TO_BMBT_BLOCK(bp) ((xfs_bmbt_block_t *)XFS_BUF_PTR(bp))
-
-#define XFS_BMAP_RBLOCK_DSIZE(lev,cur) ((cur)->bc_private.b.forksize)
-#define XFS_BMAP_RBLOCK_ISIZE(lev,cur) \
- ((int)XFS_IFORK_PTR((cur)->bc_private.b.ip, \
- (cur)->bc_private.b.whichfork)->if_broot_bytes)
-
-#define XFS_BMAP_BLOCK_DMAXRECS(lev,cur) \
- (((lev) == (cur)->bc_nlevels - 1 ? \
- XFS_BTREE_BLOCK_MAXRECS(XFS_BMAP_RBLOCK_DSIZE(lev,cur), \
- xfs_bmdr, (lev) == 0) : \
- ((cur)->bc_mp->m_bmap_dmxr[(lev) != 0])))
-#define XFS_BMAP_BLOCK_IMAXRECS(lev,cur) \
- (((lev) == (cur)->bc_nlevels - 1 ? \
- XFS_BTREE_BLOCK_MAXRECS(XFS_BMAP_RBLOCK_ISIZE(lev,cur),\
- xfs_bmbt, (lev) == 0) : \
- ((cur)->bc_mp->m_bmap_dmxr[(lev) != 0])))
-
-#define XFS_BMAP_BLOCK_DMINRECS(lev,cur) \
- (((lev) == (cur)->bc_nlevels - 1 ? \
- XFS_BTREE_BLOCK_MINRECS(XFS_BMAP_RBLOCK_DSIZE(lev,cur),\
- xfs_bmdr, (lev) == 0) : \
- ((cur)->bc_mp->m_bmap_dmnr[(lev) != 0])))
-#define XFS_BMAP_BLOCK_IMINRECS(lev,cur) \
- (((lev) == (cur)->bc_nlevels - 1 ? \
- XFS_BTREE_BLOCK_MINRECS(XFS_BMAP_RBLOCK_ISIZE(lev,cur),\
- xfs_bmbt, (lev) == 0) : \
- ((cur)->bc_mp->m_bmap_dmnr[(lev) != 0])))
-
-#define XFS_BMAP_REC_DADDR(bb,i,cur) (XFS_BTREE_REC_ADDR(xfs_bmbt, bb, i))
-
-#define XFS_BMAP_REC_IADDR(bb,i,cur) (XFS_BTREE_REC_ADDR(xfs_bmbt, bb, i))
-
-#define XFS_BMAP_KEY_DADDR(bb,i,cur) \
- (XFS_BTREE_KEY_ADDR(xfs_bmbt, bb, i))
-
-#define XFS_BMAP_KEY_IADDR(bb,i,cur) \
- (XFS_BTREE_KEY_ADDR(xfs_bmbt, bb, i))
-
-#define XFS_BMAP_PTR_DADDR(bb,i,cur) \
- (XFS_BTREE_PTR_ADDR(xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \
- be16_to_cpu((bb)->bb_level), cur)))
-#define XFS_BMAP_PTR_IADDR(bb,i,cur) \
- (XFS_BTREE_PTR_ADDR(xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \
- be16_to_cpu((bb)->bb_level), cur)))
+#define XFS_BMBT_BLOCK_LEN(mp) \
+ (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
+ XFS_BTREE_LBLOCK_CRC_LEN : XFS_BTREE_LBLOCK_LEN)
+
+#define XFS_BMBT_REC_ADDR(mp, block, index) \
+ ((xfs_bmbt_rec_t *) \
+ ((char *)(block) + \
+ XFS_BMBT_BLOCK_LEN(mp) + \
+ ((index) - 1) * sizeof(xfs_bmbt_rec_t)))
+
+#define XFS_BMBT_KEY_ADDR(mp, block, index) \
+ ((xfs_bmbt_key_t *) \
+ ((char *)(block) + \
+ XFS_BMBT_BLOCK_LEN(mp) + \
+ ((index) - 1) * sizeof(xfs_bmbt_key_t)))
+
+#define XFS_BMBT_PTR_ADDR(mp, block, index, maxrecs) \
+ ((xfs_bmbt_ptr_t *) \
+ ((char *)(block) + \
+ XFS_BMBT_BLOCK_LEN(mp) + \
+ (maxrecs) * sizeof(xfs_bmbt_key_t) + \
+ ((index) - 1) * sizeof(xfs_bmbt_ptr_t)))
+
+#define XFS_BMDR_REC_ADDR(block, index) \
+ ((xfs_bmdr_rec_t *) \
+ ((char *)(block) + \
+ sizeof(struct xfs_bmdr_block) + \
+ ((index) - 1) * sizeof(xfs_bmdr_rec_t)))
+
+#define XFS_BMDR_KEY_ADDR(block, index) \
+ ((xfs_bmdr_key_t *) \
+ ((char *)(block) + \
+ sizeof(struct xfs_bmdr_block) + \
+ ((index) - 1) * sizeof(xfs_bmdr_key_t)))
+
+#define XFS_BMDR_PTR_ADDR(block, index, maxrecs) \
+ ((xfs_bmdr_ptr_t *) \
+ ((char *)(block) + \
+ sizeof(struct xfs_bmdr_block) + \
+ (maxrecs) * sizeof(xfs_bmdr_key_t) + \
+ ((index) - 1) * sizeof(xfs_bmdr_ptr_t)))
/*
* These are to be used when we know the size of the block and
* we don't have a cursor.
*/
-#define XFS_BMAP_BROOT_REC_ADDR(bb,i,sz) \
- (XFS_BTREE_REC_ADDR(xfs_bmbt,bb,i))
-#define XFS_BMAP_BROOT_KEY_ADDR(bb,i,sz) \
- (XFS_BTREE_KEY_ADDR(xfs_bmbt,bb,i))
-#define XFS_BMAP_BROOT_PTR_ADDR(bb,i,sz) \
- (XFS_BTREE_PTR_ADDR(xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz)))
-
-#define XFS_BMAP_BROOT_NUMRECS(bb) be16_to_cpu((bb)->bb_numrecs)
-#define XFS_BMAP_BROOT_MAXRECS(sz) XFS_BTREE_BLOCK_MAXRECS(sz,xfs_bmbt,0)
+#define XFS_BMAP_BROOT_PTR_ADDR(mp, bb, i, sz) \
+ XFS_BMBT_PTR_ADDR(mp, bb, i, xfs_bmbt_maxrecs(mp, sz, 0))
-#define XFS_BMAP_BROOT_SPACE_CALC(nrecs) \
- (int)(sizeof(xfs_bmbt_block_t) + \
+#define XFS_BMAP_BROOT_SPACE_CALC(mp, nrecs) \
+ (int)(XFS_BMBT_BLOCK_LEN(mp) + \
((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t))))
-#define XFS_BMAP_BROOT_SPACE(bb) \
- (XFS_BMAP_BROOT_SPACE_CALC(be16_to_cpu((bb)->bb_numrecs)))
+#define XFS_BMAP_BROOT_SPACE(mp, bb) \
+ (XFS_BMAP_BROOT_SPACE_CALC(mp, be16_to_cpu((bb)->bb_numrecs)))
#define XFS_BMDR_SPACE_CALC(nrecs) \
(int)(sizeof(xfs_bmdr_block_t) + \
((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t))))
+#define XFS_BMAP_BMDR_SPACE(bb) \
+ (XFS_BMDR_SPACE_CALC(be16_to_cpu((bb)->bb_numrecs)))
/*
* Maximum number of bmap btree levels.
*/
#define XFS_BM_MAXLEVELS(mp,w) ((mp)->m_bm_maxlevels[(w)])
-#define XFS_BMAP_SANITY_CHECK(mp,bb,level) \
- (be32_to_cpu((bb)->bb_magic) == XFS_BMAP_MAGIC && \
- be16_to_cpu((bb)->bb_level) == level && \
- be16_to_cpu((bb)->bb_numrecs) > 0 && \
- be16_to_cpu((bb)->bb_numrecs) <= (mp)->m_bmap_dmxr[(level) != 0])
-
-
-#ifdef __KERNEL__
-
-#if defined(XFS_BMBT_TRACE)
-/*
- * Trace buffer entry types.
- */
-#define XFS_BMBT_KTRACE_ARGBI 1
-#define XFS_BMBT_KTRACE_ARGBII 2
-#define XFS_BMBT_KTRACE_ARGFFFI 3
-#define XFS_BMBT_KTRACE_ARGI 4
-#define XFS_BMBT_KTRACE_ARGIFK 5
-#define XFS_BMBT_KTRACE_ARGIFR 6
-#define XFS_BMBT_KTRACE_ARGIK 7
-#define XFS_BMBT_KTRACE_CUR 8
-
-#define XFS_BMBT_TRACE_SIZE 4096 /* size of global trace buffer */
-#define XFS_BMBT_KTRACE_SIZE 32 /* size of per-inode trace buffer */
-extern ktrace_t *xfs_bmbt_trace_buf;
-#endif
-
/*
* Prototypes for xfs_bmap.c to call.
*/
-extern void xfs_bmdr_to_bmbt(xfs_bmdr_block_t *, int, xfs_bmbt_block_t *, int);
-extern int xfs_bmbt_decrement(struct xfs_btree_cur *, int, int *);
-extern int xfs_bmbt_delete(struct xfs_btree_cur *, int *);
+extern void xfs_bmdr_to_bmbt(struct xfs_inode *, xfs_bmdr_block_t *, int,
+ struct xfs_btree_block *, int);
extern void xfs_bmbt_get_all(xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s);
-extern xfs_bmbt_block_t *xfs_bmbt_get_block(struct xfs_btree_cur *cur,
- int, struct xfs_buf **bpp);
extern xfs_filblks_t xfs_bmbt_get_blockcount(xfs_bmbt_rec_host_t *r);
extern xfs_fsblock_t xfs_bmbt_get_startblock(xfs_bmbt_rec_host_t *r);
extern xfs_fileoff_t xfs_bmbt_get_startoff(xfs_bmbt_rec_host_t *r);
extern xfs_exntst_t xfs_bmbt_get_state(xfs_bmbt_rec_host_t *r);
-extern void xfs_bmbt_disk_get_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s);
extern xfs_filblks_t xfs_bmbt_disk_get_blockcount(xfs_bmbt_rec_t *r);
extern xfs_fileoff_t xfs_bmbt_disk_get_startoff(xfs_bmbt_rec_t *r);
-extern int xfs_bmbt_increment(struct xfs_btree_cur *, int, int *);
-extern int xfs_bmbt_insert(struct xfs_btree_cur *, int *);
-extern void xfs_bmbt_log_block(struct xfs_btree_cur *, struct xfs_buf *, int);
-extern void xfs_bmbt_log_recs(struct xfs_btree_cur *, struct xfs_buf *, int,
- int);
-extern int xfs_bmbt_lookup_eq(struct xfs_btree_cur *, xfs_fileoff_t,
- xfs_fsblock_t, xfs_filblks_t, int *);
-extern int xfs_bmbt_lookup_ge(struct xfs_btree_cur *, xfs_fileoff_t,
- xfs_fsblock_t, xfs_filblks_t, int *);
-
-/*
- * Give the bmap btree a new root block. Copy the old broot contents
- * down into a real block and make the broot point to it.
- */
-extern int xfs_bmbt_newroot(struct xfs_btree_cur *cur, int *lflags, int *stat);
-
extern void xfs_bmbt_set_all(xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s);
extern void xfs_bmbt_set_allf(xfs_bmbt_rec_host_t *r, xfs_fileoff_t o,
xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v);
@@ -292,14 +123,21 @@ extern void xfs_bmbt_set_startblock(xfs_bmbt_rec_host_t *r, xfs_fsblock_t v);
extern void xfs_bmbt_set_startoff(xfs_bmbt_rec_host_t *r, xfs_fileoff_t v);
extern void xfs_bmbt_set_state(xfs_bmbt_rec_host_t *r, xfs_exntst_t v);
-extern void xfs_bmbt_disk_set_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s);
extern void xfs_bmbt_disk_set_allf(xfs_bmbt_rec_t *r, xfs_fileoff_t o,
xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v);
-extern void xfs_bmbt_to_bmdr(xfs_bmbt_block_t *, int, xfs_bmdr_block_t *, int);
-extern int xfs_bmbt_update(struct xfs_btree_cur *, xfs_fileoff_t,
- xfs_fsblock_t, xfs_filblks_t, xfs_exntst_t);
+extern void xfs_bmbt_to_bmdr(struct xfs_mount *, struct xfs_btree_block *, int,
+ xfs_bmdr_block_t *, int);
+
+extern int xfs_bmbt_get_maxrecs(struct xfs_btree_cur *, int level);
+extern int xfs_bmdr_maxrecs(int blocklen, int leaf);
+extern int xfs_bmbt_maxrecs(struct xfs_mount *, int blocklen, int leaf);
+
+extern int xfs_bmbt_change_owner(struct xfs_trans *tp, struct xfs_inode *ip,
+ int whichfork, xfs_ino_t new_owner,
+ struct list_head *buffer_list);
-#endif /* __KERNEL__ */
+extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *,
+ struct xfs_trans *, struct xfs_inode *, int);
#endif /* __XFS_BMAP_BTREE_H__ */
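The _ADDR macros above are plain offset arithmetic: entries start right after the block header, and in node blocks all keys precede all pointers. A standalone sketch of that layout math, reusing the illustrative sizes from the earlier maxrecs example (real values come from the headers):

#include <stdio.h>

#define BLOCK_HDR_LEN	24	/* assumed non-CRC long-format block header */
#define KEY_SIZE	8	/* xfs_bmbt_key_t */
#define PTR_SIZE	8	/* xfs_bmbt_ptr_t */

/* Byte offset of key "index" (1-based), mirroring XFS_BMBT_KEY_ADDR. */
static long key_offset(int index)
{
	return BLOCK_HDR_LEN + (long)(index - 1) * KEY_SIZE;
}

/* Byte offset of pointer "index": all maxrecs keys come first. */
static long ptr_offset(int index, int maxrecs)
{
	return BLOCK_HDR_LEN + (long)maxrecs * KEY_SIZE +
	       (long)(index - 1) * PTR_SIZE;
}

int main(void)
{
	int maxrecs = 254;	/* e.g. a 4096-byte block, as computed earlier */

	printf("key 1 at byte offset %ld\n", key_offset(1));		/* 24 */
	printf("ptr 1 at byte offset %ld\n", ptr_offset(1, maxrecs));	/* 2056 */
	return 0;
}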
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
new file mode 100644
index 00000000000..64731ef3324
--- /dev/null
+++ b/fs/xfs/xfs_bmap_util.c
@@ -0,0 +1,1897 @@
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * Copyright (c) 2012 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_format.h"
+#include "xfs_inode.h"
+#include "xfs_btree.h"
+#include "xfs_trans.h"
+#include "xfs_extfree_item.h"
+#include "xfs_alloc.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_quota.h"
+#include "xfs_trans_space.h"
+#include "xfs_trace.h"
+#include "xfs_icache.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
+
+/* Kernel only BMAP related definitions and functions */
+
+/*
+ * Convert the given file system block to a disk block. We have to treat it
+ * differently based on whether the file is a real time file or not, because the
+ * bmap code does.
+ */
+xfs_daddr_t
+xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
+{
+ return XFS_IS_REALTIME_INODE(ip) ?
+ (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
+ XFS_FSB_TO_DADDR((ip)->i_mount, (fsb));
+}
+
+/*
+ * Routine to be called at transaction's end by the xfs_bmapi()/xfs_bunmapi()
+ * caller. Frees all the extents that need freeing, which must be done
+ * last due to locking considerations. We never free any extents in
+ * the first transaction.
+ *
+ * Return 1 in the committed parameter if the given transaction was
+ * committed and a new one started, and 0 otherwise.
+ */
+int /* error */
+xfs_bmap_finish(
+ xfs_trans_t **tp, /* transaction pointer addr */
+ xfs_bmap_free_t *flist, /* i/o: list extents to free */
+ int *committed) /* xact committed or not */
+{
+ xfs_efd_log_item_t *efd; /* extent free data */
+ xfs_efi_log_item_t *efi; /* extent free intention */
+ int error; /* error return value */
+ xfs_bmap_free_item_t *free; /* free extent item */
+ struct xfs_trans_res tres; /* new log reservation */
+ xfs_mount_t *mp; /* filesystem mount structure */
+ xfs_bmap_free_item_t *next; /* next item on free list */
+ xfs_trans_t *ntp; /* new transaction pointer */
+
+ ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
+ if (flist->xbf_count == 0) {
+ *committed = 0;
+ return 0;
+ }
+ ntp = *tp;
+ efi = xfs_trans_get_efi(ntp, flist->xbf_count);
+ for (free = flist->xbf_first; free; free = free->xbfi_next)
+ xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
+ free->xbfi_blockcount);
+
+ tres.tr_logres = ntp->t_log_res;
+ tres.tr_logcount = ntp->t_log_count;
+ tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
+ ntp = xfs_trans_dup(*tp);
+ error = xfs_trans_commit(*tp, 0);
+ *tp = ntp;
+ *committed = 1;
+ /*
+ * We have a new transaction, so we should return committed=1,
+ * even though we're returning an error.
+ */
+ if (error)
+ return error;
+
+ /*
+ * transaction commit worked ok so we can drop the extra ticket
+ * reference that we gained in xfs_trans_dup()
+ */
+ xfs_log_ticket_put(ntp->t_ticket);
+
+ error = xfs_trans_reserve(ntp, &tres, 0, 0);
+ if (error)
+ return error;
+ efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
+ for (free = flist->xbf_first; free != NULL; free = next) {
+ next = free->xbfi_next;
+ if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
+ free->xbfi_blockcount))) {
+ /*
+ * The bmap free list will be cleaned up at a
+ * higher level. The EFI will be canceled when
+ * this transaction is aborted.
+ * Need to force shutdown here to make sure it
+ * happens, since this transaction may not be
+ * dirty yet.
+ */
+ mp = ntp->t_mountp;
+ if (!XFS_FORCED_SHUTDOWN(mp))
+ xfs_force_shutdown(mp,
+ (error == EFSCORRUPTED) ?
+ SHUTDOWN_CORRUPT_INCORE :
+ SHUTDOWN_META_IO_ERROR);
+ return error;
+ }
+ xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
+ free->xbfi_blockcount);
+ xfs_bmap_del_free(flist, NULL, free);
+ }
+ return 0;
+}
+
+int
+xfs_bmap_rtalloc(
+ struct xfs_bmalloca *ap) /* bmap alloc argument struct */
+{
+ xfs_alloctype_t atype = 0; /* type for allocation routines */
+ int error; /* error return value */
+ xfs_mount_t *mp; /* mount point structure */
+ xfs_extlen_t prod = 0; /* product factor for allocators */
+ xfs_extlen_t ralen = 0; /* realtime allocation length */
+ xfs_extlen_t align; /* minimum allocation alignment */
+ xfs_rtblock_t rtb;
+
+ mp = ap->ip->i_mount;
+ align = xfs_get_extsz_hint(ap->ip);
+ prod = align / mp->m_sb.sb_rextsize;
+ error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
+ align, 1, ap->eof, 0,
+ ap->conv, &ap->offset, &ap->length);
+ if (error)
+ return error;
+ ASSERT(ap->length);
+ ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
+
+ /*
+ * If the offset & length are not perfectly aligned
+ * then kill prod; it will just get us in trouble.
+ */
+ if (do_mod(ap->offset, align) || ap->length % align)
+ prod = 1;
+ /*
+ * Set ralen to be the actual requested length in rtextents.
+ */
+ ralen = ap->length / mp->m_sb.sb_rextsize;
+ /*
+ * If the old value was close enough to MAXEXTLEN that
+ * we rounded up to it, cut it back so it's valid again.
+ * Note that if it's a really large request (bigger than
+ * MAXEXTLEN), we don't hear about that number, and can't
+ * adjust the starting point to match it.
+ */
+ if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
+ ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
+
+ /*
+ * Lock out other modifications to the RT bitmap inode.
+ */
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+
+ /*
+ * If it's an allocation to an empty file at offset 0,
+ * pick an extent that will space things out in the rt area.
+ */
+ if (ap->eof && ap->offset == 0) {
+ xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
+
+ error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
+ if (error)
+ return error;
+ ap->blkno = rtx * mp->m_sb.sb_rextsize;
+ } else {
+ ap->blkno = 0;
+ }
+
+ xfs_bmap_adjacent(ap);
+
+ /*
+ * Realtime allocation, done through xfs_rtallocate_extent.
+ */
+ atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
+ do_div(ap->blkno, mp->m_sb.sb_rextsize);
+ rtb = ap->blkno;
+ ap->length = ralen;
+ if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
+ &ralen, atype, ap->wasdel, prod, &rtb)))
+ return error;
+ if (rtb == NULLFSBLOCK && prod > 1 &&
+ (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
+ ap->length, &ralen, atype,
+ ap->wasdel, 1, &rtb)))
+ return error;
+ ap->blkno = rtb;
+ if (ap->blkno != NULLFSBLOCK) {
+ ap->blkno *= mp->m_sb.sb_rextsize;
+ ralen *= mp->m_sb.sb_rextsize;
+ ap->length = ralen;
+ ap->ip->i_d.di_nblocks += ralen;
+ xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
+ if (ap->wasdel)
+ ap->ip->i_delayed_blks -= ralen;
+ /*
+ * Adjust the disk quota also. This was reserved
+ * earlier.
+ */
+ xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
+ ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
+ XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
+ } else {
+ ap->length = 0;
+ }
+ return 0;
+}
+
+/*
+ * Check if the endoff is outside the last extent. If so the caller will grow
+ * the allocation to a stripe unit boundary. All offsets are considered outside
+ * the end of file for an empty fork, so 1 is returned in *eof in that case.
+ */
+int
+xfs_bmap_eof(
+ struct xfs_inode *ip,
+ xfs_fileoff_t endoff,
+ int whichfork,
+ int *eof)
+{
+ struct xfs_bmbt_irec rec;
+ int error;
+
+ error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
+ if (error || *eof)
+ return error;
+
+ *eof = endoff >= rec.br_startoff + rec.br_blockcount;
+ return 0;
+}
+
+/*
+ * Extent tree block counting routines.
+ */
+
+/*
+ * Count leaf blocks given a range of extent records.
+ */
+STATIC void
+xfs_bmap_count_leaves(
+ xfs_ifork_t *ifp,
+ xfs_extnum_t idx,
+ int numrecs,
+ int *count)
+{
+ int b;
+
+ for (b = 0; b < numrecs; b++) {
+ xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
+ *count += xfs_bmbt_get_blockcount(frp);
+ }
+}
+
+/*
+ * Count leaf blocks given a range of extent records originally
+ * in btree format.
+ */
+STATIC void
+xfs_bmap_disk_count_leaves(
+ struct xfs_mount *mp,
+ struct xfs_btree_block *block,
+ int numrecs,
+ int *count)
+{
+ int b;
+ xfs_bmbt_rec_t *frp;
+
+ for (b = 1; b <= numrecs; b++) {
+ frp = XFS_BMBT_REC_ADDR(mp, block, b);
+ *count += xfs_bmbt_disk_get_blockcount(frp);
+ }
+}
+
+/*
+ * Recursively walks each level of a btree
+ * to count total fsblocks in use.
+ */
+STATIC int /* error */
+xfs_bmap_count_tree(
+ xfs_mount_t *mp, /* file system mount point */
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_ifork_t *ifp, /* inode fork pointer */
+ xfs_fsblock_t blockno, /* file system block number */
+ int levelin, /* level in btree */
+ int *count) /* Count of blocks */
+{
+ int error;
+ xfs_buf_t *bp, *nbp;
+ int level = levelin;
+ __be64 *pp;
+ xfs_fsblock_t bno = blockno;
+ xfs_fsblock_t nextbno;
+ struct xfs_btree_block *block, *nextblock;
+ int numrecs;
+
+ error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
+ &xfs_bmbt_buf_ops);
+ if (error)
+ return error;
+ *count += 1;
+ block = XFS_BUF_TO_BLOCK(bp);
+
+ if (--level) {
+ /* Not at node above leaves, count this level of nodes */
+ nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
+ while (nextbno != NULLFSBLOCK) {
+ error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
+ XFS_BMAP_BTREE_REF,
+ &xfs_bmbt_buf_ops);
+ if (error)
+ return error;
+ *count += 1;
+ nextblock = XFS_BUF_TO_BLOCK(nbp);
+ nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
+ xfs_trans_brelse(tp, nbp);
+ }
+
+ /* Dive to the next level */
+ pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
+ bno = be64_to_cpu(*pp);
+ if (unlikely((error =
+ xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
+ xfs_trans_brelse(tp, bp);
+ XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
+ XFS_ERRLEVEL_LOW, mp);
+ return XFS_ERROR(EFSCORRUPTED);
+ }
+ xfs_trans_brelse(tp, bp);
+ } else {
+ /* count all level 1 nodes and their leaves */
+ for (;;) {
+ nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
+ numrecs = be16_to_cpu(block->bb_numrecs);
+ xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
+ xfs_trans_brelse(tp, bp);
+ if (nextbno == NULLFSBLOCK)
+ break;
+ bno = nextbno;
+ error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
+ XFS_BMAP_BTREE_REF,
+ &xfs_bmbt_buf_ops);
+ if (error)
+ return error;
+ *count += 1;
+ block = XFS_BUF_TO_BLOCK(bp);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Count fsblocks of the given fork.
+ */
+int /* error */
+xfs_bmap_count_blocks(
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_inode_t *ip, /* incore inode */
+ int whichfork, /* data or attr fork */
+ int *count) /* out: count of blocks */
+{
+ struct xfs_btree_block *block; /* current btree block */
+ xfs_fsblock_t bno; /* block # of "block" */
+ xfs_ifork_t *ifp; /* fork structure */
+ int level; /* btree level, for checking */
+ xfs_mount_t *mp; /* file system mount structure */
+ __be64 *pp; /* pointer to block address */
+
+ bno = NULLFSBLOCK;
+ mp = ip->i_mount;
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+ if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
+ xfs_bmap_count_leaves(ifp, 0,
+ ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
+ count);
+ return 0;
+ }
+
+ /*
+ * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
+ */
+ block = ifp->if_broot;
+ level = be16_to_cpu(block->bb_level);
+ ASSERT(level > 0);
+ pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
+ bno = be64_to_cpu(*pp);
+ ASSERT(bno != NULLDFSBNO);
+ ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
+ ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
+
+ if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
+ XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
+ mp);
+ return XFS_ERROR(EFSCORRUPTED);
+ }
+
+ return 0;
+}
+
+/*
+ * returns 1 for success, 0 if we failed to map the extent.
+ */
+STATIC int
+xfs_getbmapx_fix_eof_hole(
+ xfs_inode_t *ip, /* xfs incore inode pointer */
+ struct getbmapx *out, /* output structure */
+ int prealloced, /* this is a file with
+ * preallocated data space */
+ __int64_t end, /* last block requested */
+ xfs_fsblock_t startblock)
+{
+ __int64_t fixlen;
+ xfs_mount_t *mp; /* file system mount point */
+ xfs_ifork_t *ifp; /* inode fork pointer */
+ xfs_extnum_t lastx; /* last extent pointer */
+ xfs_fileoff_t fileblock;
+
+ if (startblock == HOLESTARTBLOCK) {
+ mp = ip->i_mount;
+ out->bmv_block = -1;
+ fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
+ fixlen -= out->bmv_offset;
+ if (prealloced && out->bmv_offset + out->bmv_length == end) {
+ /* Came to hole at EOF. Trim it. */
+ if (fixlen <= 0)
+ return 0;
+ out->bmv_length = fixlen;
+ }
+ } else {
+ if (startblock == DELAYSTARTBLOCK)
+ out->bmv_block = -2;
+ else
+ out->bmv_block = xfs_fsb_to_db(ip, startblock);
+ fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
+ ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
+ (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
+ out->bmv_oflags |= BMV_OF_LAST;
+ }
+
+ return 1;
+}
+
+/*
+ * Get inode's extents as described in bmv, and format for output.
+ * Calls formatter to fill the user's buffer until all extents
+ * are mapped, until the passed-in bmv->bmv_count slots have
+ * been filled, or until the formatter short-circuits the loop,
+ * if it is tracking filled-in extents on its own.
+ */
+int /* error code */
+xfs_getbmap(
+ xfs_inode_t *ip,
+ struct getbmapx *bmv, /* user bmap structure */
+ xfs_bmap_format_t formatter, /* format to user */
+ void *arg) /* formatter arg */
+{
+ __int64_t bmvend; /* last block requested */
+ int error = 0; /* return value */
+ __int64_t fixlen; /* length for -1 case */
+ int i; /* extent number */
+ int lock; /* lock state */
+ xfs_bmbt_irec_t *map; /* buffer for user's data */
+ xfs_mount_t *mp; /* file system mount point */
+ int nex; /* # of user extents can do */
+ int nexleft; /* # of user extents left */
+ int subnex; /* # of bmapi's can do */
+ int nmap; /* number of map entries */
+ struct getbmapx *out; /* output structure */
+ int whichfork; /* data or attr fork */
+ int prealloced; /* this is a file with
+ * preallocated data space */
+ int iflags; /* interface flags */
+ int bmapi_flags; /* flags for xfs_bmapi */
+ int cur_ext = 0;
+
+ mp = ip->i_mount;
+ iflags = bmv->bmv_iflags;
+ whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
+
+ if (whichfork == XFS_ATTR_FORK) {
+ if (XFS_IFORK_Q(ip)) {
+ if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
+ ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
+ ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
+ return XFS_ERROR(EINVAL);
+ } else if (unlikely(
+ ip->i_d.di_aformat != 0 &&
+ ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
+ XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
+ ip->i_mount);
+ return XFS_ERROR(EFSCORRUPTED);
+ }
+
+ prealloced = 0;
+ fixlen = 1LL << 32;
+ } else {
+ if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
+ ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
+ ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+ return XFS_ERROR(EINVAL);
+
+ if (xfs_get_extsz_hint(ip) ||
+ ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
+ prealloced = 1;
+ fixlen = mp->m_super->s_maxbytes;
+ } else {
+ prealloced = 0;
+ fixlen = XFS_ISIZE(ip);
+ }
+ }
+
+ if (bmv->bmv_length == -1) {
+ fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
+ bmv->bmv_length =
+ max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
+ } else if (bmv->bmv_length == 0) {
+ bmv->bmv_entries = 0;
+ return 0;
+ } else if (bmv->bmv_length < 0) {
+ return XFS_ERROR(EINVAL);
+ }
+
+ nex = bmv->bmv_count - 1;
+ if (nex <= 0)
+ return XFS_ERROR(EINVAL);
+ bmvend = bmv->bmv_offset + bmv->bmv_length;
+
+
+ if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
+ return XFS_ERROR(ENOMEM);
+ out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
+ if (!out)
+ return XFS_ERROR(ENOMEM);
+
+ xfs_ilock(ip, XFS_IOLOCK_SHARED);
+ if (whichfork == XFS_DATA_FORK) {
+ if (!(iflags & BMV_IF_DELALLOC) &&
+ (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
+ error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
+ if (error)
+ goto out_unlock_iolock;
+
+ /*
+ * Even after flushing the inode, there can still be
+ * delalloc blocks on the inode beyond EOF due to
+ * speculative preallocation. These are not removed
+ * until the release function is called or the inode
+ * is inactivated. Hence we cannot assert here that
+ * ip->i_delayed_blks == 0.
+ */
+ }
+
+ lock = xfs_ilock_data_map_shared(ip);
+ } else {
+ lock = xfs_ilock_attr_map_shared(ip);
+ }
+
+ /*
+ * Don't let nex be bigger than the number of extents
+ * we can have assuming alternating holes and real extents.
+ */
+ if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
+ nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
+
+ bmapi_flags = xfs_bmapi_aflag(whichfork);
+ if (!(iflags & BMV_IF_PREALLOC))
+ bmapi_flags |= XFS_BMAPI_IGSTATE;
+
+ /*
+ * Allocate enough space to handle "subnex" maps at a time.
+ */
+ error = ENOMEM;
+ subnex = 16;
+ map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
+ if (!map)
+ goto out_unlock_ilock;
+
+ bmv->bmv_entries = 0;
+
+ if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
+ (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
+ error = 0;
+ goto out_free_map;
+ }
+
+ nexleft = nex;
+
+ do {
+ nmap = (nexleft > subnex) ? subnex : nexleft;
+ error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
+ XFS_BB_TO_FSB(mp, bmv->bmv_length),
+ map, &nmap, bmapi_flags);
+ if (error)
+ goto out_free_map;
+ ASSERT(nmap <= subnex);
+
+ for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
+ out[cur_ext].bmv_oflags = 0;
+ if (map[i].br_state == XFS_EXT_UNWRITTEN)
+ out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
+ else if (map[i].br_startblock == DELAYSTARTBLOCK)
+ out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
+ out[cur_ext].bmv_offset =
+ XFS_FSB_TO_BB(mp, map[i].br_startoff);
+ out[cur_ext].bmv_length =
+ XFS_FSB_TO_BB(mp, map[i].br_blockcount);
+ out[cur_ext].bmv_unused1 = 0;
+ out[cur_ext].bmv_unused2 = 0;
+
+ /*
+ * delayed allocation extents that start beyond EOF can
+ * occur due to speculative EOF allocation when the
+ * delalloc extent is larger than the largest freespace
+ * extent at conversion time. These extents cannot be
+ * converted by data writeback, so can exist here even
+ * if we are not supposed to be finding delalloc
+ * extents.
+ */
+ if (map[i].br_startblock == DELAYSTARTBLOCK &&
+ map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
+ ASSERT((iflags & BMV_IF_DELALLOC) != 0);
+
+ if (map[i].br_startblock == HOLESTARTBLOCK &&
+ whichfork == XFS_ATTR_FORK) {
+ /* came to the end of attribute fork */
+ out[cur_ext].bmv_oflags |= BMV_OF_LAST;
+ goto out_free_map;
+ }
+
+ if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
+ prealloced, bmvend,
+ map[i].br_startblock))
+ goto out_free_map;
+
+ bmv->bmv_offset =
+ out[cur_ext].bmv_offset +
+ out[cur_ext].bmv_length;
+ bmv->bmv_length =
+ max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
+
+ /*
+ * In case we don't want to return the hole,
+ * don't increase cur_ext so that we can reuse
+ * it in the next loop.
+ */
+ if ((iflags & BMV_IF_NO_HOLES) &&
+ map[i].br_startblock == HOLESTARTBLOCK) {
+ memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
+ continue;
+ }
+
+ nexleft--;
+ bmv->bmv_entries++;
+ cur_ext++;
+ }
+ } while (nmap && nexleft && bmv->bmv_length);
+
+ out_free_map:
+ kmem_free(map);
+ out_unlock_ilock:
+ xfs_iunlock(ip, lock);
+ out_unlock_iolock:
+ xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+
+ for (i = 0; i < cur_ext; i++) {
+ int full = 0; /* user array is full */
+
+ /* format results & advance arg */
+ error = formatter(&arg, &out[i], &full);
+ if (error || full)
+ break;
+ }
+
+ kmem_free(out);
+ return error;
+}
+
+/*
+ * Dead simple method of punching delayed allocation blocks from a range in
+ * the inode. It walks a block at a time, so it will be slow, but it is only
+ * executed in rare error cases, so the overhead is not critical. This will
+ * always punch out
+ * both the start and end blocks, even if the ranges only partially overlap
+ * them, so it is up to the caller to ensure that partial blocks are not
+ * passed in.
+ */
+int
+xfs_bmap_punch_delalloc_range(
+ struct xfs_inode *ip,
+ xfs_fileoff_t start_fsb,
+ xfs_fileoff_t length)
+{
+ xfs_fileoff_t remaining = length;
+ int error = 0;
+
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+ do {
+ int done;
+ xfs_bmbt_irec_t imap;
+ int nimaps = 1;
+ xfs_fsblock_t firstblock;
+ xfs_bmap_free_t flist;
+
+ /*
+ * Map the range first and check that it is a delalloc extent
+ * before trying to unmap the range. Otherwise we will be
+ * trying to remove a real extent (which requires a
+ * transaction) or a hole, which is probably a bad idea...
+ */
+ error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
+ XFS_BMAPI_ENTIRE);
+
+ if (error) {
+ /* something screwed, just bail */
+ if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ xfs_alert(ip->i_mount,
+ "Failed delalloc mapping lookup ino %lld fsb %lld.",
+ ip->i_ino, start_fsb);
+ }
+ break;
+ }
+ if (!nimaps) {
+ /* nothing there */
+ goto next_block;
+ }
+ if (imap.br_startblock != DELAYSTARTBLOCK) {
+ /* been converted, ignore */
+ goto next_block;
+ }
+ WARN_ON(imap.br_blockcount == 0);
+
+ /*
+ * Note: while we initialise the firstblock/flist pair, they
+ * should never be used because blocks should never be
+ * allocated or freed for a delalloc extent, and hence we don't
+ * need to cancel or finish them after the xfs_bunmapi() call.
+ */
+ xfs_bmap_init(&flist, &firstblock);
+ error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
+ &flist, &done);
+ if (error)
+ break;
+
+ ASSERT(!flist.xbf_count && !flist.xbf_first);
+next_block:
+ start_fsb++;
+ remaining--;
+ } while (remaining > 0);
+
+ return error;
+}
+
+/*
+ * Test whether it is appropriate to check an inode for and free post EOF
+ * blocks. The 'force' parameter determines whether we should also consider
+ * regular files that are marked preallocated or append-only.
+ */
+bool
+xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
+{
+ /* prealloc/delalloc exists only on regular files */
+ if (!S_ISREG(ip->i_d.di_mode))
+ return false;
+
+ /*
+ * Zero sized files with no cached pages and delalloc blocks will not
+ * have speculative prealloc/delalloc blocks to remove.
+ */
+ if (VFS_I(ip)->i_size == 0 &&
+ VN_CACHED(VFS_I(ip)) == 0 &&
+ ip->i_delayed_blks == 0)
+ return false;
+
+ /* If we haven't read in the extent list, then don't do it now. */
+ if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
+ return false;
+
+ /*
+ * Do not free real preallocated or append-only files unless the file
+ * has delalloc blocks and we are forced to remove them.
+ */
+ if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
+ if (!force || ip->i_delayed_blks == 0)
+ return false;
+
+ return true;
+}
+
+/*
+ * This is called by xfs_inactive to free any blocks beyond eof
+ * when the link count isn't zero and by xfs_dm_punch_hole() when
+ * punching a hole to EOF.
+ */
+int
+xfs_free_eofblocks(
+ xfs_mount_t *mp,
+ xfs_inode_t *ip,
+ bool need_iolock)
+{
+ xfs_trans_t *tp;
+ int error;
+ xfs_fileoff_t end_fsb;
+ xfs_fileoff_t last_fsb;
+ xfs_filblks_t map_len;
+ int nimaps;
+ xfs_bmbt_irec_t imap;
+
+ /*
+ * Figure out if there are any blocks beyond the end
+ * of the file. If not, then there is nothing to do.
+ */
+ end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
+ last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
+ if (last_fsb <= end_fsb)
+ return 0;
+ map_len = last_fsb - end_fsb;
+
+ nimaps = 1;
+ xfs_ilock(ip, XFS_ILOCK_SHARED);
+ error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+ if (!error && (nimaps != 0) &&
+ (imap.br_startblock != HOLESTARTBLOCK ||
+ ip->i_delayed_blks)) {
+ /*
+ * Attach the dquots to the inode up front.
+ */
+ error = xfs_qm_dqattach(ip, 0);
+ if (error)
+ return error;
+
+ /*
+ * There are blocks after the end of file.
+ * Free them up now by truncating the file to
+ * its current size.
+ */
+ tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+
+ if (need_iolock) {
+ if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
+ xfs_trans_cancel(tp, 0);
+ return EAGAIN;
+ }
+ }
+
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+ if (error) {
+ ASSERT(XFS_FORCED_SHUTDOWN(mp));
+ xfs_trans_cancel(tp, 0);
+ if (need_iolock)
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ return error;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
+
+ /*
+ * Do not update the on-disk file size. If we update the
+ * on-disk file size and then the system crashes before the
+ * contents of the file are flushed to disk then the files
+ * may be full of holes (ie NULL files bug).
+ */
+ error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
+ XFS_ISIZE(ip));
+ if (error) {
+ /*
+ * If we get an error at this point we simply don't
+ * bother truncating the file.
+ */
+ xfs_trans_cancel(tp,
+ (XFS_TRANS_RELEASE_LOG_RES |
+ XFS_TRANS_ABORT));
+ } else {
+ error = xfs_trans_commit(tp,
+ XFS_TRANS_RELEASE_LOG_RES);
+ if (!error)
+ xfs_inode_clear_eofblocks_tag(ip);
+ }
+
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ if (need_iolock)
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ }
+ return error;
+}
+
+int
+xfs_alloc_file_space(
+ struct xfs_inode *ip,
+ xfs_off_t offset,
+ xfs_off_t len,
+ int alloc_type)
+{
+ xfs_mount_t *mp = ip->i_mount;
+ xfs_off_t count;
+ xfs_filblks_t allocated_fsb;
+ xfs_filblks_t allocatesize_fsb;
+ xfs_extlen_t extsz, temp;
+ xfs_fileoff_t startoffset_fsb;
+ xfs_fsblock_t firstfsb;
+ int nimaps;
+ int quota_flag;
+ int rt;
+ xfs_trans_t *tp;
+ xfs_bmbt_irec_t imaps[1], *imapp;
+ xfs_bmap_free_t free_list;
+ uint qblocks, resblks, resrtextents;
+ int committed;
+ int error;
+
+ trace_xfs_alloc_file_space(ip);
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return XFS_ERROR(EIO);
+
+ error = xfs_qm_dqattach(ip, 0);
+ if (error)
+ return error;
+
+ if (len <= 0)
+ return XFS_ERROR(EINVAL);
+
+ rt = XFS_IS_REALTIME_INODE(ip);
+ extsz = xfs_get_extsz_hint(ip);
+
+ count = len;
+ imapp = &imaps[0];
+ nimaps = 1;
+ startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
+ allocatesize_fsb = XFS_B_TO_FSB(mp, count);
+
+ /*
+ * Allocate file space until done or until there is an error
+ */
+ while (allocatesize_fsb && !error) {
+ xfs_fileoff_t s, e;
+
+ /*
+ * Determine space reservations for data/realtime.
+ */
+ if (unlikely(extsz)) {
+ s = startoffset_fsb;
+ do_div(s, extsz);
+ s *= extsz;
+ e = startoffset_fsb + allocatesize_fsb;
+ if ((temp = do_mod(startoffset_fsb, extsz)))
+ e += temp;
+ if ((temp = do_mod(e, extsz)))
+ e += extsz - temp;
+ } else {
+ s = 0;
+ e = allocatesize_fsb;
+ }
+
+ /*
+ * The transaction reservation is limited to a 32-bit block
+ * count, hence we need to limit the number of blocks we are
+ * trying to reserve to avoid an overflow. We can't allocate
+ * more than @nimaps extents, and an extent is limited on disk
+ * to MAXEXTLEN (21 bits), so use that to enforce the limit.
+ */
+ resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
+ if (unlikely(rt)) {
+ resrtextents = qblocks = resblks;
+ resrtextents /= mp->m_sb.sb_rextsize;
+ resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+ quota_flag = XFS_QMOPT_RES_RTBLKS;
+ } else {
+ resrtextents = 0;
+ resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
+ quota_flag = XFS_QMOPT_RES_REGBLKS;
+ }
+
+ /*
+ * Allocate and setup the transaction.
+ */
+ tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
+ resblks, resrtextents);
+ /*
+ * Check for running out of space
+ */
+ if (error) {
+ /*
+ * Free the transaction structure.
+ */
+ ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
+ xfs_trans_cancel(tp, 0);
+ break;
+ }
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
+ 0, quota_flag);
+ if (error)
+ goto error1;
+
+ xfs_trans_ijoin(tp, ip, 0);
+
+ xfs_bmap_init(&free_list, &firstfsb);
+ error = xfs_bmapi_write(tp, ip, startoffset_fsb,
+ allocatesize_fsb, alloc_type, &firstfsb,
+ 0, imapp, &nimaps, &free_list);
+ if (error) {
+ goto error0;
+ }
+
+ /*
+ * Complete the transaction
+ */
+ error = xfs_bmap_finish(&tp, &free_list, &committed);
+ if (error) {
+ goto error0;
+ }
+
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ if (error) {
+ break;
+ }
+
+ allocated_fsb = imapp->br_blockcount;
+
+ if (nimaps == 0) {
+ error = XFS_ERROR(ENOSPC);
+ break;
+ }
+
+ startoffset_fsb += allocated_fsb;
+ allocatesize_fsb -= allocated_fsb;
+ }
+
+ return error;
+
+error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
+ xfs_bmap_cancel(&free_list);
+ xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
+
+error1: /* Just cancel transaction */
+ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
+}
+
+/*
+ * Zero file bytes between startoff and endoff inclusive.
+ * The iolock is held exclusive and no blocks are buffered.
+ *
+ * This function is used by xfs_free_file_space() to zero
+ * partial blocks when the range to free is not block aligned.
+ * When unreserving space with boundaries that are not block
+ * aligned we round up the start and round down the end
+ * boundaries and then use this function to zero the parts of
+ * the blocks that got dropped during the rounding.
+ */
+STATIC int
+xfs_zero_remaining_bytes(
+ xfs_inode_t *ip,
+ xfs_off_t startoff,
+ xfs_off_t endoff)
+{
+ xfs_bmbt_irec_t imap;
+ xfs_fileoff_t offset_fsb;
+ xfs_off_t lastoffset;
+ xfs_off_t offset;
+ xfs_buf_t *bp;
+ xfs_mount_t *mp = ip->i_mount;
+ int nimap;
+ int error = 0;
+
+ /*
+ * Avoid doing I/O beyond eof - it's not necessary
+ * since nothing can read beyond eof. The space will
+ * be zeroed when the file is extended anyway.
+ */
+ if (startoff >= XFS_ISIZE(ip))
+ return 0;
+
+ if (endoff > XFS_ISIZE(ip))
+ endoff = XFS_ISIZE(ip);
+
+ bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
+ mp->m_rtdev_targp : mp->m_ddev_targp,
+ BTOBB(mp->m_sb.sb_blocksize), 0);
+ if (!bp)
+ return XFS_ERROR(ENOMEM);
+
+ xfs_buf_unlock(bp);
+
+ for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
+ uint lock_mode;
+
+ offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ nimap = 1;
+
+ lock_mode = xfs_ilock_data_map_shared(ip);
+ error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
+ xfs_iunlock(ip, lock_mode);
+
+ if (error || nimap < 1)
+ break;
+ ASSERT(imap.br_blockcount >= 1);
+ ASSERT(imap.br_startoff == offset_fsb);
+ lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
+ if (lastoffset > endoff)
+ lastoffset = endoff;
+ if (imap.br_startblock == HOLESTARTBLOCK)
+ continue;
+ ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+ if (imap.br_state == XFS_EXT_UNWRITTEN)
+ continue;
+ XFS_BUF_UNDONE(bp);
+ XFS_BUF_UNWRITE(bp);
+ XFS_BUF_READ(bp);
+ XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
+
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ error = XFS_ERROR(EIO);
+ break;
+ }
+ xfs_buf_iorequest(bp);
+ error = xfs_buf_iowait(bp);
+ if (error) {
+ xfs_buf_ioerror_alert(bp,
+ "xfs_zero_remaining_bytes(read)");
+ break;
+ }
+ memset(bp->b_addr +
+ (offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
+ 0, lastoffset - offset + 1);
+ XFS_BUF_UNDONE(bp);
+ XFS_BUF_UNREAD(bp);
+ XFS_BUF_WRITE(bp);
+
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ error = XFS_ERROR(EIO);
+ break;
+ }
+ xfs_buf_iorequest(bp);
+ error = xfs_buf_iowait(bp);
+ if (error) {
+ xfs_buf_ioerror_alert(bp,
+ "xfs_zero_remaining_bytes(write)");
+ break;
+ }
+ }
+ xfs_buf_free(bp);
+ return error;
+}
+
+int
+xfs_free_file_space(
+ struct xfs_inode *ip,
+ xfs_off_t offset,
+ xfs_off_t len)
+{
+ int committed;
+ int done;
+ xfs_fileoff_t endoffset_fsb;
+ int error;
+ xfs_fsblock_t firstfsb;
+ xfs_bmap_free_t free_list;
+ xfs_bmbt_irec_t imap;
+ xfs_off_t ioffset;
+ xfs_extlen_t mod = 0;
+ xfs_mount_t *mp;
+ int nimap;
+ uint resblks;
+ xfs_off_t rounding;
+ int rt;
+ xfs_fileoff_t startoffset_fsb;
+ xfs_trans_t *tp;
+
+ mp = ip->i_mount;
+
+ trace_xfs_free_file_space(ip);
+
+ error = xfs_qm_dqattach(ip, 0);
+ if (error)
+ return error;
+
+ error = 0;
+ if (len <= 0) /* if nothing being freed */
+ return error;
+ rt = XFS_IS_REALTIME_INODE(ip);
+ startoffset_fsb = XFS_B_TO_FSB(mp, offset);
+ endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
+
+ /* wait for the completion of any pending DIOs */
+ inode_dio_wait(VFS_I(ip));
+
+ rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+ ioffset = offset & ~(rounding - 1);
+ error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+ ioffset, -1);
+ if (error)
+ goto out;
+ truncate_pagecache_range(VFS_I(ip), ioffset, -1);
+
+ /*
+ * Need to zero the stuff we're not freeing, on disk.
+ * If it's a realtime file & can't use unwritten extents then we
+ * actually need to zero the extent edges. Otherwise xfs_bunmapi
+ * will take care of it for us.
+ */
+ if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
+ nimap = 1;
+ error = xfs_bmapi_read(ip, startoffset_fsb, 1,
+ &imap, &nimap, 0);
+ if (error)
+ goto out;
+ ASSERT(nimap == 0 || nimap == 1);
+ if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
+ xfs_daddr_t block;
+
+ ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+ block = imap.br_startblock;
+ mod = do_div(block, mp->m_sb.sb_rextsize);
+ if (mod)
+ startoffset_fsb += mp->m_sb.sb_rextsize - mod;
+ }
+ nimap = 1;
+ error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
+ &imap, &nimap, 0);
+ if (error)
+ goto out;
+ ASSERT(nimap == 0 || nimap == 1);
+ if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
+ ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+ mod++;
+ if (mod && (mod != mp->m_sb.sb_rextsize))
+ endoffset_fsb -= mod;
+ }
+ }
+ if ((done = (endoffset_fsb <= startoffset_fsb)))
+ /*
+ * One contiguous piece to clear
+ */
+ error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
+ else {
+ /*
+ * Some full blocks, possibly two pieces to clear
+ */
+ if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
+ error = xfs_zero_remaining_bytes(ip, offset,
+ XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
+ if (!error &&
+ XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
+ error = xfs_zero_remaining_bytes(ip,
+ XFS_FSB_TO_B(mp, endoffset_fsb),
+ offset + len - 1);
+ }
+
+ /*
+ * free file space until done or until there is an error
+ */
+ resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+ while (!error && !done) {
+
+ /*
+ * allocate and setup the transaction. Allow this
+ * transaction to dip into the reserve blocks to ensure
+ * the freeing of the space succeeds at ENOSPC.
+ */
+ tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);
+
+ /*
+ * check for running out of space
+ */
+ if (error) {
+ /*
+ * Free the transaction structure.
+ */
+ ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
+ xfs_trans_cancel(tp, 0);
+ break;
+ }
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ error = xfs_trans_reserve_quota(tp, mp,
+ ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
+ resblks, 0, XFS_QMOPT_RES_REGBLKS);
+ if (error)
+ goto error1;
+
+ xfs_trans_ijoin(tp, ip, 0);
+
+ /*
+ * issue the bunmapi() call to free the blocks
+ */
+ xfs_bmap_init(&free_list, &firstfsb);
+ error = xfs_bunmapi(tp, ip, startoffset_fsb,
+ endoffset_fsb - startoffset_fsb,
+ 0, 2, &firstfsb, &free_list, &done);
+ if (error) {
+ goto error0;
+ }
+
+ /*
+ * complete the transaction
+ */
+ error = xfs_bmap_finish(&tp, &free_list, &committed);
+ if (error) {
+ goto error0;
+ }
+
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ }
+
+ out:
+ return error;
+
+ error0:
+ xfs_bmap_cancel(&free_list);
+ error1:
+ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ goto out;
+}
+
+
+int
+xfs_zero_file_space(
+ struct xfs_inode *ip,
+ xfs_off_t offset,
+ xfs_off_t len)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ uint granularity;
+ xfs_off_t start_boundary;
+ xfs_off_t end_boundary;
+ int error;
+
+ trace_xfs_zero_file_space(ip);
+
+ granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+
+ /*
+ * Round the range of extents we are going to convert inwards. If the
+ * offset is aligned, then it doesn't get changed so we zero from the
+ * start of the block the offset points to.
+ */
+ start_boundary = round_up(offset, granularity);
+ end_boundary = round_down(offset + len, granularity);
+
+ ASSERT(start_boundary >= offset);
+ ASSERT(end_boundary <= offset + len);
+
+ if (start_boundary < end_boundary - 1) {
+ /*
+ * punch out delayed allocation blocks and the page cache over
+ * the conversion range
+ */
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ error = xfs_bmap_punch_delalloc_range(ip,
+ XFS_B_TO_FSBT(mp, start_boundary),
+ XFS_B_TO_FSB(mp, end_boundary - start_boundary));
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ truncate_pagecache_range(VFS_I(ip), start_boundary,
+ end_boundary - 1);
+
+ /* convert the blocks */
+ error = xfs_alloc_file_space(ip, start_boundary,
+ end_boundary - start_boundary - 1,
+ XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
+ if (error)
+ goto out;
+
+ /* We've handled the interior of the range, now for the edges */
+ if (start_boundary != offset) {
+ error = xfs_iozero(ip, offset, start_boundary - offset);
+ if (error)
+ goto out;
+ }
+
+ if (end_boundary != offset + len)
+ error = xfs_iozero(ip, end_boundary,
+ offset + len - end_boundary);
+
+ } else {
+ /*
+ * It's either a sub-granularity range or the range spans parts
+ * of two adjacent blocks.
+ */
+ error = xfs_iozero(ip, offset, len);
+ }
+
+out:
+ return error;
+
+}
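
A minimal userspace sketch of the boundary rounding above, assuming a hypothetical 4096-byte granularity (the max of block size and page size): the interior is converted as whole blocks, while the unaligned edges are zeroed directly.

#include <stdio.h>
#include <stdint.h>

static uint64_t round_up_to(uint64_t x, uint64_t g)   { return ((x + g - 1) / g) * g; }
static uint64_t round_down_to(uint64_t x, uint64_t g) { return (x / g) * g; }

int main(void)
{
	uint64_t granularity = 4096;		/* hypothetical max(block size, page size) */
	uint64_t offset = 1000, len = 20000;

	uint64_t start_boundary = round_up_to(offset, granularity);		/* 4096  */
	uint64_t end_boundary = round_down_to(offset + len, granularity);	/* 20480 */

	/* interior [4096, 20480) is punched/converted; the sub-block edges
	 * [1000, 4096) and [20480, 21000) are zeroed directly */
	printf("interior: [%llu, %llu)\n",
	       (unsigned long long)start_boundary,
	       (unsigned long long)end_boundary);
	return 0;
}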
+
+/*
+ * xfs_collapse_file_space()
+ * This routine frees disk space and shifts extents for the given file.
+ * The first thing we do is free the data blocks in the specified range
+ * by calling xfs_free_file_space(). This also syncs dirty data
+ * and invalidates the page cache over the region on which the collapse
+ * range is working. Extent records are then shifted left to cover the hole.
+ * RETURNS:
+ * 0 on success
+ * errno on error
+ *
+ */
+int
+xfs_collapse_file_space(
+ struct xfs_inode *ip,
+ xfs_off_t offset,
+ xfs_off_t len)
+{
+ int done = 0;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int error;
+ xfs_extnum_t current_ext = 0;
+ struct xfs_bmap_free free_list;
+ xfs_fsblock_t first_block;
+ int committed;
+ xfs_fileoff_t start_fsb;
+ xfs_fileoff_t shift_fsb;
+
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+
+ trace_xfs_collapse_file_space(ip);
+
+ start_fsb = XFS_B_TO_FSB(mp, offset + len);
+ shift_fsb = XFS_B_TO_FSB(mp, len);
+
+ error = xfs_free_file_space(ip, offset, len);
+ if (error)
+ return error;
+
+ while (!error && !done) {
+ tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
+ /*
+ * We need to reserve a permanent block for the transaction.
+ * This comes into play when, after shifting an extent into the
+ * hole, we find that adjacent extents can be merged, which
+ * may lead to freeing of a block during the record update.
+ */
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
+ XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ break;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
+ ip->i_gdquot, ip->i_pdquot,
+ XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
+ XFS_QMOPT_RES_REGBLKS);
+ if (error)
+ goto out;
+
+ xfs_trans_ijoin(tp, ip, 0);
+
+ xfs_bmap_init(&free_list, &first_block);
+
+ /*
+ * We are using the write transaction, in which a maximum of two
+ * bmbt updates are allowed.
+ */
+ error = xfs_bmap_shift_extents(tp, ip, &done, start_fsb,
+ shift_fsb, &current_ext,
+ &first_block, &free_list,
+ XFS_BMAP_MAX_SHIFT_EXTENTS);
+ if (error)
+ goto out;
+
+ error = xfs_bmap_finish(&tp, &free_list, &committed);
+ if (error)
+ goto out;
+
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ }
+
+ return error;
+
+out:
+ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
+}
+
+/*
+ * We need to check that the format of the data fork in the temporary inode is
+ * valid for the target inode before doing the swap. This is not a problem with
+ * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
+ * data fork depending on the space the attribute fork is taking so we can get
+ * invalid formats on the target inode.
+ *
+ * E.g. target has space for 7 extents in extent format, temp inode only has
+ * space for 6. If we defragment down to 7 extents, then the tmp format is a
+ * btree, but when swapped it needs to be in extent format. Hence we can't just
+ * blindly swap data forks on attr2 filesystems.
+ *
+ * Note that we check the swap in both directions so that we don't end up with
+ * a corrupt temporary inode, either.
+ *
+ * Note that fixing the way xfs_fsr sets up the attribute fork in the source
+ * inode will prevent this situation from occurring, so all we do here is
+ * reject and log the attempt. Basically we are putting the responsibility on
+ * userspace to get this right.
+ */
+static int
+xfs_swap_extents_check_format(
+ xfs_inode_t *ip, /* target inode */
+ xfs_inode_t *tip) /* tmp inode */
+{
+
+ /* Should never get a local format */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
+ tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
+ return EINVAL;
+
+ /*
+ * If the target inode has fewer extents than the temporary inode then
+ * why did userspace call us?
+ */
+ if (ip->i_d.di_nextents < tip->i_d.di_nextents)
+ return EINVAL;
+
+ /*
+ * If the target inode is in extent form and the temp inode is in btree
+ * form then we will end up with the target inode in the wrong format
+ * as we already know there are fewer extents in the temp inode.
+ */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+ tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+ return EINVAL;
+
+ /* Check temp in extent form to max in target */
+ if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
+ XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
+ return EINVAL;
+
+ /* Check target in extent form to max in temp */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
+ XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
+ return EINVAL;
+
+ /*
+ * If we are in a btree format, check that the temp root block will fit
+ * in the target and that it has enough extents to be in btree format
+ * in the target.
+ *
+ * Note that we have to be careful to allow btree->extent conversions
+ * (a common defrag case) which will occur when the temp inode is in
+ * extent format...
+ */
+ if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+ if (XFS_IFORK_BOFF(ip) &&
+ XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
+ return EINVAL;
+ if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
+ XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
+ return EINVAL;
+ }
+
+ /* Reciprocal target->temp btree format checks */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+ if (XFS_IFORK_BOFF(tip) &&
+ XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
+ return EINVAL;
+ if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
+ XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
+ return EINVAL;
+ }
+
+ return 0;
+}
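
An illustrative userspace sketch of the failure case described in the comment above; all extent counts are hypothetical.

#include <stdio.h>

int main(void)
{
	int target_maxext = 7;	/* inline extents the target's data fork can hold */
	int tmp_maxext = 6;	/* temp inode: bigger attr fork, less inline room */
	int tmp_nextents = 7;	/* the defragmented copy ended up with 7 extents */

	/*
	 * 7 extents do not fit inline in the temp inode, so its data fork is a
	 * btree; after a swap the target would hold a btree root for data that
	 * fits in plain extent format, so the swap must be rejected.
	 */
	if (tmp_nextents > tmp_maxext && tmp_nextents <= target_maxext)
		printf("format mismatch after swap -> EINVAL\n");
	return 0;
}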
+
+int
+xfs_swap_extents(
+ xfs_inode_t *ip, /* target inode */
+ xfs_inode_t *tip, /* tmp inode */
+ xfs_swapext_t *sxp)
+{
+ xfs_mount_t *mp = ip->i_mount;
+ xfs_trans_t *tp;
+ xfs_bstat_t *sbp = &sxp->sx_stat;
+ xfs_ifork_t *tempifp, *ifp, *tifp;
+ int src_log_flags, target_log_flags;
+ int error = 0;
+ int aforkblks = 0;
+ int taforkblks = 0;
+ __uint64_t tmp;
+
+ tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
+ if (!tempifp) {
+ error = XFS_ERROR(ENOMEM);
+ goto out;
+ }
+
+ /*
+ * We have to do two separate lock calls here to keep lockdep
+ * happy. If we try to get all the locks in one call, lockdep will
+ * report false positives when we drop the ILOCK and regain them
+ * below.
+ */
+ xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
+ xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
+
+ /* Verify that both files have the same format */
+ if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
+ error = XFS_ERROR(EINVAL);
+ goto out_unlock;
+ }
+
+ /* Verify both files are either real-time or non-realtime */
+ if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
+ error = XFS_ERROR(EINVAL);
+ goto out_unlock;
+ }
+
+ error = -filemap_write_and_wait(VFS_I(tip)->i_mapping);
+ if (error)
+ goto out_unlock;
+ truncate_pagecache_range(VFS_I(tip), 0, -1);
+
+ /* Verify O_DIRECT for ftmp */
+ if (VN_CACHED(VFS_I(tip)) != 0) {
+ error = XFS_ERROR(EINVAL);
+ goto out_unlock;
+ }
+
+ /* Verify all data are being swapped */
+ if (sxp->sx_offset != 0 ||
+ sxp->sx_length != ip->i_d.di_size ||
+ sxp->sx_length != tip->i_d.di_size) {
+ error = XFS_ERROR(EFAULT);
+ goto out_unlock;
+ }
+
+ trace_xfs_swap_extent_before(ip, 0);
+ trace_xfs_swap_extent_before(tip, 1);
+
+ /* check inode formats now that data is flushed */
+ error = xfs_swap_extents_check_format(ip, tip);
+ if (error) {
+ xfs_notice(mp,
+ "%s: inode 0x%llx format is incompatible for exchanging.",
+ __func__, ip->i_ino);
+ goto out_unlock;
+ }
+
+ /*
+ * Compare the current change & modify times with those
+ * passed in. If they differ, we abort this swap.
+ * This is the mechanism used to assure the calling
+ * process that the file was not changed out from
+ * under it.
+ */
+ if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
+ (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
+ (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
+ (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
+ error = XFS_ERROR(EBUSY);
+ goto out_unlock;
+ }
+
+ /* We need to fail if the file is memory mapped. Once we have tossed
+ * all existing pages, the page fault will have no option
+ * but to go to the filesystem for pages. By making the page fault call
+ * vop_read (or write in the case of autogrow) it will block on the iolock
+ * until we have switched the extents.
+ */
+ if (VN_MAPPED(VFS_I(ip))) {
+ error = XFS_ERROR(EBUSY);
+ goto out_unlock;
+ }
+
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ xfs_iunlock(tip, XFS_ILOCK_EXCL);
+
+ /*
+ * There is a race condition here since we gave up the
+ * ilock. However, the data fork will not change since
+ * we have the iolock (locked for truncation too) so we
+ * are safe. We don't really care if non-io related
+ * fields change.
+ */
+ truncate_pagecache_range(VFS_I(ip), 0, -1);
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
+ error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
+ if (error) {
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ xfs_iunlock(tip, XFS_IOLOCK_EXCL);
+ xfs_trans_cancel(tp, 0);
+ goto out;
+ }
+ xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
+
+ /*
+ * Count the number of extended attribute blocks
+ */
+ if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
+ (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
+ error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
+ if (error)
+ goto out_trans_cancel;
+ }
+ if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
+ (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
+ error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
+ &taforkblks);
+ if (error)
+ goto out_trans_cancel;
+ }
+
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+ xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+
+ /*
+ * Before we've swapped the forks, let's set the owners of the forks
+ * appropriately. We have to do this as we are demand paging the btree
+ * buffers, and so the validation done on read will expect the owner
+ * field to be correctly set. Once we change the owners, we can swap the
+ * inode forks.
+ *
+ * Note the trickiness in setting the log flags - we set the owner log
+ * flag on the opposite inode (i.e. the inode we are setting the new
+ * owner to be) because once we swap the forks and log that, log
+ * recovery is going to see the fork as owned by the swapped inode,
+ * not the pre-swapped inodes.
+ */
+ src_log_flags = XFS_ILOG_CORE;
+ target_log_flags = XFS_ILOG_CORE;
+ if (ip->i_d.di_version == 3 &&
+ ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+ target_log_flags |= XFS_ILOG_DOWNER;
+ error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
+ tip->i_ino, NULL);
+ if (error)
+ goto out_trans_cancel;
+ }
+
+ if (tip->i_d.di_version == 3 &&
+ tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+ src_log_flags |= XFS_ILOG_DOWNER;
+ error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
+ ip->i_ino, NULL);
+ if (error)
+ goto out_trans_cancel;
+ }
+
+ /*
+ * Swap the data forks of the inodes
+ */
+ ifp = &ip->i_df;
+ tifp = &tip->i_df;
+ *tempifp = *ifp; /* struct copy */
+ *ifp = *tifp; /* struct copy */
+ *tifp = *tempifp; /* struct copy */
+
+ /*
+ * Fix the on-disk inode values
+ */
+ tmp = (__uint64_t)ip->i_d.di_nblocks;
+ ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
+ tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
+
+ tmp = (__uint64_t) ip->i_d.di_nextents;
+ ip->i_d.di_nextents = tip->i_d.di_nextents;
+ tip->i_d.di_nextents = tmp;
+
+ tmp = (__uint64_t) ip->i_d.di_format;
+ ip->i_d.di_format = tip->i_d.di_format;
+ tip->i_d.di_format = tmp;
+
+ /*
+ * The extents in the source inode could still contain speculative
+ * preallocation beyond EOF (e.g. the file is open but not modified
+ * while defrag is in progress). In that case, we need to copy over the
+ * number of delalloc blocks the data fork in the source inode is
+ * tracking beyond EOF so that when the fork is truncated away when the
+ * temporary inode is unlinked we don't underrun the i_delayed_blks
+ * counter on that inode.
+ */
+ ASSERT(tip->i_delayed_blks == 0);
+ tip->i_delayed_blks = ip->i_delayed_blks;
+ ip->i_delayed_blks = 0;
+
+ switch (ip->i_d.di_format) {
+ case XFS_DINODE_FMT_EXTENTS:
+ /* If the extents fit in the inode, fix the
+ * pointer. Otherwise it's already NULL or
+ * pointing to the extent.
+ */
+ if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
+ ifp->if_u1.if_extents =
+ ifp->if_u2.if_inline_ext;
+ }
+ src_log_flags |= XFS_ILOG_DEXT;
+ break;
+ case XFS_DINODE_FMT_BTREE:
+ ASSERT(ip->i_d.di_version < 3 ||
+ (src_log_flags & XFS_ILOG_DOWNER));
+ src_log_flags |= XFS_ILOG_DBROOT;
+ break;
+ }
+
+ switch (tip->i_d.di_format) {
+ case XFS_DINODE_FMT_EXTENTS:
+ /* If the extents fit in the inode, fix the
+ * pointer. Otherwise it's already NULL or
+ * pointing to the extent.
+ */
+ if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
+ tifp->if_u1.if_extents =
+ tifp->if_u2.if_inline_ext;
+ }
+ target_log_flags |= XFS_ILOG_DEXT;
+ break;
+ case XFS_DINODE_FMT_BTREE:
+ target_log_flags |= XFS_ILOG_DBROOT;
+ ASSERT(tip->i_d.di_version < 3 ||
+ (target_log_flags & XFS_ILOG_DOWNER));
+ break;
+ }
+
+ xfs_trans_log_inode(tp, ip, src_log_flags);
+ xfs_trans_log_inode(tp, tip, target_log_flags);
+
+ /*
+ * If this is a synchronous mount, make sure that the
+ * transaction goes to disk before returning to the user.
+ */
+ if (mp->m_flags & XFS_MOUNT_WSYNC)
+ xfs_trans_set_sync(tp);
+
+ error = xfs_trans_commit(tp, 0);
+
+ trace_xfs_swap_extent_after(ip, 0);
+ trace_xfs_swap_extent_after(tip, 1);
+out:
+ kmem_free(tempifp);
+ return error;
+
+out_unlock:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+ xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+ goto out;
+
+out_trans_cancel:
+ xfs_trans_cancel(tp, 0);
+ goto out_unlock;
+}
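
A small userspace sketch of the di_nblocks fixup performed in the swap above, using made-up block counts: the data fork blocks change hands while each inode keeps its own attribute fork blocks.

#include <stdio.h>

int main(void)
{
	unsigned long long ip_nblocks = 110, aforkblks = 10;	/* 100 data + 10 attr */
	unsigned long long tip_nblocks = 85, taforkblks = 5;	/* 80 data + 5 attr */
	unsigned long long tmp;

	tmp = ip_nblocks;
	ip_nblocks = tip_nblocks - taforkblks + aforkblks;	/* 80 data + 10 attr = 90 */
	tip_nblocks = tmp + taforkblks - aforkblks;		/* 100 data + 5 attr = 105 */

	printf("ip: %llu blocks, tip: %llu blocks\n", ip_nblocks, tip_nblocks);
	return 0;
}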
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
new file mode 100644
index 00000000000..2fdb72d2c90
--- /dev/null
+++ b/fs/xfs/xfs_bmap_util.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __XFS_BMAP_UTIL_H__
+#define __XFS_BMAP_UTIL_H__
+
+/* Kernel only BMAP related definitions and functions */
+
+struct xfs_bmbt_irec;
+struct xfs_bmap_free_item;
+struct xfs_ifork;
+struct xfs_inode;
+struct xfs_mount;
+struct xfs_trans;
+
+/*
+ * Argument structure for xfs_bmap_alloc.
+ */
+struct xfs_bmalloca {
+ xfs_fsblock_t *firstblock; /* i/o first block allocated */
+ struct xfs_bmap_free *flist; /* bmap freelist */
+ struct xfs_trans *tp; /* transaction pointer */
+ struct xfs_inode *ip; /* incore inode pointer */
+ struct xfs_bmbt_irec prev; /* extent before the new one */
+ struct xfs_bmbt_irec got; /* extent after, or delayed */
+
+ xfs_fileoff_t offset; /* offset in file filling in */
+ xfs_extlen_t length; /* i/o length asked/allocated */
+ xfs_fsblock_t blkno; /* starting block of new extent */
+
+ struct xfs_btree_cur *cur; /* btree cursor */
+ xfs_extnum_t idx; /* current extent index */
+ int nallocs;/* number of extents alloc'd */
+ int logflags;/* flags for transaction logging */
+
+ xfs_extlen_t total; /* total blocks needed for xaction */
+ xfs_extlen_t minlen; /* minimum allocation size (blocks) */
+ xfs_extlen_t minleft; /* amount must be left after alloc */
+ bool eof; /* set if allocating past last extent */
+ bool wasdel; /* replacing a delayed allocation */
+ bool userdata;/* set if is user data */
+ bool aeof; /* allocated space at eof */
+ bool conv; /* overwriting unwritten extents */
+ int flags;
+ struct completion *done;
+ struct work_struct work;
+ int result;
+};
+
+int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
+ int *committed);
+int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
+int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
+ int whichfork, int *eof);
+int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
+ int whichfork, int *count);
+int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
+ xfs_fileoff_t start_fsb, xfs_fileoff_t length);
+
+/* bmap to userspace formatter - copy to user & advance pointer */
+typedef int (*xfs_bmap_format_t)(void **, struct getbmapx *, int *);
+int xfs_getbmap(struct xfs_inode *ip, struct getbmapx *bmv,
+ xfs_bmap_format_t formatter, void *arg);
+
+/* functions in xfs_bmap.c that are only needed by xfs_bmap_util.c */
+void xfs_bmap_del_free(struct xfs_bmap_free *flist,
+ struct xfs_bmap_free_item *prev,
+ struct xfs_bmap_free_item *free);
+int xfs_bmap_extsize_align(struct xfs_mount *mp, struct xfs_bmbt_irec *gotp,
+ struct xfs_bmbt_irec *prevp, xfs_extlen_t extsz,
+ int rt, int eof, int delay, int convert,
+ xfs_fileoff_t *offp, xfs_extlen_t *lenp);
+void xfs_bmap_adjacent(struct xfs_bmalloca *ap);
+int xfs_bmap_last_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+ int whichfork, struct xfs_bmbt_irec *rec,
+ int *is_empty);
+
+/* preallocation and hole punch interface */
+int xfs_alloc_file_space(struct xfs_inode *ip, xfs_off_t offset,
+ xfs_off_t len, int alloc_type);
+int xfs_free_file_space(struct xfs_inode *ip, xfs_off_t offset,
+ xfs_off_t len);
+int xfs_zero_file_space(struct xfs_inode *ip, xfs_off_t offset,
+ xfs_off_t len);
+int xfs_collapse_file_space(struct xfs_inode *, xfs_off_t offset,
+ xfs_off_t len);
+
+/* EOF block manipulation functions */
+bool xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
+int xfs_free_eofblocks(struct xfs_mount *mp, struct xfs_inode *ip,
+ bool need_iolock);
+
+int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
+ struct xfs_swapext *sx);
+
+xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb);
+
+#endif /* __XFS_BMAP_UTIL_H__ */
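
For orientation, a userspace sketch of the fallocate(2) modes that are generally serviced by the hole punch, zero and collapse interfaces declared above; the mapping is indicative only and error handling is elided.

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	if (ftruncate(fd, 1 << 20) < 0)
		return 1;

	/* roughly the xfs_free_file_space() path */
	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 4096, 65536);
	/* roughly the xfs_zero_file_space() path */
	fallocate(fd, FALLOC_FL_ZERO_RANGE, 4096, 65536);
	/* roughly the xfs_collapse_file_space() path (block-aligned range) */
	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 65536);

	close(fd);
	return 0;
}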
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index aeb87ca69fc..cf893bc1e37 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -17,26 +17,23 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_inode_item.h"
+#include "xfs_buf_item.h"
#include "xfs_btree.h"
-#include "xfs_ialloc.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
+#include "xfs_cksum.h"
+#include "xfs_alloc.h"
/*
* Cursor allocation zone.
@@ -46,316 +43,242 @@ kmem_zone_t *xfs_btree_cur_zone;
/*
* Btree magic numbers.
*/
-const __uint32_t xfs_magics[XFS_BTNUM_MAX] =
-{
- XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC
+static const __uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
+ { XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
+ XFS_FIBT_MAGIC },
+ { XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC,
+ XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC }
};
+#define xfs_btree_magic(cur) \
+ xfs_magics[!!((cur)->bc_flags & XFS_BTREE_CRC_BLOCKS)][cur->bc_btnum]
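
A userspace sketch of the two-row magic lookup above: the row is selected by whether the cursor uses CRC-enabled blocks, the column by the btree type. The constants below are illustrative stand-ins, not authoritative on-disk values.

#include <stdio.h>

#define CRC_BLOCKS	0x1	/* stand-in for XFS_BTREE_CRC_BLOCKS */

enum { BTNUM_BNO, BTNUM_CNT, BTNUM_BMAP, BTNUM_INO, BTNUM_MAX };

static const unsigned int magics[2][BTNUM_MAX] = {
	{ 0x41425442, 0x41425443, 0x424d4150, 0x49414254 },	/* "ABTB" "ABTC" "BMAP" "IABT" */
	{ 0x41423342, 0x41423343, 0x424d4133, 0x49414233 },	/* CRC-enabled (v5) variants */
};

static unsigned int magic_for(int flags, int btnum)
{
	return magics[!!(flags & CRC_BLOCKS)][btnum];
}

int main(void)
{
	printf("v4 bmap magic: 0x%x\n", magic_for(0, BTNUM_BMAP));
	printf("v5 bmap magic: 0x%x\n", magic_for(CRC_BLOCKS, BTNUM_BMAP));
	return 0;
}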
-/*
- * Prototypes for internal routines.
- */
-/*
- * Checking routine: return maxrecs for the block.
- */
-STATIC int /* number of records fitting in block */
-xfs_btree_maxrecs(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_btree_block_t *block);/* generic btree block pointer */
+STATIC int /* error (0 or EFSCORRUPTED) */
+xfs_btree_check_lblock(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ struct xfs_btree_block *block, /* btree long form block pointer */
+ int level, /* level of the btree block */
+ struct xfs_buf *bp) /* buffer for block, if any */
+{
+ int lblock_ok = 1; /* block passes checks */
+ struct xfs_mount *mp; /* file system mount point */
-/*
- * Internal routines.
- */
+ mp = cur->bc_mp;
-/*
- * Retrieve the block pointer from the cursor at the given level.
- * This may be a bmap btree root or from a buffer.
- */
-STATIC xfs_btree_block_t * /* generic btree block pointer */
-xfs_btree_get_block(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level, /* level in btree */
- struct xfs_buf **bpp); /* buffer containing the block */
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ lblock_ok = lblock_ok &&
+ uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid) &&
+ block->bb_u.l.bb_blkno == cpu_to_be64(
+ bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
+ }
-/*
- * Checking routine: return maxrecs for the block.
- */
-STATIC int /* number of records fitting in block */
-xfs_btree_maxrecs(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_btree_block_t *block) /* generic btree block pointer */
-{
- switch (cur->bc_btnum) {
- case XFS_BTNUM_BNO:
- case XFS_BTNUM_CNT:
- return (int)XFS_ALLOC_BLOCK_MAXRECS(
- be16_to_cpu(block->bb_h.bb_level), cur);
- case XFS_BTNUM_BMAP:
- return (int)XFS_BMAP_BLOCK_IMAXRECS(
- be16_to_cpu(block->bb_h.bb_level), cur);
- case XFS_BTNUM_INO:
- return (int)XFS_INOBT_BLOCK_MAXRECS(
- be16_to_cpu(block->bb_h.bb_level), cur);
- default:
- ASSERT(0);
- return 0;
+ lblock_ok = lblock_ok &&
+ be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) &&
+ be16_to_cpu(block->bb_level) == level &&
+ be16_to_cpu(block->bb_numrecs) <=
+ cur->bc_ops->get_maxrecs(cur, level) &&
+ block->bb_u.l.bb_leftsib &&
+ (block->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO) ||
+ XFS_FSB_SANITY_CHECK(mp,
+ be64_to_cpu(block->bb_u.l.bb_leftsib))) &&
+ block->bb_u.l.bb_rightsib &&
+ (block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO) ||
+ XFS_FSB_SANITY_CHECK(mp,
+ be64_to_cpu(block->bb_u.l.bb_rightsib)));
+
+ if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp,
+ XFS_ERRTAG_BTREE_CHECK_LBLOCK,
+ XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
+ if (bp)
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+ return XFS_ERROR(EFSCORRUPTED);
}
+ return 0;
}
-/*
- * External routines.
- */
+STATIC int /* error (0 or EFSCORRUPTED) */
+xfs_btree_check_sblock(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ struct xfs_btree_block *block, /* btree short form block pointer */
+ int level, /* level of the btree block */
+ struct xfs_buf *bp) /* buffer containing block */
+{
+ struct xfs_mount *mp; /* file system mount point */
+ struct xfs_buf *agbp; /* buffer for ag. freespace struct */
+ struct xfs_agf *agf; /* ag. freespace structure */
+ xfs_agblock_t agflen; /* native ag. freespace length */
+ int sblock_ok = 1; /* block passes checks */
+
+ mp = cur->bc_mp;
+ agbp = cur->bc_private.a.agbp;
+ agf = XFS_BUF_TO_AGF(agbp);
+ agflen = be32_to_cpu(agf->agf_length);
+
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ sblock_ok = sblock_ok &&
+ uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid) &&
+ block->bb_u.s.bb_blkno == cpu_to_be64(
+ bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
+ }
+
+ sblock_ok = sblock_ok &&
+ be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) &&
+ be16_to_cpu(block->bb_level) == level &&
+ be16_to_cpu(block->bb_numrecs) <=
+ cur->bc_ops->get_maxrecs(cur, level) &&
+ (block->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) ||
+ be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) &&
+ block->bb_u.s.bb_leftsib &&
+ (block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK) ||
+ be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) &&
+ block->bb_u.s.bb_rightsib;
+
+ if (unlikely(XFS_TEST_ERROR(!sblock_ok, mp,
+ XFS_ERRTAG_BTREE_CHECK_SBLOCK,
+ XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
+ if (bp)
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+ return XFS_ERROR(EFSCORRUPTED);
+ }
+ return 0;
+}
-#ifdef DEBUG
/*
* Debug routine: check that block header is ok.
*/
-void
+int
xfs_btree_check_block(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_btree_block_t *block, /* generic btree block pointer */
+ struct xfs_btree_cur *cur, /* btree cursor */
+ struct xfs_btree_block *block, /* generic btree block pointer */
int level, /* level of the btree block */
- xfs_buf_t *bp) /* buffer containing block, if any */
+ struct xfs_buf *bp) /* buffer containing block, if any */
{
- if (XFS_BTREE_LONG_PTRS(cur->bc_btnum))
- xfs_btree_check_lblock(cur, (xfs_btree_lblock_t *)block, level,
- bp);
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ return xfs_btree_check_lblock(cur, block, level, bp);
else
- xfs_btree_check_sblock(cur, (xfs_btree_sblock_t *)block, level,
- bp);
-}
-
-/*
- * Debug routine: check that keys are in the right order.
- */
-void
-xfs_btree_check_key(
- xfs_btnum_t btnum, /* btree identifier */
- void *ak1, /* pointer to left (lower) key */
- void *ak2) /* pointer to right (higher) key */
-{
- switch (btnum) {
- case XFS_BTNUM_BNO: {
- xfs_alloc_key_t *k1;
- xfs_alloc_key_t *k2;
-
- k1 = ak1;
- k2 = ak2;
- ASSERT(be32_to_cpu(k1->ar_startblock) < be32_to_cpu(k2->ar_startblock));
- break;
- }
- case XFS_BTNUM_CNT: {
- xfs_alloc_key_t *k1;
- xfs_alloc_key_t *k2;
-
- k1 = ak1;
- k2 = ak2;
- ASSERT(be32_to_cpu(k1->ar_blockcount) < be32_to_cpu(k2->ar_blockcount) ||
- (k1->ar_blockcount == k2->ar_blockcount &&
- be32_to_cpu(k1->ar_startblock) < be32_to_cpu(k2->ar_startblock)));
- break;
- }
- case XFS_BTNUM_BMAP: {
- xfs_bmbt_key_t *k1;
- xfs_bmbt_key_t *k2;
-
- k1 = ak1;
- k2 = ak2;
- ASSERT(be64_to_cpu(k1->br_startoff) < be64_to_cpu(k2->br_startoff));
- break;
- }
- case XFS_BTNUM_INO: {
- xfs_inobt_key_t *k1;
- xfs_inobt_key_t *k2;
-
- k1 = ak1;
- k2 = ak2;
- ASSERT(be32_to_cpu(k1->ir_startino) < be32_to_cpu(k2->ir_startino));
- break;
- }
- default:
- ASSERT(0);
- }
+ return xfs_btree_check_sblock(cur, block, level, bp);
}
-#endif /* DEBUG */
/*
- * Checking routine: check that long form block header is ok.
+ * Check that (long) pointer is ok.
*/
-/* ARGSUSED */
int /* error (0 or EFSCORRUPTED) */
-xfs_btree_check_lblock(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_btree_lblock_t *block, /* btree long form block pointer */
- int level, /* level of the btree block */
- xfs_buf_t *bp) /* buffer for block, if any */
+xfs_btree_check_lptr(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ xfs_dfsbno_t bno, /* btree block disk address */
+ int level) /* btree block level */
{
- int lblock_ok; /* block passes checks */
- xfs_mount_t *mp; /* file system mount point */
-
- mp = cur->bc_mp;
- lblock_ok =
- be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] &&
- be16_to_cpu(block->bb_level) == level &&
- be16_to_cpu(block->bb_numrecs) <=
- xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) &&
- block->bb_leftsib &&
- (be64_to_cpu(block->bb_leftsib) == NULLDFSBNO ||
- XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_leftsib))) &&
- block->bb_rightsib &&
- (be64_to_cpu(block->bb_rightsib) == NULLDFSBNO ||
- XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_rightsib)));
- if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp, XFS_ERRTAG_BTREE_CHECK_LBLOCK,
- XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
- if (bp)
- xfs_buftrace("LBTREE ERROR", bp);
- XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW,
- mp);
- return XFS_ERROR(EFSCORRUPTED);
- }
+ XFS_WANT_CORRUPTED_RETURN(
+ level > 0 &&
+ bno != NULLDFSBNO &&
+ XFS_FSB_SANITY_CHECK(cur->bc_mp, bno));
return 0;
}
+#ifdef DEBUG
/*
- * Checking routine: check that (long) pointer is ok.
+ * Check that (short) pointer is ok.
*/
-int /* error (0 or EFSCORRUPTED) */
-xfs_btree_check_lptr(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_dfsbno_t ptr, /* btree block disk address */
- int level) /* btree block level */
+STATIC int /* error (0 or EFSCORRUPTED) */
+xfs_btree_check_sptr(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ xfs_agblock_t bno, /* btree block disk address */
+ int level) /* btree block level */
{
- xfs_mount_t *mp; /* file system mount point */
+ xfs_agblock_t agblocks = cur->bc_mp->m_sb.sb_agblocks;
- mp = cur->bc_mp;
XFS_WANT_CORRUPTED_RETURN(
level > 0 &&
- ptr != NULLDFSBNO &&
- XFS_FSB_SANITY_CHECK(mp, ptr));
+ bno != NULLAGBLOCK &&
+ bno != 0 &&
+ bno < agblocks);
return 0;
}
-#ifdef DEBUG
/*
- * Debug routine: check that records are in the right order.
+ * Check that block ptr is ok.
*/
-void
-xfs_btree_check_rec(
- xfs_btnum_t btnum, /* btree identifier */
- void *ar1, /* pointer to left (lower) record */
- void *ar2) /* pointer to right (higher) record */
-{
- switch (btnum) {
- case XFS_BTNUM_BNO: {
- xfs_alloc_rec_t *r1;
- xfs_alloc_rec_t *r2;
-
- r1 = ar1;
- r2 = ar2;
- ASSERT(be32_to_cpu(r1->ar_startblock) +
- be32_to_cpu(r1->ar_blockcount) <=
- be32_to_cpu(r2->ar_startblock));
- break;
- }
- case XFS_BTNUM_CNT: {
- xfs_alloc_rec_t *r1;
- xfs_alloc_rec_t *r2;
-
- r1 = ar1;
- r2 = ar2;
- ASSERT(be32_to_cpu(r1->ar_blockcount) < be32_to_cpu(r2->ar_blockcount) ||
- (r1->ar_blockcount == r2->ar_blockcount &&
- be32_to_cpu(r1->ar_startblock) < be32_to_cpu(r2->ar_startblock)));
- break;
- }
- case XFS_BTNUM_BMAP: {
- xfs_bmbt_rec_t *r1;
- xfs_bmbt_rec_t *r2;
-
- r1 = ar1;
- r2 = ar2;
- ASSERT(xfs_bmbt_disk_get_startoff(r1) +
- xfs_bmbt_disk_get_blockcount(r1) <=
- xfs_bmbt_disk_get_startoff(r2));
- break;
- }
- case XFS_BTNUM_INO: {
- xfs_inobt_rec_t *r1;
- xfs_inobt_rec_t *r2;
-
- r1 = ar1;
- r2 = ar2;
- ASSERT(be32_to_cpu(r1->ir_startino) + XFS_INODES_PER_CHUNK <=
- be32_to_cpu(r2->ir_startino));
- break;
- }
- default:
- ASSERT(0);
+STATIC int /* error (0 or EFSCORRUPTED) */
+xfs_btree_check_ptr(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ union xfs_btree_ptr *ptr, /* btree block disk address */
+ int index, /* offset from ptr to check */
+ int level) /* btree block level */
+{
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
+ return xfs_btree_check_lptr(cur,
+ be64_to_cpu((&ptr->l)[index]), level);
+ } else {
+ return xfs_btree_check_sptr(cur,
+ be32_to_cpu((&ptr->s)[index]), level);
}
}
-#endif /* DEBUG */
+#endif
/*
- * Checking routine: check that block header is ok.
+ * Calculate CRC on the whole btree block and stuff it into the
+ * long-form btree header.
+ *
+ * Prior to calculating the CRC, pull the LSN out of the buffer log item and put
+ * it into the buffer so recovery knows what the last modification was that made
+ * it to disk.
*/
-/* ARGSUSED */
-int /* error (0 or EFSCORRUPTED) */
-xfs_btree_check_sblock(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_btree_sblock_t *block, /* btree short form block pointer */
- int level, /* level of the btree block */
- xfs_buf_t *bp) /* buffer containing block */
+void
+xfs_btree_lblock_calc_crc(
+ struct xfs_buf *bp)
{
- xfs_buf_t *agbp; /* buffer for ag. freespace struct */
- xfs_agf_t *agf; /* ag. freespace structure */
- xfs_agblock_t agflen; /* native ag. freespace length */
- int sblock_ok; /* block passes checks */
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ struct xfs_buf_log_item *bip = bp->b_fspriv;
- agbp = cur->bc_private.a.agbp;
- agf = XFS_BUF_TO_AGF(agbp);
- agflen = be32_to_cpu(agf->agf_length);
- sblock_ok =
- be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] &&
- be16_to_cpu(block->bb_level) == level &&
- be16_to_cpu(block->bb_numrecs) <=
- xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) &&
- (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK ||
- be32_to_cpu(block->bb_leftsib) < agflen) &&
- block->bb_leftsib &&
- (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK ||
- be32_to_cpu(block->bb_rightsib) < agflen) &&
- block->bb_rightsib;
- if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp,
- XFS_ERRTAG_BTREE_CHECK_SBLOCK,
- XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
- if (bp)
- xfs_buftrace("SBTREE ERROR", bp);
- XFS_ERROR_REPORT("xfs_btree_check_sblock", XFS_ERRLEVEL_LOW,
- cur->bc_mp);
- return XFS_ERROR(EFSCORRUPTED);
- }
- return 0;
+ if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
+ return;
+ if (bip)
+ block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+ xfs_buf_update_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
+}
+
+bool
+xfs_btree_lblock_verify_crc(
+ struct xfs_buf *bp)
+{
+ if (xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
+ return xfs_buf_verify_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
+
+ return true;
}
/*
- * Checking routine: check that (short) pointer is ok.
+ * Calculate CRC on the whole btree block and stuff it into the
+ * short-form btree header.
+ *
+ * Prior to calculating the CRC, pull the LSN out of the buffer log item and put
+ * it into the buffer so recovery knows what the last modification was that made
+ * it to disk.
*/
-int /* error (0 or EFSCORRUPTED) */
-xfs_btree_check_sptr(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_agblock_t ptr, /* btree block disk address */
- int level) /* btree block level */
+void
+xfs_btree_sblock_calc_crc(
+ struct xfs_buf *bp)
{
- xfs_buf_t *agbp; /* buffer for ag. freespace struct */
- xfs_agf_t *agf; /* ag. freespace structure */
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ struct xfs_buf_log_item *bip = bp->b_fspriv;
- agbp = cur->bc_private.a.agbp;
- agf = XFS_BUF_TO_AGF(agbp);
- XFS_WANT_CORRUPTED_RETURN(
- level > 0 &&
- ptr != NULLAGBLOCK && ptr != 0 &&
- ptr < be32_to_cpu(agf->agf_length));
- return 0;
+ if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
+ return;
+ if (bip)
+ block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+ xfs_buf_update_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
+}
+
+bool
+xfs_btree_sblock_verify_crc(
+ struct xfs_buf *bp)
+{
+ if (xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
+ return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
+
+ return true;
}
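
A hedged sketch of how these helpers are typically wired into a buffer ops structure: the write verifier stamps the CRC just before I/O and the read verifier checks it. The verifier names here are hypothetical; each real btree type supplies its own.

static void example_sbtree_read_verify(
	struct xfs_buf		*bp)
{
	if (!xfs_btree_sblock_verify_crc(bp)) {
		/* a real verifier marks the buffer corrupt here */
	}
	/* ... structural checks on the block follow ... */
}

static void example_sbtree_write_verify(
	struct xfs_buf		*bp)
{
	/* ... structural checks elided ... */
	xfs_btree_sblock_calc_crc(bp);	/* stamp LSN + CRC just before I/O */
}

static const struct xfs_buf_ops example_sbtree_buf_ops = {
	.verify_read	= example_sbtree_read_verify,
	.verify_write	= example_sbtree_write_verify,
};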
/*
@@ -380,7 +303,7 @@ xfs_btree_del_cursor(
*/
for (i = 0; i < cur->bc_nlevels; i++) {
if (cur->bc_bufs[i])
- xfs_btree_setbuf(cur, i, NULL);
+ xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
else if (!error)
break;
}
@@ -414,104 +337,210 @@ xfs_btree_dup_cursor(
tp = cur->bc_tp;
mp = cur->bc_mp;
+
/*
* Allocate a new cursor like the old one.
*/
- new = xfs_btree_init_cursor(mp, tp, cur->bc_private.a.agbp,
- cur->bc_private.a.agno, cur->bc_btnum, cur->bc_private.b.ip,
- cur->bc_private.b.whichfork);
+ new = cur->bc_ops->dup_cursor(cur);
+
/*
* Copy the record currently in the cursor.
*/
new->bc_rec = cur->bc_rec;
+
/*
* For each level current, re-get the buffer and copy the ptr value.
*/
for (i = 0; i < new->bc_nlevels; i++) {
new->bc_ptrs[i] = cur->bc_ptrs[i];
new->bc_ra[i] = cur->bc_ra[i];
- if ((bp = cur->bc_bufs[i])) {
- if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
- XFS_BUF_ADDR(bp), mp->m_bsize, 0, &bp))) {
+ bp = cur->bc_bufs[i];
+ if (bp) {
+ error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+ XFS_BUF_ADDR(bp), mp->m_bsize,
+ 0, &bp,
+ cur->bc_ops->buf_ops);
+ if (error) {
xfs_btree_del_cursor(new, error);
*ncur = NULL;
return error;
}
- new->bc_bufs[i] = bp;
- ASSERT(bp);
- ASSERT(!XFS_BUF_GETERROR(bp));
- } else
- new->bc_bufs[i] = NULL;
- }
- /*
- * For bmap btrees, copy the firstblock, flist, and flags values,
- * since init cursor doesn't get them.
- */
- if (new->bc_btnum == XFS_BTNUM_BMAP) {
- new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
- new->bc_private.b.flist = cur->bc_private.b.flist;
- new->bc_private.b.flags = cur->bc_private.b.flags;
+ }
+ new->bc_bufs[i] = bp;
}
*ncur = new;
return 0;
}
/*
- * Change the cursor to point to the first record at the given level.
- * Other levels are unaffected.
+ * XFS btree block layout and addressing:
+ *
+ * There are two types of blocks in the btree: leaf and non-leaf blocks.
+ *
+ * A leaf block starts with a header and is followed by records containing
+ * the values. A non-leaf block also starts with the same header, and
+ * then first contains lookup keys followed by an equal number of pointers
+ * to the btree blocks at the previous level.
+ *
+ * +--------+-------+-------+-------+-------+-------+-------+
+ * Leaf: | header | rec 1 | rec 2 | rec 3 | rec 4 | rec 5 | rec N |
+ * +--------+-------+-------+-------+-------+-------+-------+
+ *
+ * +--------+-------+-------+-------+-------+-------+-------+
+ * Non-Leaf: | header | key 1 | key 2 | key N | ptr 1 | ptr 2 | ptr N |
+ * +--------+-------+-------+-------+-------+-------+-------+
+ *
+ * The header is called struct xfs_btree_block for reasons better left unknown
+ * and comes in different versions for short (32bit) and long (64bit) block
+ * pointers. The record and key structures are defined by the btree instances
+ * and opaque to the btree core. The block pointers are simple disk endian
+ * integers, available in a short (32bit) and long (64bit) variant.
+ *
+ * The helpers below calculate the offset of a given record, key or pointer
+ * into a btree block (xfs_btree_*_offset) or return a pointer to the given
+ * record, key or pointer (xfs_btree_*_addr). Note that all addressing
+ * inside the btree block is done using indices starting at one, not zero!
*/
-int /* success=1, failure=0 */
-xfs_btree_firstrec(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level) /* level to change */
+
+/*
+ * Return size of the btree block header for this btree instance.
+ */
+static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
{
- xfs_btree_block_t *block; /* generic btree block pointer */
- xfs_buf_t *bp; /* buffer containing block */
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
+ if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
+ return XFS_BTREE_LBLOCK_CRC_LEN;
+ return XFS_BTREE_LBLOCK_LEN;
+ }
+ if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
+ return XFS_BTREE_SBLOCK_CRC_LEN;
+ return XFS_BTREE_SBLOCK_LEN;
+}
- /*
- * Get the block pointer for this level.
- */
- block = xfs_btree_get_block(cur, level, &bp);
- xfs_btree_check_block(cur, block, level, bp);
- /*
- * It's empty, there is no such record.
- */
- if (!block->bb_h.bb_numrecs)
- return 0;
- /*
- * Set the ptr value to 1, that's the first record/key.
- */
- cur->bc_ptrs[level] = 1;
- return 1;
+/*
+ * Return size of btree block pointers for this btree instance.
+ */
+static inline size_t xfs_btree_ptr_len(struct xfs_btree_cur *cur)
+{
+ return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
+ sizeof(__be64) : sizeof(__be32);
+}
+
+/*
+ * Calculate offset of the n-th record in a btree block.
+ */
+STATIC size_t
+xfs_btree_rec_offset(
+ struct xfs_btree_cur *cur,
+ int n)
+{
+ return xfs_btree_block_len(cur) +
+ (n - 1) * cur->bc_ops->rec_len;
+}
+
+/*
+ * Calculate offset of the n-th key in a btree block.
+ */
+STATIC size_t
+xfs_btree_key_offset(
+ struct xfs_btree_cur *cur,
+ int n)
+{
+ return xfs_btree_block_len(cur) +
+ (n - 1) * cur->bc_ops->key_len;
+}
+
+/*
+ * Calculate offset of the n-th block pointer in a btree block.
+ */
+STATIC size_t
+xfs_btree_ptr_offset(
+ struct xfs_btree_cur *cur,
+ int n,
+ int level)
+{
+ return xfs_btree_block_len(cur) +
+ cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len +
+ (n - 1) * xfs_btree_ptr_len(cur);
+}
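
A userspace sketch of the addressing arithmetic above with made-up sizes (56-byte header, 8-byte keys, 4-byte pointers, maxrecs of 16); indices start at one, as in the helpers.

#include <stdio.h>
#include <stddef.h>

static const size_t block_len = 56;	/* hypothetical short-form CRC header size */
static const size_t key_len = 8, ptr_len = 4;
static const int maxrecs = 16;

static size_t key_offset(int n) { return block_len + (n - 1) * key_len; }
static size_t ptr_offset(int n) { return block_len + maxrecs * key_len + (n - 1) * ptr_len; }

int main(void)
{
	printf("key 1 at %zu, key 2 at %zu\n", key_offset(1), key_offset(2));	/* 56, 64 */
	printf("ptr 1 at %zu, ptr 3 at %zu\n", ptr_offset(1), ptr_offset(3));	/* 184, 192 */
	return 0;
}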
+
+/*
+ * Return a pointer to the n-th record in the btree block.
+ */
+STATIC union xfs_btree_rec *
+xfs_btree_rec_addr(
+ struct xfs_btree_cur *cur,
+ int n,
+ struct xfs_btree_block *block)
+{
+ return (union xfs_btree_rec *)
+ ((char *)block + xfs_btree_rec_offset(cur, n));
+}
+
+/*
+ * Return a pointer to the n-th key in the btree block.
+ */
+STATIC union xfs_btree_key *
+xfs_btree_key_addr(
+ struct xfs_btree_cur *cur,
+ int n,
+ struct xfs_btree_block *block)
+{
+ return (union xfs_btree_key *)
+ ((char *)block + xfs_btree_key_offset(cur, n));
+}
+
+/*
+ * Return a pointer to the n-th block pointer in the btree block.
+ */
+STATIC union xfs_btree_ptr *
+xfs_btree_ptr_addr(
+ struct xfs_btree_cur *cur,
+ int n,
+ struct xfs_btree_block *block)
+{
+ int level = xfs_btree_get_level(block);
+
+ ASSERT(block->bb_level != 0);
+
+ return (union xfs_btree_ptr *)
+ ((char *)block + xfs_btree_ptr_offset(cur, n, level));
+}
+
+/*
+ * Get the root block which is stored in the inode.
+ *
+ * For now this btree implementation assumes the btree root is always
+ * stored in the if_broot field of an inode fork.
+ */
+STATIC struct xfs_btree_block *
+xfs_btree_get_iroot(
+ struct xfs_btree_cur *cur)
+{
+ struct xfs_ifork *ifp;
+
+ ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork);
+ return (struct xfs_btree_block *)ifp->if_broot;
}
/*
* Retrieve the block pointer from the cursor at the given level.
- * This may be a bmap btree root or from a buffer.
+ * This may be an inode btree root or from a buffer.
*/
-STATIC xfs_btree_block_t * /* generic btree block pointer */
+STATIC struct xfs_btree_block * /* generic btree block pointer */
xfs_btree_get_block(
- xfs_btree_cur_t *cur, /* btree cursor */
+ struct xfs_btree_cur *cur, /* btree cursor */
int level, /* level in btree */
- xfs_buf_t **bpp) /* buffer containing the block */
-{
- xfs_btree_block_t *block; /* return value */
- xfs_buf_t *bp; /* return buffer */
- xfs_ifork_t *ifp; /* inode fork pointer */
- int whichfork; /* data or attr fork */
-
- if (cur->bc_btnum == XFS_BTNUM_BMAP && level == cur->bc_nlevels - 1) {
- whichfork = cur->bc_private.b.whichfork;
- ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, whichfork);
- block = (xfs_btree_block_t *)ifp->if_broot;
- bp = NULL;
- } else {
- bp = cur->bc_bufs[level];
- block = XFS_BUF_TO_BLOCK(bp);
+ struct xfs_buf **bpp) /* buffer containing the block */
+{
+ if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+ (level == cur->bc_nlevels - 1)) {
+ *bpp = NULL;
+ return xfs_btree_get_iroot(cur);
}
- ASSERT(block != NULL);
- *bpp = bp;
- return block;
+
+ *bpp = cur->bc_bufs[level];
+ return XFS_BUF_TO_BLOCK(*bpp);
}
/*
@@ -525,15 +554,11 @@ xfs_btree_get_bufl(
xfs_fsblock_t fsbno, /* file system block number */
uint lock) /* lock flags for get_buf */
{
- xfs_buf_t *bp; /* buffer pointer (return value) */
xfs_daddr_t d; /* real disk block address */
ASSERT(fsbno != NULLFSBLOCK);
d = XFS_FSB_TO_DADDR(mp, fsbno);
- bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
- ASSERT(bp);
- ASSERT(!XFS_BUF_GETERROR(bp));
- return bp;
+ return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
}
/*
@@ -548,138 +573,72 @@ xfs_btree_get_bufs(
xfs_agblock_t agbno, /* allocation group block number */
uint lock) /* lock flags for get_buf */
{
- xfs_buf_t *bp; /* buffer pointer (return value) */
xfs_daddr_t d; /* real disk block address */
ASSERT(agno != NULLAGNUMBER);
ASSERT(agbno != NULLAGBLOCK);
d = XFS_AGB_TO_DADDR(mp, agno, agbno);
- bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
- ASSERT(bp);
- ASSERT(!XFS_BUF_GETERROR(bp));
- return bp;
+ return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
}
/*
- * Allocate a new btree cursor.
- * The cursor is either for allocation (A) or bmap (B) or inodes (I).
+ * Check for the cursor referring to the last block at the given level.
*/
-xfs_btree_cur_t * /* new btree cursor */
-xfs_btree_init_cursor(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_buf_t *agbp, /* (A only) buffer for agf structure */
- /* (I only) buffer for agi structure */
- xfs_agnumber_t agno, /* (AI only) allocation group number */
- xfs_btnum_t btnum, /* btree identifier */
- xfs_inode_t *ip, /* (B only) inode owning the btree */
- int whichfork) /* (B only) data or attr fork */
+int /* 1=is last block, 0=not last block */
+xfs_btree_islastblock(
+ xfs_btree_cur_t *cur, /* btree cursor */
+ int level) /* level to check */
{
- xfs_agf_t *agf; /* (A) allocation group freespace */
- xfs_agi_t *agi; /* (I) allocation group inodespace */
- xfs_btree_cur_t *cur; /* return value */
- xfs_ifork_t *ifp; /* (I) inode fork pointer */
- int nlevels=0; /* number of levels in the btree */
+ struct xfs_btree_block *block; /* generic btree block pointer */
+ xfs_buf_t *bp; /* buffer containing block */
- ASSERT(xfs_btree_cur_zone != NULL);
- /*
- * Allocate a new cursor.
- */
- cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
- /*
- * Deduce the number of btree levels from the arguments.
- */
- switch (btnum) {
- case XFS_BTNUM_BNO:
- case XFS_BTNUM_CNT:
- agf = XFS_BUF_TO_AGF(agbp);
- nlevels = be32_to_cpu(agf->agf_levels[btnum]);
- break;
- case XFS_BTNUM_BMAP:
- ifp = XFS_IFORK_PTR(ip, whichfork);
- nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
- break;
- case XFS_BTNUM_INO:
- agi = XFS_BUF_TO_AGI(agbp);
- nlevels = be32_to_cpu(agi->agi_level);
- break;
- default:
- ASSERT(0);
- }
- /*
- * Fill in the common fields.
- */
- cur->bc_tp = tp;
- cur->bc_mp = mp;
- cur->bc_nlevels = nlevels;
- cur->bc_btnum = btnum;
- cur->bc_blocklog = mp->m_sb.sb_blocklog;
- /*
- * Fill in private fields.
- */
- switch (btnum) {
- case XFS_BTNUM_BNO:
- case XFS_BTNUM_CNT:
- /*
- * Allocation btree fields.
- */
- cur->bc_private.a.agbp = agbp;
- cur->bc_private.a.agno = agno;
- break;
- case XFS_BTNUM_BMAP:
- /*
- * Bmap btree fields.
- */
- cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
- cur->bc_private.b.ip = ip;
- cur->bc_private.b.firstblock = NULLFSBLOCK;
- cur->bc_private.b.flist = NULL;
- cur->bc_private.b.allocated = 0;
- cur->bc_private.b.flags = 0;
- cur->bc_private.b.whichfork = whichfork;
- break;
- case XFS_BTNUM_INO:
- /*
- * Inode allocation btree fields.
- */
- cur->bc_private.i.agbp = agbp;
- cur->bc_private.i.agno = agno;
- break;
- default:
- ASSERT(0);
- }
- return cur;
+ block = xfs_btree_get_block(cur, level, &bp);
+ xfs_btree_check_block(cur, block, level, bp);
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO);
+ else
+ return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
}
/*
- * Check for the cursor referring to the last block at the given level.
+ * Change the cursor to point to the first record at the given level.
+ * Other levels are unaffected.
*/
-int /* 1=is last block, 0=not last block */
-xfs_btree_islastblock(
+STATIC int /* success=1, failure=0 */
+xfs_btree_firstrec(
xfs_btree_cur_t *cur, /* btree cursor */
- int level) /* level to check */
+ int level) /* level to change */
{
- xfs_btree_block_t *block; /* generic btree block pointer */
+ struct xfs_btree_block *block; /* generic btree block pointer */
xfs_buf_t *bp; /* buffer containing block */
+ /*
+ * Get the block pointer for this level.
+ */
block = xfs_btree_get_block(cur, level, &bp);
xfs_btree_check_block(cur, block, level, bp);
- if (XFS_BTREE_LONG_PTRS(cur->bc_btnum))
- return be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO;
- else
- return be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK;
+ /*
+ * It's empty, there is no such record.
+ */
+ if (!block->bb_numrecs)
+ return 0;
+ /*
+ * Set the ptr value to 1, that's the first record/key.
+ */
+ cur->bc_ptrs[level] = 1;
+ return 1;
}
/*
* Change the cursor to point to the last record in the current block
* at the given level. Other levels are unaffected.
*/
-int /* success=1, failure=0 */
+STATIC int /* success=1, failure=0 */
xfs_btree_lastrec(
xfs_btree_cur_t *cur, /* btree cursor */
int level) /* level to change */
{
- xfs_btree_block_t *block; /* generic btree block pointer */
+ struct xfs_btree_block *block; /* generic btree block pointer */
xfs_buf_t *bp; /* buffer containing block */
/*
@@ -690,12 +649,12 @@ xfs_btree_lastrec(
/*
* It's empty, there is no such record.
*/
- if (!block->bb_h.bb_numrecs)
+ if (!block->bb_numrecs)
return 0;
/*
* Set the ptr value to numrecs, that's the last record/key.
*/
- cur->bc_ptrs[level] = be16_to_cpu(block->bb_h.bb_numrecs);
+ cur->bc_ptrs[level] = be16_to_cpu(block->bb_numrecs);
return 1;
}
@@ -739,69 +698,28 @@ xfs_btree_offsets(
* Get a buffer for the block, return it read in.
* Long-form addressing.
*/
-int /* error */
+int
xfs_btree_read_bufl(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_fsblock_t fsbno, /* file system block number */
- uint lock, /* lock flags for read_buf */
- xfs_buf_t **bpp, /* buffer for fsbno */
- int refval) /* ref count value for buffer */
+ struct xfs_mount *mp, /* file system mount point */
+ struct xfs_trans *tp, /* transaction pointer */
+ xfs_fsblock_t fsbno, /* file system block number */
+ uint lock, /* lock flags for read_buf */
+ struct xfs_buf **bpp, /* buffer for fsbno */
+ int refval, /* ref count value for buffer */
+ const struct xfs_buf_ops *ops)
{
- xfs_buf_t *bp; /* return value */
+ struct xfs_buf *bp; /* return value */
xfs_daddr_t d; /* real disk block address */
- int error;
+ int error;
ASSERT(fsbno != NULLFSBLOCK);
d = XFS_FSB_TO_DADDR(mp, fsbno);
- if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
- mp->m_bsize, lock, &bp))) {
+ error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
+ mp->m_bsize, lock, &bp, ops);
+ if (error)
return error;
- }
- ASSERT(!bp || !XFS_BUF_GETERROR(bp));
- if (bp != NULL) {
- XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval);
- }
- *bpp = bp;
- return 0;
-}
-
-/*
- * Get a buffer for the block, return it read in.
- * Short-form addressing.
- */
-int /* error */
-xfs_btree_read_bufs(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_agblock_t agbno, /* allocation group block number */
- uint lock, /* lock flags for read_buf */
- xfs_buf_t **bpp, /* buffer for agno/agbno */
- int refval) /* ref count value for buffer */
-{
- xfs_buf_t *bp; /* return value */
- xfs_daddr_t d; /* real disk block address */
- int error;
-
- ASSERT(agno != NULLAGNUMBER);
- ASSERT(agbno != NULLAGBLOCK);
- d = XFS_AGB_TO_DADDR(mp, agno, agbno);
- if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
- mp->m_bsize, lock, &bp))) {
- return error;
- }
- ASSERT(!bp || !XFS_BUF_GETERROR(bp));
- if (bp != NULL) {
- switch (refval) {
- case XFS_ALLOC_BTREE_REF:
- XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval);
- break;
- case XFS_INO_BTREE_REF:
- XFS_BUF_SET_VTYPE_REF(bp, B_FS_INOMAP, refval);
- break;
- }
- }
+ if (bp)
+ xfs_buf_set_ref(bp, refval);
*bpp = bp;
return 0;
}
@@ -813,15 +731,16 @@ xfs_btree_read_bufs(
/* ARGSUSED */
void
xfs_btree_reada_bufl(
- xfs_mount_t *mp, /* file system mount point */
- xfs_fsblock_t fsbno, /* file system block number */
- xfs_extlen_t count) /* count of filesystem blocks */
+ struct xfs_mount *mp, /* file system mount point */
+ xfs_fsblock_t fsbno, /* file system block number */
+ xfs_extlen_t count, /* count of filesystem blocks */
+ const struct xfs_buf_ops *ops)
{
xfs_daddr_t d;
ASSERT(fsbno != NULLFSBLOCK);
d = XFS_FSB_TO_DADDR(mp, fsbno);
- xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count);
+ xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
}
/*
@@ -831,111 +750,3320 @@ xfs_btree_reada_bufl(
/* ARGSUSED */
void
xfs_btree_reada_bufs(
- xfs_mount_t *mp, /* file system mount point */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_agblock_t agbno, /* allocation group block number */
- xfs_extlen_t count) /* count of filesystem blocks */
+ struct xfs_mount *mp, /* file system mount point */
+ xfs_agnumber_t agno, /* allocation group number */
+ xfs_agblock_t agbno, /* allocation group block number */
+ xfs_extlen_t count, /* count of filesystem blocks */
+ const struct xfs_buf_ops *ops)
{
xfs_daddr_t d;
ASSERT(agno != NULLAGNUMBER);
ASSERT(agbno != NULLAGBLOCK);
d = XFS_AGB_TO_DADDR(mp, agno, agbno);
- xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count);
+ xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
+}
+
+STATIC int
+xfs_btree_readahead_lblock(
+ struct xfs_btree_cur *cur,
+ int lr,
+ struct xfs_btree_block *block)
+{
+ int rval = 0;
+ xfs_dfsbno_t left = be64_to_cpu(block->bb_u.l.bb_leftsib);
+ xfs_dfsbno_t right = be64_to_cpu(block->bb_u.l.bb_rightsib);
+
+ if ((lr & XFS_BTCUR_LEFTRA) && left != NULLDFSBNO) {
+ xfs_btree_reada_bufl(cur->bc_mp, left, 1,
+ cur->bc_ops->buf_ops);
+ rval++;
+ }
+
+ if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLDFSBNO) {
+ xfs_btree_reada_bufl(cur->bc_mp, right, 1,
+ cur->bc_ops->buf_ops);
+ rval++;
+ }
+
+ return rval;
+}
+
+STATIC int
+xfs_btree_readahead_sblock(
+ struct xfs_btree_cur *cur,
+ int lr,
+ struct xfs_btree_block *block)
+{
+ int rval = 0;
+ xfs_agblock_t left = be32_to_cpu(block->bb_u.s.bb_leftsib);
+ xfs_agblock_t right = be32_to_cpu(block->bb_u.s.bb_rightsib);
+
+
+ if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
+ xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
+ left, 1, cur->bc_ops->buf_ops);
+ rval++;
+ }
+
+ if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
+ xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
+ right, 1, cur->bc_ops->buf_ops);
+ rval++;
+ }
+
+ return rval;
}
/*
* Read-ahead btree blocks, at the given level.
* Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA.
*/
-int
-xfs_btree_readahead_core(
- xfs_btree_cur_t *cur, /* btree cursor */
+STATIC int
+xfs_btree_readahead(
+ struct xfs_btree_cur *cur, /* btree cursor */
int lev, /* level in btree */
int lr) /* left/right bits */
{
- xfs_alloc_block_t *a;
- xfs_bmbt_block_t *b;
- xfs_inobt_block_t *i;
- int rval = 0;
+ struct xfs_btree_block *block;
+
+ /*
+ * No readahead needed if we are at the root level and the
+ * btree root is stored in the inode.
+ */
+ if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+ (lev == cur->bc_nlevels - 1))
+ return 0;
+
+ if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
+ return 0;
- ASSERT(cur->bc_bufs[lev] != NULL);
cur->bc_ra[lev] |= lr;
- switch (cur->bc_btnum) {
- case XFS_BTNUM_BNO:
- case XFS_BTNUM_CNT:
- a = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]);
- if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(a->bb_leftsib) != NULLAGBLOCK) {
- xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
- be32_to_cpu(a->bb_leftsib), 1);
- rval++;
- }
- if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(a->bb_rightsib) != NULLAGBLOCK) {
- xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
- be32_to_cpu(a->bb_rightsib), 1);
- rval++;
- }
- break;
- case XFS_BTNUM_BMAP:
- b = XFS_BUF_TO_BMBT_BLOCK(cur->bc_bufs[lev]);
- if ((lr & XFS_BTCUR_LEFTRA) && be64_to_cpu(b->bb_leftsib) != NULLDFSBNO) {
- xfs_btree_reada_bufl(cur->bc_mp, be64_to_cpu(b->bb_leftsib), 1);
- rval++;
- }
- if ((lr & XFS_BTCUR_RIGHTRA) && be64_to_cpu(b->bb_rightsib) != NULLDFSBNO) {
- xfs_btree_reada_bufl(cur->bc_mp, be64_to_cpu(b->bb_rightsib), 1);
- rval++;
- }
- break;
- case XFS_BTNUM_INO:
- i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]);
- if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(i->bb_leftsib) != NULLAGBLOCK) {
- xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno,
- be32_to_cpu(i->bb_leftsib), 1);
- rval++;
- }
- if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(i->bb_rightsib) != NULLAGBLOCK) {
- xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.i.agno,
- be32_to_cpu(i->bb_rightsib), 1);
- rval++;
- }
- break;
- default:
- ASSERT(0);
+ block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]);
+
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ return xfs_btree_readahead_lblock(cur, lr, block);
+ return xfs_btree_readahead_sblock(cur, lr, block);
+}
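
A userspace sketch of the "already issued" filter above: OR-ing the requested direction bits into bc_ra[] makes a repeated readahead request in the same direction a no-op.

#include <stdio.h>

#define LEFTRA	0x1	/* stand-ins for XFS_BTCUR_LEFTRA / XFS_BTCUR_RIGHTRA */
#define RIGHTRA	0x2

int main(void)
{
	int ra = 0;		/* per-level bc_ra[] state */
	int lr = RIGHTRA;	/* requested direction */

	if ((ra | lr) != ra) {
		ra |= lr;
		printf("issue right-sibling readahead, ra now 0x%x\n", ra);
	}

	/* a second identical request adds no new bits and is filtered out */
	if ((ra | lr) == ra)
		printf("second request: nothing to do\n");
	return 0;
}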
+
+STATIC xfs_daddr_t
+xfs_btree_ptr_to_daddr(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr)
+{
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
+ ASSERT(ptr->l != cpu_to_be64(NULLDFSBNO));
+
+ return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
+ } else {
+ ASSERT(cur->bc_private.a.agno != NULLAGNUMBER);
+ ASSERT(ptr->s != cpu_to_be32(NULLAGBLOCK));
+
+ return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
+ be32_to_cpu(ptr->s));
}
- return rval;
+}
+
+/*
+ * Readahead @count btree blocks at the given @ptr location.
+ *
+ * We don't need to care about long or short form btrees here as we have a
+ * method of converting the ptr directly to a daddr available to us.
+ */
+STATIC void
+xfs_btree_readahead_ptr(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr,
+ xfs_extlen_t count)
+{
+ xfs_buf_readahead(cur->bc_mp->m_ddev_targp,
+ xfs_btree_ptr_to_daddr(cur, ptr),
+ cur->bc_mp->m_bsize * count, cur->bc_ops->buf_ops);
}
/*
* Set the buffer for level "lev" in the cursor to bp, releasing
* any previous buffer.
*/
-void
+STATIC void
xfs_btree_setbuf(
xfs_btree_cur_t *cur, /* btree cursor */
int lev, /* level in btree */
xfs_buf_t *bp) /* new buffer to set */
{
- xfs_btree_block_t *b; /* btree block */
- xfs_buf_t *obp; /* old buffer pointer */
+ struct xfs_btree_block *b; /* btree block */
- obp = cur->bc_bufs[lev];
- if (obp)
- xfs_trans_brelse(cur->bc_tp, obp);
+ if (cur->bc_bufs[lev])
+ xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[lev]);
cur->bc_bufs[lev] = bp;
cur->bc_ra[lev] = 0;
- if (!bp)
- return;
+
b = XFS_BUF_TO_BLOCK(bp);
- if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) {
- if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO)
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
+ if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO))
cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
- if (be64_to_cpu(b->bb_u.l.bb_rightsib) == NULLDFSBNO)
+ if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO))
cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
} else {
- if (be32_to_cpu(b->bb_u.s.bb_leftsib) == NULLAGBLOCK)
+ if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK))
cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
- if (be32_to_cpu(b->bb_u.s.bb_rightsib) == NULLAGBLOCK)
+ if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
}
}
+
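+/*
+ * Block pointers are kept in a union of the two on-disk forms: long form
+ * btrees use a 64-bit filesystem block number in ptr->l, short form btrees
+ * use a 32-bit AG-relative block number in ptr->s.
+ */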
+STATIC int
+xfs_btree_ptr_is_null(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr)
+{
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ return ptr->l == cpu_to_be64(NULLDFSBNO);
+ else
+ return ptr->s == cpu_to_be32(NULLAGBLOCK);
+}
+
+STATIC void
+xfs_btree_set_ptr_null(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr)
+{
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ ptr->l = cpu_to_be64(NULLDFSBNO);
+ else
+ ptr->s = cpu_to_be32(NULLAGBLOCK);
+}
+
+/*
+ * Get/set/init sibling pointers
+ */
+STATIC void
+xfs_btree_get_sibling(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ union xfs_btree_ptr *ptr,
+ int lr)
+{
+ ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
+
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
+ if (lr == XFS_BB_RIGHTSIB)
+ ptr->l = block->bb_u.l.bb_rightsib;
+ else
+ ptr->l = block->bb_u.l.bb_leftsib;
+ } else {
+ if (lr == XFS_BB_RIGHTSIB)
+ ptr->s = block->bb_u.s.bb_rightsib;
+ else
+ ptr->s = block->bb_u.s.bb_leftsib;
+ }
+}
+
+STATIC void
+xfs_btree_set_sibling(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ union xfs_btree_ptr *ptr,
+ int lr)
+{
+ ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
+
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
+ if (lr == XFS_BB_RIGHTSIB)
+ block->bb_u.l.bb_rightsib = ptr->l;
+ else
+ block->bb_u.l.bb_leftsib = ptr->l;
+ } else {
+ if (lr == XFS_BB_RIGHTSIB)
+ block->bb_u.s.bb_rightsib = ptr->s;
+ else
+ block->bb_u.s.bb_leftsib = ptr->s;
+ }
+}
+
+void
+xfs_btree_init_block_int(
+ struct xfs_mount *mp,
+ struct xfs_btree_block *buf,
+ xfs_daddr_t blkno,
+ __u32 magic,
+ __u16 level,
+ __u16 numrecs,
+ __u64 owner,
+ unsigned int flags)
+{
+ buf->bb_magic = cpu_to_be32(magic);
+ buf->bb_level = cpu_to_be16(level);
+ buf->bb_numrecs = cpu_to_be16(numrecs);
+
+ if (flags & XFS_BTREE_LONG_PTRS) {
+ buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
+ buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
+ if (flags & XFS_BTREE_CRC_BLOCKS) {
+ buf->bb_u.l.bb_blkno = cpu_to_be64(blkno);
+ buf->bb_u.l.bb_owner = cpu_to_be64(owner);
+ uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_uuid);
+ buf->bb_u.l.bb_pad = 0;
+ buf->bb_u.l.bb_lsn = 0;
+ }
+ } else {
+ /* owner is a 32 bit value on short blocks */
+ __u32 __owner = (__u32)owner;
+
+ buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
+ buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
+ if (flags & XFS_BTREE_CRC_BLOCKS) {
+ buf->bb_u.s.bb_blkno = cpu_to_be64(blkno);
+ buf->bb_u.s.bb_owner = cpu_to_be32(__owner);
+ uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid);
+ buf->bb_u.s.bb_lsn = 0;
+ }
+ }
+}
+
+void
+xfs_btree_init_block(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp,
+ __u32 magic,
+ __u16 level,
+ __u16 numrecs,
+ __u64 owner,
+ unsigned int flags)
+{
+ xfs_btree_init_block_int(mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
+ magic, level, numrecs, owner, flags);
+}
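+
+/*
+ * For example, a caller building a fresh level 0 by-block allocation btree
+ * root with a single record, owned by AG "agno", might do roughly:
+ *
+ *	xfs_btree_init_block(mp, bp, XFS_ABTB_MAGIC, 0, 1, agno, 0);
+ *
+ * (illustrative only; the magic number, owner and flags depend on the
+ * btree being built.)
+ */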
+
+STATIC void
+xfs_btree_init_block_cur(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp,
+ int level,
+ int numrecs)
+{
+ __u64 owner;
+
+ /*
+ * We can pull the owner from the cursor right now because the owner
+ * type lines up with the pointer size of the btree: long-pointer
+ * btrees are owned by an inode, short-pointer btrees by an AG. This
+ * may change in the future, but is safe for the current users of the
+ * generic btree code.
+ */
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ owner = cur->bc_private.b.ip->i_ino;
+ else
+ owner = cur->bc_private.a.agno;
+
+ xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
+ xfs_btree_magic(cur), level, numrecs,
+ owner, cur->bc_flags);
+}
+
+/*
+ * Return true if ptr is the last record in the btree and
+ * we need to track updates to this record. The decision
+ * will be further refined in the update_lastrec method.
+ */
+STATIC int
+xfs_btree_is_lastrec(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ int level)
+{
+ union xfs_btree_ptr ptr;
+
+ if (level > 0)
+ return 0;
+ if (!(cur->bc_flags & XFS_BTREE_LASTREC_UPDATE))
+ return 0;
+
+ xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
+ if (!xfs_btree_ptr_is_null(cur, &ptr))
+ return 0;
+ return 1;
+}
+
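+/*
+ * Convert a buffer's disk address back into a btree block pointer.
+ */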
+STATIC void
+xfs_btree_buf_to_ptr(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp,
+ union xfs_btree_ptr *ptr)
+{
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
+ XFS_BUF_ADDR(bp)));
+ else {
+ ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
+ XFS_BUF_ADDR(bp)));
+ }
+}
+
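+/*
+ * Set the buffer's LRU reference count hint based on which btree the
+ * block belongs to.
+ */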
+STATIC void
+xfs_btree_set_refs(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp)
+{
+ switch (cur->bc_btnum) {
+ case XFS_BTNUM_BNO:
+ case XFS_BTNUM_CNT:
+ xfs_buf_set_ref(bp, XFS_ALLOC_BTREE_REF);
+ break;
+ case XFS_BTNUM_INO:
+ case XFS_BTNUM_FINO:
+ xfs_buf_set_ref(bp, XFS_INO_BTREE_REF);
+ break;
+ case XFS_BTNUM_BMAP:
+ xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
+ break;
+ default:
+ ASSERT(0);
+ }
+}
+
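+/*
+ * Get an uninitialised buffer for a new btree block at the given ptr,
+ * without reading the block in from disk.
+ */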
+STATIC int
+xfs_btree_get_buf_block(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr,
+ int flags,
+ struct xfs_btree_block **block,
+ struct xfs_buf **bpp)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ xfs_daddr_t d;
+
+ /* need to sort out how callers deal with failures first */
+ ASSERT(!(flags & XBF_TRYLOCK));
+
+ d = xfs_btree_ptr_to_daddr(cur, ptr);
+ *bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
+ mp->m_bsize, flags);
+
+ if (!*bpp)
+ return ENOMEM;
+
+ (*bpp)->b_ops = cur->bc_ops->buf_ops;
+ *block = XFS_BUF_TO_BLOCK(*bpp);
+ return 0;
+}
+
+/*
+ * Read in the buffer at the given ptr and return the buffer and
+ * the block pointer within the buffer.
+ */
+STATIC int
+xfs_btree_read_buf_block(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr,
+ int flags,
+ struct xfs_btree_block **block,
+ struct xfs_buf **bpp)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ xfs_daddr_t d;
+ int error;
+
+ /* need to sort out how callers deal with failures first */
+ ASSERT(!(flags & XBF_TRYLOCK));
+
+ d = xfs_btree_ptr_to_daddr(cur, ptr);
+ error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
+ mp->m_bsize, flags, bpp,
+ cur->bc_ops->buf_ops);
+ if (error)
+ return error;
+
+ xfs_btree_set_refs(cur, *bpp);
+ *block = XFS_BUF_TO_BLOCK(*bpp);
+ return 0;
+}
+
+/*
+ * Copy keys from one btree block to another.
+ */
+STATIC void
+xfs_btree_copy_keys(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *dst_key,
+ union xfs_btree_key *src_key,
+ int numkeys)
+{
+ ASSERT(numkeys >= 0);
+ memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len);
+}
+
+/*
+ * Copy records from one btree block to another.
+ */
+STATIC void
+xfs_btree_copy_recs(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *dst_rec,
+ union xfs_btree_rec *src_rec,
+ int numrecs)
+{
+ ASSERT(numrecs >= 0);
+ memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len);
+}
+
+/*
+ * Copy block pointers from one btree block to another.
+ */
+STATIC void
+xfs_btree_copy_ptrs(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *dst_ptr,
+ union xfs_btree_ptr *src_ptr,
+ int numptrs)
+{
+ ASSERT(numptrs >= 0);
+ memcpy(dst_ptr, src_ptr, numptrs * xfs_btree_ptr_len(cur));
+}
+
+/*
+ * Shift keys one index left/right inside a single btree block.
+ */
+STATIC void
+xfs_btree_shift_keys(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *key,
+ int dir,
+ int numkeys)
+{
+ char *dst_key;
+
+ ASSERT(numkeys >= 0);
+ ASSERT(dir == 1 || dir == -1);
+
+ dst_key = (char *)key + (dir * cur->bc_ops->key_len);
+ memmove(dst_key, key, numkeys * cur->bc_ops->key_len);
+}
+
+/*
+ * Shift records one index left/right inside a single btree block.
+ */
+STATIC void
+xfs_btree_shift_recs(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec,
+ int dir,
+ int numrecs)
+{
+ char *dst_rec;
+
+ ASSERT(numrecs >= 0);
+ ASSERT(dir == 1 || dir == -1);
+
+ dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len);
+ memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len);
+}
+
+/*
+ * Shift block pointers one index left/right inside a single btree block.
+ */
+STATIC void
+xfs_btree_shift_ptrs(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr,
+ int dir,
+ int numptrs)
+{
+ char *dst_ptr;
+
+ ASSERT(numptrs >= 0);
+ ASSERT(dir == 1 || dir == -1);
+
+ dst_ptr = (char *)ptr + (dir * xfs_btree_ptr_len(cur));
+ memmove(dst_ptr, ptr, numptrs * xfs_btree_ptr_len(cur));
+}
+
+/*
+ * Log key values from the btree block.
+ */
+STATIC void
+xfs_btree_log_keys(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp,
+ int first,
+ int last)
+{
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);
+
+ if (bp) {
+ xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
+ xfs_trans_log_buf(cur->bc_tp, bp,
+ xfs_btree_key_offset(cur, first),
+ xfs_btree_key_offset(cur, last + 1) - 1);
+ } else {
+ xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
+ xfs_ilog_fbroot(cur->bc_private.b.whichfork));
+ }
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+}
+
+/*
+ * Log record values from the btree block.
+ */
+void
+xfs_btree_log_recs(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp,
+ int first,
+ int last)
+{
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);
+
+ xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
+ xfs_trans_log_buf(cur->bc_tp, bp,
+ xfs_btree_rec_offset(cur, first),
+ xfs_btree_rec_offset(cur, last + 1) - 1);
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+}
+
+/*
+ * Log block pointer fields from a btree block (nonleaf).
+ */
+STATIC void
+xfs_btree_log_ptrs(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ struct xfs_buf *bp, /* buffer containing btree block */
+ int first, /* index of first pointer to log */
+ int last) /* index of last pointer to log */
+{
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);
+
+ if (bp) {
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ int level = xfs_btree_get_level(block);
+
+ xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
+ xfs_trans_log_buf(cur->bc_tp, bp,
+ xfs_btree_ptr_offset(cur, first, level),
+ xfs_btree_ptr_offset(cur, last + 1, level) - 1);
+ } else {
+ xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
+ xfs_ilog_fbroot(cur->bc_private.b.whichfork));
+ }
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+}
+
+/*
+ * Log fields from a btree block header.
+ */
+void
+xfs_btree_log_block(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ struct xfs_buf *bp, /* buffer containing btree block */
+ int fields) /* mask of fields: XFS_BB_... */
+{
+ int first; /* first byte offset logged */
+ int last; /* last byte offset logged */
+ static const short soffsets[] = { /* table of offsets (short) */
+ offsetof(struct xfs_btree_block, bb_magic),
+ offsetof(struct xfs_btree_block, bb_level),
+ offsetof(struct xfs_btree_block, bb_numrecs),
+ offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib),
+ offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib),
+ offsetof(struct xfs_btree_block, bb_u.s.bb_blkno),
+ offsetof(struct xfs_btree_block, bb_u.s.bb_lsn),
+ offsetof(struct xfs_btree_block, bb_u.s.bb_uuid),
+ offsetof(struct xfs_btree_block, bb_u.s.bb_owner),
+ offsetof(struct xfs_btree_block, bb_u.s.bb_crc),
+ XFS_BTREE_SBLOCK_CRC_LEN
+ };
+ static const short loffsets[] = { /* table of offsets (long) */
+ offsetof(struct xfs_btree_block, bb_magic),
+ offsetof(struct xfs_btree_block, bb_level),
+ offsetof(struct xfs_btree_block, bb_numrecs),
+ offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib),
+ offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib),
+ offsetof(struct xfs_btree_block, bb_u.l.bb_blkno),
+ offsetof(struct xfs_btree_block, bb_u.l.bb_lsn),
+ offsetof(struct xfs_btree_block, bb_u.l.bb_uuid),
+ offsetof(struct xfs_btree_block, bb_u.l.bb_owner),
+ offsetof(struct xfs_btree_block, bb_u.l.bb_crc),
+ offsetof(struct xfs_btree_block, bb_u.l.bb_pad),
+ XFS_BTREE_LBLOCK_CRC_LEN
+ };
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_TRACE_ARGBI(cur, bp, fields);
+
+ if (bp) {
+ int nbits;
+
+ if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
+ /*
+ * We don't log the CRC when updating a btree
+ * block but instead recreate it during log
+ * recovery. As the log buffers have checksums
+ * of their own this is safe and avoids logging a crc
+ * update in a lot of places.
+ */
+ if (fields == XFS_BB_ALL_BITS)
+ fields = XFS_BB_ALL_BITS_CRC;
+ nbits = XFS_BB_NUM_BITS_CRC;
+ } else {
+ nbits = XFS_BB_NUM_BITS;
+ }
+ xfs_btree_offsets(fields,
+ (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
+ loffsets : soffsets,
+ nbits, &first, &last);
+ xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
+ xfs_trans_log_buf(cur->bc_tp, bp, first, last);
+ } else {
+ xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
+ xfs_ilog_fbroot(cur->bc_private.b.whichfork));
+ }
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+}
+
+/*
+ * Increment cursor by one record at the level.
+ * For nonzero levels the leaf-ward information is untouched.
+ */
+int /* error */
+xfs_btree_increment(
+ struct xfs_btree_cur *cur,
+ int level,
+ int *stat) /* success/failure */
+{
+ struct xfs_btree_block *block;
+ union xfs_btree_ptr ptr;
+ struct xfs_buf *bp;
+ int error; /* error return value */
+ int lev;
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_TRACE_ARGI(cur, level);
+
+ ASSERT(level < cur->bc_nlevels);
+
+ /* Read-ahead to the right at this level. */
+ xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
+
+ /* Get a pointer to the btree block. */
+ block = xfs_btree_get_block(cur, level, &bp);
+
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, level, bp);
+ if (error)
+ goto error0;
+#endif
+
+ /* We're done if we remain in the block after the increment. */
+ if (++cur->bc_ptrs[level] <= xfs_btree_get_numrecs(block))
+ goto out1;
+
+ /* Fail if we just went off the right edge of the tree. */
+ xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
+ if (xfs_btree_ptr_is_null(cur, &ptr))
+ goto out0;
+
+ XFS_BTREE_STATS_INC(cur, increment);
+
+ /*
+ * March up the tree incrementing pointers.
+ * Stop when we don't go off the right edge of a block.
+ */
+ for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
+ block = xfs_btree_get_block(cur, lev, &bp);
+
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, lev, bp);
+ if (error)
+ goto error0;
+#endif
+
+ if (++cur->bc_ptrs[lev] <= xfs_btree_get_numrecs(block))
+ break;
+
+ /* Read-ahead the right block for the next loop. */
+ xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
+ }
+
+ /*
+ * If we went off the root then we are either seriously
+ * confused or have the tree root in an inode.
+ */
+ if (lev == cur->bc_nlevels) {
+ if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
+ goto out0;
+ ASSERT(0);
+ error = EFSCORRUPTED;
+ goto error0;
+ }
+ ASSERT(lev < cur->bc_nlevels);
+
+ /*
+ * Now walk back down the tree, fixing up the cursor's buffer
+ * pointers and key numbers.
+ */
+ for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
+ union xfs_btree_ptr *ptrp;
+
+ ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
+ --lev;
+ error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
+ if (error)
+ goto error0;
+
+ xfs_btree_setbuf(cur, lev, bp);
+ cur->bc_ptrs[lev] = 1;
+ }
+out1:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 1;
+ return 0;
+
+out0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 0;
+ return 0;
+
+error0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+}
+
+/*
+ * Decrement cursor by one record at the level.
+ * For nonzero levels the leaf-ward information is untouched.
+ */
+int /* error */
+xfs_btree_decrement(
+ struct xfs_btree_cur *cur,
+ int level,
+ int *stat) /* success/failure */
+{
+ struct xfs_btree_block *block;
+ xfs_buf_t *bp;
+ int error; /* error return value */
+ int lev;
+ union xfs_btree_ptr ptr;
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_TRACE_ARGI(cur, level);
+
+ ASSERT(level < cur->bc_nlevels);
+
+ /* Read-ahead to the left at this level. */
+ xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);
+
+ /* We're done if we remain in the block after the decrement. */
+ if (--cur->bc_ptrs[level] > 0)
+ goto out1;
+
+ /* Get a pointer to the btree block. */
+ block = xfs_btree_get_block(cur, level, &bp);
+
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, level, bp);
+ if (error)
+ goto error0;
+#endif
+
+ /* Fail if we just went off the left edge of the tree. */
+ xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
+ if (xfs_btree_ptr_is_null(cur, &ptr))
+ goto out0;
+
+ XFS_BTREE_STATS_INC(cur, decrement);
+
+ /*
+ * March up the tree decrementing pointers.
+ * Stop when we don't go off the left edge of a block.
+ */
+ for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
+ if (--cur->bc_ptrs[lev] > 0)
+ break;
+ /* Read-ahead the left block for the next loop. */
+ xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
+ }
+
+ /*
+ * If we went off the root then we are either seriously confused
+ * or have the tree root in an inode.
+ */
+ if (lev == cur->bc_nlevels) {
+ if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
+ goto out0;
+ ASSERT(0);
+ error = EFSCORRUPTED;
+ goto error0;
+ }
+ ASSERT(lev < cur->bc_nlevels);
+
+ /*
+ * Now walk back down the tree, fixing up the cursor's buffer
+ * pointers and key numbers.
+ */
+ for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
+ union xfs_btree_ptr *ptrp;
+
+ ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
+ --lev;
+ error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
+ if (error)
+ goto error0;
+ xfs_btree_setbuf(cur, lev, bp);
+ cur->bc_ptrs[lev] = xfs_btree_get_numrecs(block);
+ }
+out1:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 1;
+ return 0;
+
+out0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 0;
+ return 0;
+
+error0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+}
+
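+/*
+ * Get the block to search during a lookup: the inode root if we're at the
+ * top of an inode-rooted btree, otherwise the cursor's existing buffer if
+ * it already holds the right disk address, otherwise a freshly read block.
+ */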
+STATIC int
+xfs_btree_lookup_get_block(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ int level, /* level in the btree */
+ union xfs_btree_ptr *pp, /* ptr to btree block */
+ struct xfs_btree_block **blkp) /* return btree block */
+{
+ struct xfs_buf *bp; /* buffer pointer for btree block */
+ int error = 0;
+
+ /* special case the root block if in an inode */
+ if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+ (level == cur->bc_nlevels - 1)) {
+ *blkp = xfs_btree_get_iroot(cur);
+ return 0;
+ }
+
+ /*
+ * If the old buffer at this level is for the disk address we are
+ * looking for, re-use it.
+ *
+ * Otherwise throw it away and get a new one.
+ */
+ bp = cur->bc_bufs[level];
+ if (bp && XFS_BUF_ADDR(bp) == xfs_btree_ptr_to_daddr(cur, pp)) {
+ *blkp = XFS_BUF_TO_BLOCK(bp);
+ return 0;
+ }
+
+ error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp);
+ if (error)
+ return error;
+
+ xfs_btree_setbuf(cur, level, bp);
+ return 0;
+}
+
+/*
+ * Get current search key. For level 0 we don't actually have a key
+ * structure so we make one up from the record. For all other levels
+ * we just return the right key.
+ */
+STATIC union xfs_btree_key *
+xfs_lookup_get_search_key(
+ struct xfs_btree_cur *cur,
+ int level,
+ int keyno,
+ struct xfs_btree_block *block,
+ union xfs_btree_key *kp)
+{
+ if (level == 0) {
+ cur->bc_ops->init_key_from_rec(kp,
+ xfs_btree_rec_addr(cur, keyno, block));
+ return kp;
+ }
+
+ return xfs_btree_key_addr(cur, keyno, block);
+}
+
+/*
+ * Lookup the record. The cursor is made to point to it, based on dir.
+ * stat is set to 0 if we can't find any such record, 1 for success.
+ */
+int /* error */
+xfs_btree_lookup(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ xfs_lookup_t dir, /* <=, ==, or >= */
+ int *stat) /* success/failure */
+{
+ struct xfs_btree_block *block; /* current btree block */
+ __int64_t diff; /* difference for the current key */
+ int error; /* error return value */
+ int keyno; /* current key number */
+ int level; /* level in the btree */
+ union xfs_btree_ptr *pp; /* ptr to btree block */
+ union xfs_btree_ptr ptr; /* ptr to btree block */
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_TRACE_ARGI(cur, dir);
+
+ XFS_BTREE_STATS_INC(cur, lookup);
+
+ block = NULL;
+ keyno = 0;
+
+ /* initialise start pointer from cursor */
+ cur->bc_ops->init_ptr_from_cur(cur, &ptr);
+ pp = &ptr;
+
+ /*
+ * Iterate over each level in the btree, starting at the root.
+ * For each level above the leaves, find the key we need, based
+ * on the lookup record, then follow the corresponding block
+ * pointer down to the next level.
+ */
+ for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
+ /* Get the block we need to do the lookup on. */
+ error = xfs_btree_lookup_get_block(cur, level, pp, &block);
+ if (error)
+ goto error0;
+
+ if (diff == 0) {
+ /*
+ * If we already had a key match at a higher level, we
+ * know we need to use the first entry in this block.
+ */
+ keyno = 1;
+ } else {
+ /* Otherwise search this block. Do a binary search. */
+
+ int high; /* high entry number */
+ int low; /* low entry number */
+
+ /* Set low and high entry numbers, 1-based. */
+ low = 1;
+ high = xfs_btree_get_numrecs(block);
+ if (!high) {
+ /* Block is empty, must be an empty leaf. */
+ ASSERT(level == 0 && cur->bc_nlevels == 1);
+
+ cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 0;
+ return 0;
+ }
+
+ /* Binary search the block. */
+ while (low <= high) {
+ union xfs_btree_key key;
+ union xfs_btree_key *kp;
+
+ XFS_BTREE_STATS_INC(cur, compare);
+
+ /* keyno is average of low and high. */
+ keyno = (low + high) >> 1;
+
+ /* Get current search key */
+ kp = xfs_lookup_get_search_key(cur, level,
+ keyno, block, &key);
+
+ /*
+ * Compute difference to get next direction:
+ * - less than, move right
+ * - greater than, move left
+ * - equal, we're done
+ */
+ diff = cur->bc_ops->key_diff(cur, kp);
+ if (diff < 0)
+ low = keyno + 1;
+ else if (diff > 0)
+ high = keyno - 1;
+ else
+ break;
+ }
+ }
+
+ /*
+ * If there are more levels, set up for the next level
+ * by getting the block number and filling in the cursor.
+ */
+ if (level > 0) {
+ /*
+ * If we moved left, need the previous key number,
+ * unless there isn't one.
+ */
+ if (diff > 0 && --keyno < 1)
+ keyno = 1;
+ pp = xfs_btree_ptr_addr(cur, keyno, block);
+
+#ifdef DEBUG
+ error = xfs_btree_check_ptr(cur, pp, 0, level);
+ if (error)
+ goto error0;
+#endif
+ cur->bc_ptrs[level] = keyno;
+ }
+ }
+
+ /* Done with the search. See if we need to adjust the results. */
+ if (dir != XFS_LOOKUP_LE && diff < 0) {
+ keyno++;
+ /*
+ * If ge search and we went off the end of the block, but it's
+ * not the last block, we're in the wrong block.
+ */
+ xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
+ if (dir == XFS_LOOKUP_GE &&
+ keyno > xfs_btree_get_numrecs(block) &&
+ !xfs_btree_ptr_is_null(cur, &ptr)) {
+ int i;
+
+ cur->bc_ptrs[0] = keyno;
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
+ goto error0;
+ XFS_WANT_CORRUPTED_RETURN(i == 1);
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 1;
+ return 0;
+ }
+ } else if (dir == XFS_LOOKUP_LE && diff > 0)
+ keyno--;
+ cur->bc_ptrs[0] = keyno;
+
+ /* Return if we succeeded or not. */
+ if (keyno == 0 || keyno > xfs_btree_get_numrecs(block))
+ *stat = 0;
+ else if (dir != XFS_LOOKUP_EQ || diff == 0)
+ *stat = 1;
+ else
+ *stat = 0;
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ return 0;
+
+error0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+}
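+
+/*
+ * Note on the ->key_diff contract relied on by the binary search above:
+ * each btree implementation returns (key in block) - (key being searched
+ * for, held in the cursor).  A minimal sketch, modelled on the inode
+ * btree callback, is roughly:
+ *
+ *	return (__int64_t)be32_to_cpu(key->inobt.ir_startino) -
+ *			  cur->bc_rec.i.ir_startino;
+ */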
+
+/*
+ * Update keys at all levels from here to the root along the cursor's path.
+ */
+STATIC int
+xfs_btree_updkey(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *keyp,
+ int level)
+{
+ struct xfs_btree_block *block;
+ struct xfs_buf *bp;
+ union xfs_btree_key *kp;
+ int ptr;
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_TRACE_ARGIK(cur, level, keyp);
+
+ ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) || level >= 1);
+
+ /*
+ * Go up the tree from this level toward the root.
+ * At each level, update the key value to the value input.
+ * Stop when we reach a level where the cursor isn't pointing
+ * at the first entry in the block.
+ */
+ for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
+#ifdef DEBUG
+ int error;
+#endif
+ block = xfs_btree_get_block(cur, level, &bp);
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, level, bp);
+ if (error) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+ }
+#endif
+ ptr = cur->bc_ptrs[level];
+ kp = xfs_btree_key_addr(cur, ptr, block);
+ xfs_btree_copy_keys(cur, kp, keyp, 1);
+ xfs_btree_log_keys(cur, bp, ptr, ptr);
+ }
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ return 0;
+}
+
+/*
+ * Update the record referred to by cur to the value in the
+ * given record. This either works (return 0) or gets an
+ * EFSCORRUPTED error.
+ */
+int
+xfs_btree_update(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec)
+{
+ struct xfs_btree_block *block;
+ struct xfs_buf *bp;
+ int error;
+ int ptr;
+ union xfs_btree_rec *rp;
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_TRACE_ARGR(cur, rec);
+
+ /* Pick up the current block. */
+ block = xfs_btree_get_block(cur, 0, &bp);
+
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, 0, bp);
+ if (error)
+ goto error0;
+#endif
+ /* Get the address of the rec to be updated. */
+ ptr = cur->bc_ptrs[0];
+ rp = xfs_btree_rec_addr(cur, ptr, block);
+
+ /* Fill in the new contents and log them. */
+ xfs_btree_copy_recs(cur, rp, rec, 1);
+ xfs_btree_log_recs(cur, bp, ptr, ptr);
+
+ /*
+ * If we are tracking the last record in the tree and
+ * we are at the far right edge of the tree, update it.
+ */
+ if (xfs_btree_is_lastrec(cur, block, 0)) {
+ cur->bc_ops->update_lastrec(cur, block, rec,
+ ptr, LASTREC_UPDATE);
+ }
+
+ /* Updating first rec in leaf. Pass new key value up to our parent. */
+ if (ptr == 1) {
+ union xfs_btree_key key;
+
+ cur->bc_ops->init_key_from_rec(&key, rec);
+ error = xfs_btree_updkey(cur, &key, 1);
+ if (error)
+ goto error0;
+ }
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ return 0;
+
+error0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+}
+
+/*
+ * Move 1 record left from cur/level if possible.
+ * Update cur to reflect the new path.
+ */
+STATIC int /* error */
+xfs_btree_lshift(
+ struct xfs_btree_cur *cur,
+ int level,
+ int *stat) /* success/failure */
+{
+ union xfs_btree_key key; /* btree key */
+ struct xfs_buf *lbp; /* left buffer pointer */
+ struct xfs_btree_block *left; /* left btree block */
+ int lrecs; /* left record count */
+ struct xfs_buf *rbp; /* right buffer pointer */
+ struct xfs_btree_block *right; /* right btree block */
+ int rrecs; /* right record count */
+ union xfs_btree_ptr lptr; /* left btree pointer */
+ union xfs_btree_key *rkp = NULL; /* right btree key */
+ union xfs_btree_ptr *rpp = NULL; /* right address pointer */
+ union xfs_btree_rec *rrp = NULL; /* right record pointer */
+ int error; /* error return value */
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_TRACE_ARGI(cur, level);
+
+ if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+ level == cur->bc_nlevels - 1)
+ goto out0;
+
+ /* Set up variables for this block as "right". */
+ right = xfs_btree_get_block(cur, level, &rbp);
+
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, right, level, rbp);
+ if (error)
+ goto error0;
+#endif
+
+ /* If we've got no left sibling then we can't shift an entry left. */
+ xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
+ if (xfs_btree_ptr_is_null(cur, &lptr))
+ goto out0;
+
+ /*
+ * If the cursor entry is the one that would be moved, don't
+ * do it... it's too complicated.
+ */
+ if (cur->bc_ptrs[level] <= 1)
+ goto out0;
+
+ /* Set up the left neighbor as "left". */
+ error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
+ if (error)
+ goto error0;
+
+ /* If it's full, it can't take another entry. */
+ lrecs = xfs_btree_get_numrecs(left);
+ if (lrecs == cur->bc_ops->get_maxrecs(cur, level))
+ goto out0;
+
+ rrecs = xfs_btree_get_numrecs(right);
+
+ /*
+ * We add one entry to the left side and remove one for the right side.
+ * Account for it here, the changes will be updated on disk and logged
+ * later.
+ */
+ lrecs++;
+ rrecs--;
+
+ XFS_BTREE_STATS_INC(cur, lshift);
+ XFS_BTREE_STATS_ADD(cur, moves, 1);
+
+ /*
+ * If non-leaf, copy a key and a ptr to the left block.
+ * Log the changes to the left block.
+ */
+ if (level > 0) {
+ /* It's a non-leaf. Move keys and pointers. */
+ union xfs_btree_key *lkp; /* left btree key */
+ union xfs_btree_ptr *lpp; /* left address pointer */
+
+ lkp = xfs_btree_key_addr(cur, lrecs, left);
+ rkp = xfs_btree_key_addr(cur, 1, right);
+
+ lpp = xfs_btree_ptr_addr(cur, lrecs, left);
+ rpp = xfs_btree_ptr_addr(cur, 1, right);
+#ifdef DEBUG
+ error = xfs_btree_check_ptr(cur, rpp, 0, level);
+ if (error)
+ goto error0;
+#endif
+ xfs_btree_copy_keys(cur, lkp, rkp, 1);
+ xfs_btree_copy_ptrs(cur, lpp, rpp, 1);
+
+ xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
+ xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);
+
+ ASSERT(cur->bc_ops->keys_inorder(cur,
+ xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
+ } else {
+ /* It's a leaf. Move records. */
+ union xfs_btree_rec *lrp; /* left record pointer */
+
+ lrp = xfs_btree_rec_addr(cur, lrecs, left);
+ rrp = xfs_btree_rec_addr(cur, 1, right);
+
+ xfs_btree_copy_recs(cur, lrp, rrp, 1);
+ xfs_btree_log_recs(cur, lbp, lrecs, lrecs);
+
+ ASSERT(cur->bc_ops->recs_inorder(cur,
+ xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
+ }
+
+ xfs_btree_set_numrecs(left, lrecs);
+ xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
+
+ xfs_btree_set_numrecs(right, rrecs);
+ xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
+
+ /*
+ * Slide the contents of right down one entry.
+ */
+ XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
+ if (level > 0) {
+ /* It's a nonleaf. Operate on keys and ptrs. */
+#ifdef DEBUG
+ int i; /* loop index */
+
+ for (i = 0; i < rrecs; i++) {
+ error = xfs_btree_check_ptr(cur, rpp, i + 1, level);
+ if (error)
+ goto error0;
+ }
+#endif
+ xfs_btree_shift_keys(cur,
+ xfs_btree_key_addr(cur, 2, right),
+ -1, rrecs);
+ xfs_btree_shift_ptrs(cur,
+ xfs_btree_ptr_addr(cur, 2, right),
+ -1, rrecs);
+
+ xfs_btree_log_keys(cur, rbp, 1, rrecs);
+ xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
+ } else {
+ /* It's a leaf. Operate on records. */
+ xfs_btree_shift_recs(cur,
+ xfs_btree_rec_addr(cur, 2, right),
+ -1, rrecs);
+ xfs_btree_log_recs(cur, rbp, 1, rrecs);
+
+ /*
+ * If it's the first record in the block, we'll need a key
+ * structure to pass up to the next level (updkey).
+ */
+ cur->bc_ops->init_key_from_rec(&key,
+ xfs_btree_rec_addr(cur, 1, right));
+ rkp = &key;
+ }
+
+ /* Update the parent key values of right. */
+ error = xfs_btree_updkey(cur, rkp, level + 1);
+ if (error)
+ goto error0;
+
+ /* Slide the cursor value left one. */
+ cur->bc_ptrs[level]--;
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 1;
+ return 0;
+
+out0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 0;
+ return 0;
+
+error0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+}
+
+/*
+ * Move 1 record right from cur/level if possible.
+ * Update cur to reflect the new path.
+ */
+STATIC int /* error */
+xfs_btree_rshift(
+ struct xfs_btree_cur *cur,
+ int level,
+ int *stat) /* success/failure */
+{
+ union xfs_btree_key key; /* btree key */
+ struct xfs_buf *lbp; /* left buffer pointer */
+ struct xfs_btree_block *left; /* left btree block */
+ struct xfs_buf *rbp; /* right buffer pointer */
+ struct xfs_btree_block *right; /* right btree block */
+ struct xfs_btree_cur *tcur; /* temporary btree cursor */
+ union xfs_btree_ptr rptr; /* right block pointer */
+ union xfs_btree_key *rkp; /* right btree key */
+ int rrecs; /* right record count */
+ int lrecs; /* left record count */
+ int error; /* error return value */
+ int i; /* loop counter */
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_TRACE_ARGI(cur, level);
+
+ if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+ (level == cur->bc_nlevels - 1))
+ goto out0;
+
+ /* Set up variables for this block as "left". */
+ left = xfs_btree_get_block(cur, level, &lbp);
+
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, left, level, lbp);
+ if (error)
+ goto error0;
+#endif
+
+ /* If we've got no right sibling then we can't shift an entry right. */
+ xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
+ if (xfs_btree_ptr_is_null(cur, &rptr))
+ goto out0;
+
+ /*
+ * If the cursor entry is the one that would be moved, don't
+ * do it... it's too complicated.
+ */
+ lrecs = xfs_btree_get_numrecs(left);
+ if (cur->bc_ptrs[level] >= lrecs)
+ goto out0;
+
+ /* Set up the right neighbor as "right". */
+ error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
+ if (error)
+ goto error0;
+
+ /* If it's full, it can't take another entry. */
+ rrecs = xfs_btree_get_numrecs(right);
+ if (rrecs == cur->bc_ops->get_maxrecs(cur, level))
+ goto out0;
+
+ XFS_BTREE_STATS_INC(cur, rshift);
+ XFS_BTREE_STATS_ADD(cur, moves, rrecs);
+
+ /*
+ * Make a hole at the start of the right neighbor block, then
+ * copy the last left block entry to the hole.
+ */
+ if (level > 0) {
+ /* It's a nonleaf. Make a hole in the keys and ptrs. */
+ union xfs_btree_key *lkp;
+ union xfs_btree_ptr *lpp;
+ union xfs_btree_ptr *rpp;
+
+ lkp = xfs_btree_key_addr(cur, lrecs, left);
+ lpp = xfs_btree_ptr_addr(cur, lrecs, left);
+ rkp = xfs_btree_key_addr(cur, 1, right);
+ rpp = xfs_btree_ptr_addr(cur, 1, right);
+
+#ifdef DEBUG
+ for (i = rrecs - 1; i >= 0; i--) {
+ error = xfs_btree_check_ptr(cur, rpp, i, level);
+ if (error)
+ goto error0;
+ }
+#endif
+
+ xfs_btree_shift_keys(cur, rkp, 1, rrecs);
+ xfs_btree_shift_ptrs(cur, rpp, 1, rrecs);
+
+#ifdef DEBUG
+ error = xfs_btree_check_ptr(cur, lpp, 0, level);
+ if (error)
+ goto error0;
+#endif
+
+ /* Now put the new data in, and log it. */
+ xfs_btree_copy_keys(cur, rkp, lkp, 1);
+ xfs_btree_copy_ptrs(cur, rpp, lpp, 1);
+
+ xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
+ xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);
+
+ ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
+ xfs_btree_key_addr(cur, 2, right)));
+ } else {
+ /* It's a leaf. Make a hole in the records. */
+ union xfs_btree_rec *lrp;
+ union xfs_btree_rec *rrp;
+
+ lrp = xfs_btree_rec_addr(cur, lrecs, left);
+ rrp = xfs_btree_rec_addr(cur, 1, right);
+
+ xfs_btree_shift_recs(cur, rrp, 1, rrecs);
+
+ /* Now put the new data in, and log it. */
+ xfs_btree_copy_recs(cur, rrp, lrp, 1);
+ xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);
+
+ cur->bc_ops->init_key_from_rec(&key, rrp);
+ rkp = &key;
+
+ ASSERT(cur->bc_ops->recs_inorder(cur, rrp,
+ xfs_btree_rec_addr(cur, 2, right)));
+ }
+
+ /*
+ * Decrement and log left's numrecs, bump and log right's numrecs.
+ */
+ xfs_btree_set_numrecs(left, --lrecs);
+ xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
+
+ xfs_btree_set_numrecs(right, ++rrecs);
+ xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
+
+ /*
+ * Using a temporary cursor, update the parent key values of the
+ * block on the right.
+ */
+ error = xfs_btree_dup_cursor(cur, &tcur);
+ if (error)
+ goto error0;
+ i = xfs_btree_lastrec(tcur, level);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+
+ error = xfs_btree_increment(tcur, level, &i);
+ if (error)
+ goto error1;
+
+ error = xfs_btree_updkey(tcur, rkp, level + 1);
+ if (error)
+ goto error1;
+
+ xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 1;
+ return 0;
+
+out0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 0;
+ return 0;
+
+error0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+
+error1:
+ XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR);
+ xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
+ return error;
+}
+
+/*
+ * Split cur/level block in half.
+ * Return new block number and the key to its first
+ * record (to be inserted into parent).
+ */
+STATIC int /* error */
+__xfs_btree_split(
+ struct xfs_btree_cur *cur,
+ int level,
+ union xfs_btree_ptr *ptrp,
+ union xfs_btree_key *key,
+ struct xfs_btree_cur **curp,
+ int *stat) /* success/failure */
+{
+ union xfs_btree_ptr lptr; /* left sibling block ptr */
+ struct xfs_buf *lbp; /* left buffer pointer */
+ struct xfs_btree_block *left; /* left btree block */
+ union xfs_btree_ptr rptr; /* right sibling block ptr */
+ struct xfs_buf *rbp; /* right buffer pointer */
+ struct xfs_btree_block *right; /* right btree block */
+ union xfs_btree_ptr rrptr; /* right-right sibling ptr */
+ struct xfs_buf *rrbp; /* right-right buffer pointer */
+ struct xfs_btree_block *rrblock; /* right-right btree block */
+ int lrecs;
+ int rrecs;
+ int src_index;
+ int error; /* error return value */
+#ifdef DEBUG
+ int i;
+#endif
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_TRACE_ARGIPK(cur, level, *ptrp, key);
+
+ XFS_BTREE_STATS_INC(cur, split);
+
+ /* Set up left block (current one). */
+ left = xfs_btree_get_block(cur, level, &lbp);
+
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, left, level, lbp);
+ if (error)
+ goto error0;
+#endif
+
+ xfs_btree_buf_to_ptr(cur, lbp, &lptr);
+
+ /* Allocate the new block. If we can't do it, we're toast. Give up. */
+ error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, stat);
+ if (error)
+ goto error0;
+ if (*stat == 0)
+ goto out0;
+ XFS_BTREE_STATS_INC(cur, alloc);
+
+ /* Set up the new block as "right". */
+ error = xfs_btree_get_buf_block(cur, &rptr, 0, &right, &rbp);
+ if (error)
+ goto error0;
+
+ /* Fill in the btree header for the new right block. */
+ xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0);
+
+ /*
+ * Split the entries between the old and the new block evenly.
+ * Make sure that if there's an odd number of entries now, each
+ * block will end up with the same number of entries once the new
+ * record is inserted: the extra entry goes to the block that will
+ * not receive the insertion.
+ */
+ lrecs = xfs_btree_get_numrecs(left);
+ rrecs = lrecs / 2;
+ if ((lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1)
+ rrecs++;
+ src_index = (lrecs - rrecs + 1);
+
+ XFS_BTREE_STATS_ADD(cur, moves, rrecs);
+
+ /*
+ * Copy btree block entries from the left block over to the
+ * new block, the right. Update the right block and log the
+ * changes.
+ */
+ if (level > 0) {
+ /* It's a non-leaf. Move keys and pointers. */
+ union xfs_btree_key *lkp; /* left btree key */
+ union xfs_btree_ptr *lpp; /* left address pointer */
+ union xfs_btree_key *rkp; /* right btree key */
+ union xfs_btree_ptr *rpp; /* right address pointer */
+
+ lkp = xfs_btree_key_addr(cur, src_index, left);
+ lpp = xfs_btree_ptr_addr(cur, src_index, left);
+ rkp = xfs_btree_key_addr(cur, 1, right);
+ rpp = xfs_btree_ptr_addr(cur, 1, right);
+
+#ifdef DEBUG
+ for (i = src_index; i < rrecs; i++) {
+ error = xfs_btree_check_ptr(cur, lpp, i, level);
+ if (error)
+ goto error0;
+ }
+#endif
+
+ xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
+ xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);
+
+ xfs_btree_log_keys(cur, rbp, 1, rrecs);
+ xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
+
+ /* Grab the keys to the entries moved to the right block */
+ xfs_btree_copy_keys(cur, key, rkp, 1);
+ } else {
+ /* It's a leaf. Move records. */
+ union xfs_btree_rec *lrp; /* left record pointer */
+ union xfs_btree_rec *rrp; /* right record pointer */
+
+ lrp = xfs_btree_rec_addr(cur, src_index, left);
+ rrp = xfs_btree_rec_addr(cur, 1, right);
+
+ xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
+ xfs_btree_log_recs(cur, rbp, 1, rrecs);
+
+ cur->bc_ops->init_key_from_rec(key,
+ xfs_btree_rec_addr(cur, 1, right));
+ }
+
+ /*
+ * Find the left block number by looking in the buffer.
+ * Adjust numrecs, sibling pointers.
+ */
+ xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
+ xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
+ xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
+ xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
+
+ lrecs -= rrecs;
+ xfs_btree_set_numrecs(left, lrecs);
+ xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);
+
+ xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
+ xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
+
+ /*
+ * If there's a block to the new block's right, make that block
+ * point back to right instead of to left.
+ */
+ if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
+ error = xfs_btree_read_buf_block(cur, &rrptr,
+ 0, &rrblock, &rrbp);
+ if (error)
+ goto error0;
+ xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
+ xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
+ }
+ /*
+ * If the cursor is really in the right block, move it there.
+ * If it's just pointing past the last entry in left, then we'll
+ * insert there, so don't change anything in that case.
+ */
+ if (cur->bc_ptrs[level] > lrecs + 1) {
+ xfs_btree_setbuf(cur, level, rbp);
+ cur->bc_ptrs[level] -= lrecs;
+ }
+ /*
+ * If there are more levels, we'll need another cursor which refers to
+ * the right block, no matter where this cursor was.
+ */
+ if (level + 1 < cur->bc_nlevels) {
+ error = xfs_btree_dup_cursor(cur, curp);
+ if (error)
+ goto error0;
+ (*curp)->bc_ptrs[level + 1]++;
+ }
+ *ptrp = rptr;
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 1;
+ return 0;
+out0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 0;
+ return 0;
+
+error0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+}
+
+struct xfs_btree_split_args {
+ struct xfs_btree_cur *cur;
+ int level;
+ union xfs_btree_ptr *ptrp;
+ union xfs_btree_key *key;
+ struct xfs_btree_cur **curp;
+ int *stat; /* success/failure */
+ int result;
+ bool kswapd; /* allocation in kswapd context */
+ struct completion *done;
+ struct work_struct work;
+};
+
+/*
+ * Stack switching interfaces for allocation
+ */
+static void
+xfs_btree_split_worker(
+ struct work_struct *work)
+{
+ struct xfs_btree_split_args *args = container_of(work,
+ struct xfs_btree_split_args, work);
+ unsigned long pflags;
+ unsigned long new_pflags = PF_FSTRANS;
+
+ /*
+ * we are in a transaction context here, but may also be doing work
+ * in kswapd context, and hence we may need to inherit that state
+ * temporarily to ensure that we don't block waiting for memory reclaim
+ * in any way.
+ */
+ if (args->kswapd)
+ new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
+
+ current_set_flags_nested(&pflags, new_pflags);
+
+ args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
+ args->key, args->curp, args->stat);
+ complete(args->done);
+
+ current_restore_flags_nested(&pflags, new_pflags);
+}
+
+/*
+ * BMBT split requests often come in with little stack to work on. Push
+ * them off to a worker thread so there is lots of stack to use. For the other
+ * btree types, just call directly to avoid the context switch overhead here.
+ */
+STATIC int /* error */
+xfs_btree_split(
+ struct xfs_btree_cur *cur,
+ int level,
+ union xfs_btree_ptr *ptrp,
+ union xfs_btree_key *key,
+ struct xfs_btree_cur **curp,
+ int *stat) /* success/failure */
+{
+ struct xfs_btree_split_args args;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ if (cur->bc_btnum != XFS_BTNUM_BMAP)
+ return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
+
+ args.cur = cur;
+ args.level = level;
+ args.ptrp = ptrp;
+ args.key = key;
+ args.curp = curp;
+ args.stat = stat;
+ args.done = &done;
+ args.kswapd = current_is_kswapd();
+ INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
+ queue_work(xfs_alloc_wq, &args.work);
+ wait_for_completion(&done);
+ destroy_work_on_stack(&args.work);
+ return args.result;
+}
+
+/*
+ * Copy the old inode root contents into a real block and make the
+ * broot point to it.
+ */
+int /* error */
+xfs_btree_new_iroot(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ int *logflags, /* logging flags for inode */
+ int *stat) /* return status - 0 fail */
+{
+ struct xfs_buf *cbp; /* buffer for cblock */
+ struct xfs_btree_block *block; /* btree block */
+ struct xfs_btree_block *cblock; /* child btree block */
+ union xfs_btree_key *ckp; /* child key pointer */
+ union xfs_btree_ptr *cpp; /* child ptr pointer */
+ union xfs_btree_key *kp; /* pointer to btree key */
+ union xfs_btree_ptr *pp; /* pointer to block addr */
+ union xfs_btree_ptr nptr; /* new block addr */
+ int level; /* btree level */
+ int error; /* error return code */
+#ifdef DEBUG
+ int i; /* loop counter */
+#endif
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_STATS_INC(cur, newroot);
+
+ ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
+
+ level = cur->bc_nlevels - 1;
+
+ block = xfs_btree_get_iroot(cur);
+ pp = xfs_btree_ptr_addr(cur, 1, block);
+
+ /* Allocate the new block. If we can't do it, we're toast. Give up. */
+ error = cur->bc_ops->alloc_block(cur, pp, &nptr, stat);
+ if (error)
+ goto error0;
+ if (*stat == 0) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ return 0;
+ }
+ XFS_BTREE_STATS_INC(cur, alloc);
+
+ /* Copy the root into a real block. */
+ error = xfs_btree_get_buf_block(cur, &nptr, 0, &cblock, &cbp);
+ if (error)
+ goto error0;
+
+ /*
+ * We can't just memcpy() the root in for CRC enabled btree blocks.
+ * In that case we also have to ensure the blkno remains correct.
+ */
+ memcpy(cblock, block, xfs_btree_block_len(cur));
+ if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn);
+ else
+ cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn);
+ }
+
+ be16_add_cpu(&block->bb_level, 1);
+ xfs_btree_set_numrecs(block, 1);
+ cur->bc_nlevels++;
+ cur->bc_ptrs[level + 1] = 1;
+
+ kp = xfs_btree_key_addr(cur, 1, block);
+ ckp = xfs_btree_key_addr(cur, 1, cblock);
+ xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock));
+
+ cpp = xfs_btree_ptr_addr(cur, 1, cblock);
+#ifdef DEBUG
+ for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
+ error = xfs_btree_check_ptr(cur, pp, i, level);
+ if (error)
+ goto error0;
+ }
+#endif
+ xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock));
+
+#ifdef DEBUG
+ error = xfs_btree_check_ptr(cur, &nptr, 0, level);
+ if (error)
+ goto error0;
+#endif
+ xfs_btree_copy_ptrs(cur, pp, &nptr, 1);
+
+ xfs_iroot_realloc(cur->bc_private.b.ip,
+ 1 - xfs_btree_get_numrecs(cblock),
+ cur->bc_private.b.whichfork);
+
+ xfs_btree_setbuf(cur, level, cbp);
+
+ /*
+ * Do all this logging at the end so that
+ * the root is at the right level.
+ */
+ xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
+ xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
+ xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
+
+ *logflags |=
+ XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork);
+ *stat = 1;
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ return 0;
+error0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+}
+
+/*
+ * Allocate a new root block, fill it in.
+ */
+STATIC int /* error */
+xfs_btree_new_root(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ int *stat) /* success/failure */
+{
+ struct xfs_btree_block *block; /* one half of the old root block */
+ struct xfs_buf *bp; /* buffer containing block */
+ int error; /* error return value */
+ struct xfs_buf *lbp; /* left buffer pointer */
+ struct xfs_btree_block *left; /* left btree block */
+ struct xfs_buf *nbp; /* new (root) buffer */
+ struct xfs_btree_block *new; /* new (root) btree block */
+ int nptr; /* new value for key index, 1 or 2 */
+ struct xfs_buf *rbp; /* right buffer pointer */
+ struct xfs_btree_block *right; /* right btree block */
+ union xfs_btree_ptr rptr;
+ union xfs_btree_ptr lptr;
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_STATS_INC(cur, newroot);
+
+ /* initialise our start point from the cursor */
+ cur->bc_ops->init_ptr_from_cur(cur, &rptr);
+
+ /* Allocate the new block. If we can't do it, we're toast. Give up. */
+ error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, stat);
+ if (error)
+ goto error0;
+ if (*stat == 0)
+ goto out0;
+ XFS_BTREE_STATS_INC(cur, alloc);
+
+ /* Set up the new block. */
+ error = xfs_btree_get_buf_block(cur, &lptr, 0, &new, &nbp);
+ if (error)
+ goto error0;
+
+ /* Set the root in the holding structure increasing the level by 1. */
+ cur->bc_ops->set_root(cur, &lptr, 1);
+
+ /*
+ * At the previous root level there are now two blocks: the old root,
+ * and the new block generated when it was split. We don't know which
+ * one the cursor is pointing at, so we set up variables "left" and
+ * "right" for each case.
+ */
+ block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp);
+
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp);
+ if (error)
+ goto error0;
+#endif
+
+ xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
+ if (!xfs_btree_ptr_is_null(cur, &rptr)) {
+ /* Our block is left, pick up the right block. */
+ lbp = bp;
+ xfs_btree_buf_to_ptr(cur, lbp, &lptr);
+ left = block;
+ error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
+ if (error)
+ goto error0;
+ bp = rbp;
+ nptr = 1;
+ } else {
+ /* Our block is right, pick up the left block. */
+ rbp = bp;
+ xfs_btree_buf_to_ptr(cur, rbp, &rptr);
+ right = block;
+ xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
+ error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
+ if (error)
+ goto error0;
+ bp = lbp;
+ nptr = 2;
+ }
+ /* Fill in the new block's btree header and log it. */
+ xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2);
+ xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
+ ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) &&
+ !xfs_btree_ptr_is_null(cur, &rptr));
+
+ /* Fill in the key data in the new root. */
+ if (xfs_btree_get_level(left) > 0) {
+ xfs_btree_copy_keys(cur,
+ xfs_btree_key_addr(cur, 1, new),
+ xfs_btree_key_addr(cur, 1, left), 1);
+ xfs_btree_copy_keys(cur,
+ xfs_btree_key_addr(cur, 2, new),
+ xfs_btree_key_addr(cur, 1, right), 1);
+ } else {
+ cur->bc_ops->init_key_from_rec(
+ xfs_btree_key_addr(cur, 1, new),
+ xfs_btree_rec_addr(cur, 1, left));
+ cur->bc_ops->init_key_from_rec(
+ xfs_btree_key_addr(cur, 2, new),
+ xfs_btree_rec_addr(cur, 1, right));
+ }
+ xfs_btree_log_keys(cur, nbp, 1, 2);
+
+ /* Fill in the pointer data in the new root. */
+ xfs_btree_copy_ptrs(cur,
+ xfs_btree_ptr_addr(cur, 1, new), &lptr, 1);
+ xfs_btree_copy_ptrs(cur,
+ xfs_btree_ptr_addr(cur, 2, new), &rptr, 1);
+ xfs_btree_log_ptrs(cur, nbp, 1, 2);
+
+ /* Fix up the cursor. */
+ xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
+ cur->bc_ptrs[cur->bc_nlevels] = nptr;
+ cur->bc_nlevels++;
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 1;
+ return 0;
+error0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+out0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 0;
+ return 0;
+}
+
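+/*
+ * Make room in a full btree block: for an inode root, grow or replace the
+ * root; otherwise try shifting an entry to the right sibling, then to the
+ * left sibling, and finally split the block.
+ */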
+STATIC int
+xfs_btree_make_block_unfull(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ int level, /* btree level */
+ int numrecs,/* # of recs in block */
+ int *oindex,/* old tree index */
+ int *index, /* new tree index */
+ union xfs_btree_ptr *nptr, /* new btree ptr */
+ struct xfs_btree_cur **ncur, /* new btree cursor */
+ union xfs_btree_rec *nrec, /* new record */
+ int *stat)
+{
+ union xfs_btree_key key; /* new btree key value */
+ int error = 0;
+
+ if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+ level == cur->bc_nlevels - 1) {
+ struct xfs_inode *ip = cur->bc_private.b.ip;
+
+ if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
+ /* A root block that can be made bigger. */
+ xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
+ } else {
+ /* A root block that needs replacing */
+ int logflags = 0;
+
+ error = xfs_btree_new_iroot(cur, &logflags, stat);
+ if (error || *stat == 0)
+ return error;
+
+ xfs_trans_log_inode(cur->bc_tp, ip, logflags);
+ }
+
+ return 0;
+ }
+
+ /* First, try shifting an entry to the right neighbor. */
+ error = xfs_btree_rshift(cur, level, stat);
+ if (error || *stat)
+ return error;
+
+ /* Next, try shifting an entry to the left neighbor. */
+ error = xfs_btree_lshift(cur, level, stat);
+ if (error)
+ return error;
+
+ if (*stat) {
+ *oindex = *index = cur->bc_ptrs[level];
+ return 0;
+ }
+
+ /*
+ * Next, try splitting the current block in half.
+ *
+ * If this works we have to re-set our variables because we
+ * could be in a different block now.
+ */
+ error = xfs_btree_split(cur, level, nptr, &key, ncur, stat);
+ if (error || *stat == 0)
+ return error;
+
+ *index = cur->bc_ptrs[level];
+ cur->bc_ops->init_rec_from_key(&key, nrec);
+ return 0;
+}
+
+/*
+ * Insert one record/level. Return information to the caller
+ * allowing the next level up to proceed if necessary.
+ */
+STATIC int
+xfs_btree_insrec(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ int level, /* level to insert record at */
+ union xfs_btree_ptr *ptrp, /* i/o: block number inserted */
+ union xfs_btree_rec *recp, /* i/o: record data inserted */
+ struct xfs_btree_cur **curp, /* output: new cursor replacing cur */
+ int *stat) /* success/failure */
+{
+ struct xfs_btree_block *block; /* btree block */
+ struct xfs_buf *bp; /* buffer for block */
+ union xfs_btree_key key; /* btree key */
+ union xfs_btree_ptr nptr; /* new block ptr */
+ struct xfs_btree_cur *ncur; /* new btree cursor */
+ union xfs_btree_rec nrec; /* new record to insert at next level up */
+ int optr; /* old key/record index */
+ int ptr; /* key/record index */
+ int numrecs;/* number of records */
+ int error; /* error return value */
+#ifdef DEBUG
+ int i;
+#endif
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, recp);
+
+ ncur = NULL;
+
+ /*
+ * If we have an external root pointer, and we've made it to the
+ * root level, allocate a new root block and we're done.
+ */
+ if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+ (level >= cur->bc_nlevels)) {
+ error = xfs_btree_new_root(cur, stat);
+ xfs_btree_set_ptr_null(cur, ptrp);
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ return error;
+ }
+
+ /* If we're off the left edge, return failure. */
+ ptr = cur->bc_ptrs[level];
+ if (ptr == 0) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 0;
+ return 0;
+ }
+
+ /* Make a key out of the record data to be inserted, and save it. */
+ cur->bc_ops->init_key_from_rec(&key, recp);
+
+ optr = ptr;
+
+ XFS_BTREE_STATS_INC(cur, insrec);
+
+ /* Get pointers to the btree buffer and block. */
+ block = xfs_btree_get_block(cur, level, &bp);
+ numrecs = xfs_btree_get_numrecs(block);
+
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, level, bp);
+ if (error)
+ goto error0;
+
+ /* Check that the new entry is being inserted in the right place. */
+ if (ptr <= numrecs) {
+ if (level == 0) {
+ ASSERT(cur->bc_ops->recs_inorder(cur, recp,
+ xfs_btree_rec_addr(cur, ptr, block)));
+ } else {
+ ASSERT(cur->bc_ops->keys_inorder(cur, &key,
+ xfs_btree_key_addr(cur, ptr, block)));
+ }
+ }
+#endif
+
+ /*
+ * If the block is full, we can't insert the new entry until we
+ * make the block un-full.
+ */
+ xfs_btree_set_ptr_null(cur, &nptr);
+ if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
+ error = xfs_btree_make_block_unfull(cur, level, numrecs,
+ &optr, &ptr, &nptr, &ncur, &nrec, stat);
+ if (error || *stat == 0)
+ goto error0;
+ }
+
+ /*
+ * The current block may have changed if the block was
+ * previously full and we have just made space in it.
+ */
+ block = xfs_btree_get_block(cur, level, &bp);
+ numrecs = xfs_btree_get_numrecs(block);
+
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, level, bp);
+ if (error)
+ return error;
+#endif
+
+ /*
+ * At this point we know there's room for our new entry in the block
+ * we're pointing at.
+ */
+ XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1);
+
+ if (level > 0) {
+ /* It's a nonleaf. make a hole in the keys and ptrs */
+ union xfs_btree_key *kp;
+ union xfs_btree_ptr *pp;
+
+ kp = xfs_btree_key_addr(cur, ptr, block);
+ pp = xfs_btree_ptr_addr(cur, ptr, block);
+
+#ifdef DEBUG
+ for (i = numrecs - ptr; i >= 0; i--) {
+ error = xfs_btree_check_ptr(cur, pp, i, level);
+ if (error)
+ return error;
+ }
+#endif
+
+ xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
+ xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1);
+
+#ifdef DEBUG
+ error = xfs_btree_check_ptr(cur, ptrp, 0, level);
+ if (error)
+ goto error0;
+#endif
+
+ /* Now put the new data in, bump numrecs and log it. */
+ xfs_btree_copy_keys(cur, kp, &key, 1);
+ xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
+ numrecs++;
+ xfs_btree_set_numrecs(block, numrecs);
+ xfs_btree_log_ptrs(cur, bp, ptr, numrecs);
+ xfs_btree_log_keys(cur, bp, ptr, numrecs);
+#ifdef DEBUG
+ if (ptr < numrecs) {
+ ASSERT(cur->bc_ops->keys_inorder(cur, kp,
+ xfs_btree_key_addr(cur, ptr + 1, block)));
+ }
+#endif
+ } else {
+ /* It's a leaf. make a hole in the records */
+ union xfs_btree_rec *rp;
+
+ rp = xfs_btree_rec_addr(cur, ptr, block);
+
+ xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);
+
+ /* Now put the new data in, bump numrecs and log it. */
+ xfs_btree_copy_recs(cur, rp, recp, 1);
+ xfs_btree_set_numrecs(block, ++numrecs);
+ xfs_btree_log_recs(cur, bp, ptr, numrecs);
+#ifdef DEBUG
+ if (ptr < numrecs) {
+ ASSERT(cur->bc_ops->recs_inorder(cur, rp,
+ xfs_btree_rec_addr(cur, ptr + 1, block)));
+ }
+#endif
+ }
+
+ /* Log the new number of records in the btree header. */
+ xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
+
+ /* If we inserted at the start of a block, update the parents' keys. */
+ if (optr == 1) {
+ error = xfs_btree_updkey(cur, &key, level + 1);
+ if (error)
+ goto error0;
+ }
+
+ /*
+ * If we are tracking the last record in the tree and
+ * we are at the far right edge of the tree, update it.
+ */
+ if (xfs_btree_is_lastrec(cur, block, level)) {
+ cur->bc_ops->update_lastrec(cur, block, recp,
+ ptr, LASTREC_INSREC);
+ }
+
+ /*
+ * Return the new block number, if any.
+ * If there is one, give back a record value and a cursor too.
+ */
+ *ptrp = nptr;
+ if (!xfs_btree_ptr_is_null(cur, &nptr)) {
+ *recp = nrec;
+ *curp = ncur;
+ }
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 1;
+ return 0;
+
+error0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+}
+
+/*
+ * Insert the record at the point referenced by cur.
+ *
+ * A multi-level split of the tree on insert will invalidate the original
+ * cursor. All callers of this function should assume that the cursor is
+ * no longer valid and revalidate it.
+ */
+int
+xfs_btree_insert(
+ struct xfs_btree_cur *cur,
+ int *stat)
+{
+ int error; /* error return value */
+ int i; /* result value, 0 for failure */
+ int level; /* current level number in btree */
+ union xfs_btree_ptr nptr; /* new block number (split result) */
+ struct xfs_btree_cur *ncur; /* new cursor (split result) */
+ struct xfs_btree_cur *pcur; /* previous level's cursor */
+ union xfs_btree_rec rec; /* record to insert */
+
+ level = 0;
+ ncur = NULL;
+ pcur = cur;
+
+ xfs_btree_set_ptr_null(cur, &nptr);
+ cur->bc_ops->init_rec_from_cur(cur, &rec);
+
+ /*
+ * Loop going up the tree, starting at the leaf level.
+	 * Stop when we don't get a split block; that must mean the
+	 * insert is finished with this level.
+ */
+ do {
+ /*
+		 * Insert rec/nptr into this level of the tree.
+		 * Note that if we fail, nptr will be null.
+ */
+ error = xfs_btree_insrec(pcur, level, &nptr, &rec, &ncur, &i);
+ if (error) {
+ if (pcur != cur)
+ xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
+ goto error0;
+ }
+
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ level++;
+
+ /*
+ * See if the cursor we just used is trash.
+ * Can't trash the caller's cursor, but otherwise we should
+ * if ncur is a new cursor or we're about to be done.
+ */
+ if (pcur != cur &&
+ (ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
+ /* Save the state from the cursor before we trash it */
+ if (cur->bc_ops->update_cursor)
+ cur->bc_ops->update_cursor(pcur, cur);
+ cur->bc_nlevels = pcur->bc_nlevels;
+ xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
+ }
+ /* If we got a new cursor, switch to it. */
+ if (ncur) {
+ pcur = ncur;
+ ncur = NULL;
+ }
+ } while (!xfs_btree_ptr_is_null(cur, &nptr));
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = i;
+ return 0;
+error0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+}
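+
+/*
+ * Illustrative usage sketch, not part of this change: per-btree wrappers
+ * fill in cur->bc_rec before calling xfs_btree_insert() and then check
+ * both the error return and *stat, since *stat == 0 means the record was
+ * not inserted. Shown with allocation btree record fields for
+ * concreteness:
+ *
+ *	cur->bc_rec.a.ar_startblock = bno;
+ *	cur->bc_rec.a.ar_blockcount = len;
+ *	error = xfs_btree_insert(cur, &i);
+ *	if (error)
+ *		goto error0;
+ *	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ */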
+
+/*
+ * Try to merge a non-leaf block back into the inode root.
+ *
+ * Note: the killroot name comes from the fact that we're effectively
+ * killing the old root block. But because we can't just delete the
+ * inode we have to copy the single block it was pointing to into the
+ * inode.
+ */
+STATIC int
+xfs_btree_kill_iroot(
+ struct xfs_btree_cur *cur)
+{
+ int whichfork = cur->bc_private.b.whichfork;
+ struct xfs_inode *ip = cur->bc_private.b.ip;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_btree_block *block;
+ struct xfs_btree_block *cblock;
+ union xfs_btree_key *kp;
+ union xfs_btree_key *ckp;
+ union xfs_btree_ptr *pp;
+ union xfs_btree_ptr *cpp;
+ struct xfs_buf *cbp;
+ int level;
+ int index;
+ int numrecs;
+#ifdef DEBUG
+ union xfs_btree_ptr ptr;
+ int i;
+#endif
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+
+ ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
+ ASSERT(cur->bc_nlevels > 1);
+
+ /*
+	 * Don't deal with the case where the root block needs to be a leaf.
+ * We're just going to turn the thing back into extents anyway.
+ */
+ level = cur->bc_nlevels - 1;
+ if (level == 1)
+ goto out0;
+
+ /*
+ * Give up if the root has multiple children.
+ */
+ block = xfs_btree_get_iroot(cur);
+ if (xfs_btree_get_numrecs(block) != 1)
+ goto out0;
+
+ cblock = xfs_btree_get_block(cur, level - 1, &cbp);
+ numrecs = xfs_btree_get_numrecs(cblock);
+
+ /*
+	 * Only do this if the next level will fit in the inode root.
+	 * The child's data is then copied up into the inode, and instead
+	 * of freeing the root we free the next level down.
+ */
+ if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level))
+ goto out0;
+
+ XFS_BTREE_STATS_INC(cur, killroot);
+
+#ifdef DEBUG
+ xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
+ ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
+ xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
+ ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
+#endif
+
+ index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
+ if (index) {
+ xfs_iroot_realloc(cur->bc_private.b.ip, index,
+ cur->bc_private.b.whichfork);
+ block = ifp->if_broot;
+ }
+
+ be16_add_cpu(&block->bb_numrecs, index);
+ ASSERT(block->bb_numrecs == cblock->bb_numrecs);
+
+ kp = xfs_btree_key_addr(cur, 1, block);
+ ckp = xfs_btree_key_addr(cur, 1, cblock);
+ xfs_btree_copy_keys(cur, kp, ckp, numrecs);
+
+ pp = xfs_btree_ptr_addr(cur, 1, block);
+ cpp = xfs_btree_ptr_addr(cur, 1, cblock);
+#ifdef DEBUG
+ for (i = 0; i < numrecs; i++) {
+ int error;
+
+ error = xfs_btree_check_ptr(cur, cpp, i, level - 1);
+ if (error) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+ }
+ }
+#endif
+ xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);
+
+ cur->bc_ops->free_block(cur, cbp);
+ XFS_BTREE_STATS_INC(cur, free);
+
+ cur->bc_bufs[level - 1] = NULL;
+ be16_add_cpu(&block->bb_level, -1);
+ xfs_trans_log_inode(cur->bc_tp, ip,
+ XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork));
+ cur->bc_nlevels--;
+out0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ return 0;
+}
+
+/*
+ * Kill the current root node, and replace it with its only child node.
+ */
+STATIC int
+xfs_btree_kill_root(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp,
+ int level,
+ union xfs_btree_ptr *newroot)
+{
+ int error;
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_STATS_INC(cur, killroot);
+
+ /*
+ * Update the root pointer, decreasing the level by 1 and then
+ * free the old root.
+ */
+ cur->bc_ops->set_root(cur, newroot, -1);
+
+ error = cur->bc_ops->free_block(cur, bp);
+ if (error) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+ }
+
+ XFS_BTREE_STATS_INC(cur, free);
+
+ cur->bc_bufs[level] = NULL;
+ cur->bc_ra[level] = 0;
+ cur->bc_nlevels--;
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ return 0;
+}
+
+STATIC int
+xfs_btree_dec_cursor(
+ struct xfs_btree_cur *cur,
+ int level,
+ int *stat)
+{
+ int error;
+ int i;
+
+ if (level > 0) {
+ error = xfs_btree_decrement(cur, level, &i);
+ if (error)
+ return error;
+ }
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 1;
+ return 0;
+}
+
+/*
+ * Single level of the btree record deletion routine.
+ * Delete record pointed to by cur/level.
+ * Remove the record from its block then rebalance the tree.
+ * Returns *stat of 0 for failure, 1 for done, 2 to go on to the next level.
+ */
+STATIC int /* error */
+xfs_btree_delrec(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ int level, /* level removing record from */
+ int *stat) /* fail/done/go-on */
+{
+ struct xfs_btree_block *block; /* btree block */
+ union xfs_btree_ptr cptr; /* current block ptr */
+ struct xfs_buf *bp; /* buffer for block */
+ int error; /* error return value */
+ int i; /* loop counter */
+ union xfs_btree_key key; /* storage for keyp */
+ union xfs_btree_key *keyp = &key; /* passed to the next level */
+ union xfs_btree_ptr lptr; /* left sibling block ptr */
+ struct xfs_buf *lbp; /* left buffer pointer */
+ struct xfs_btree_block *left; /* left btree block */
+ int lrecs = 0; /* left record count */
+ int ptr; /* key/record index */
+ union xfs_btree_ptr rptr; /* right sibling block ptr */
+ struct xfs_buf *rbp; /* right buffer pointer */
+ struct xfs_btree_block *right; /* right btree block */
+ struct xfs_btree_block *rrblock; /* right-right btree block */
+ struct xfs_buf *rrbp; /* right-right buffer pointer */
+ int rrecs = 0; /* right record count */
+ struct xfs_btree_cur *tcur; /* temporary btree cursor */
+ int numrecs; /* temporary numrec count */
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_TRACE_ARGI(cur, level);
+
+ tcur = NULL;
+
+ /* Get the index of the entry being deleted, check for nothing there. */
+ ptr = cur->bc_ptrs[level];
+ if (ptr == 0) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 0;
+ return 0;
+ }
+
+ /* Get the buffer & block containing the record or key/ptr. */
+ block = xfs_btree_get_block(cur, level, &bp);
+ numrecs = xfs_btree_get_numrecs(block);
+
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, level, bp);
+ if (error)
+ goto error0;
+#endif
+
+ /* Fail if we're off the end of the block. */
+ if (ptr > numrecs) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 0;
+ return 0;
+ }
+
+ XFS_BTREE_STATS_INC(cur, delrec);
+ XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr);
+
+ /* Excise the entries being deleted. */
+ if (level > 0) {
+ /* It's a nonleaf. operate on keys and ptrs */
+ union xfs_btree_key *lkp;
+ union xfs_btree_ptr *lpp;
+
+ lkp = xfs_btree_key_addr(cur, ptr + 1, block);
+ lpp = xfs_btree_ptr_addr(cur, ptr + 1, block);
+
+#ifdef DEBUG
+ for (i = 0; i < numrecs - ptr; i++) {
+ error = xfs_btree_check_ptr(cur, lpp, i, level);
+ if (error)
+ goto error0;
+ }
+#endif
+
+ if (ptr < numrecs) {
+ xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr);
+ xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr);
+ xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
+ xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
+ }
+
+ /*
+ * If it's the first record in the block, we'll need to pass a
+ * key up to the next level (updkey).
+ */
+ if (ptr == 1)
+ keyp = xfs_btree_key_addr(cur, 1, block);
+ } else {
+ /* It's a leaf. operate on records */
+ if (ptr < numrecs) {
+ xfs_btree_shift_recs(cur,
+ xfs_btree_rec_addr(cur, ptr + 1, block),
+ -1, numrecs - ptr);
+ xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
+ }
+
+ /*
+ * If it's the first record in the block, we'll need a key
+ * structure to pass up to the next level (updkey).
+ */
+ if (ptr == 1) {
+ cur->bc_ops->init_key_from_rec(&key,
+ xfs_btree_rec_addr(cur, 1, block));
+ keyp = &key;
+ }
+ }
+
+ /*
+ * Decrement and log the number of entries in the block.
+ */
+ xfs_btree_set_numrecs(block, --numrecs);
+ xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
+
+ /*
+ * If we are tracking the last record in the tree and
+ * we are at the far right edge of the tree, update it.
+ */
+ if (xfs_btree_is_lastrec(cur, block, level)) {
+ cur->bc_ops->update_lastrec(cur, block, NULL,
+ ptr, LASTREC_DELREC);
+ }
+
+ /*
+ * We're at the root level. First, shrink the root block in-memory.
+ * Try to get rid of the next level down. If we can't then there's
+ * nothing left to do.
+ */
+ if (level == cur->bc_nlevels - 1) {
+ if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
+ xfs_iroot_realloc(cur->bc_private.b.ip, -1,
+ cur->bc_private.b.whichfork);
+
+ error = xfs_btree_kill_iroot(cur);
+ if (error)
+ goto error0;
+
+ error = xfs_btree_dec_cursor(cur, level, stat);
+ if (error)
+ goto error0;
+ *stat = 1;
+ return 0;
+ }
+
+ /*
+ * If this is the root level, and there's only one entry left,
+ * and it's NOT the leaf level, then we can get rid of this
+ * level.
+ */
+ if (numrecs == 1 && level > 0) {
+ union xfs_btree_ptr *pp;
+ /*
+ * pp is still set to the first pointer in the block.
+ * Make it the new root of the btree.
+ */
+ pp = xfs_btree_ptr_addr(cur, 1, block);
+ error = xfs_btree_kill_root(cur, bp, level, pp);
+ if (error)
+ goto error0;
+ } else if (level > 0) {
+ error = xfs_btree_dec_cursor(cur, level, stat);
+ if (error)
+ goto error0;
+ }
+ *stat = 1;
+ return 0;
+ }
+
+ /*
+ * If we deleted the leftmost entry in the block, update the
+ * key values above us in the tree.
+ */
+ if (ptr == 1) {
+ error = xfs_btree_updkey(cur, keyp, level + 1);
+ if (error)
+ goto error0;
+ }
+
+ /*
+ * If the number of records remaining in the block is at least
+ * the minimum, we're done.
+ */
+ if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) {
+ error = xfs_btree_dec_cursor(cur, level, stat);
+ if (error)
+ goto error0;
+ return 0;
+ }
+
+ /*
+ * Otherwise, we have to move some records around to keep the
+ * tree balanced. Look at the left and right sibling blocks to
+ * see if we can re-balance by moving only one record.
+ */
+ xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
+ xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);
+
+ if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
+ /*
+ * One child of root, need to get a chance to copy its contents
+ * into the root and delete it. Can't go up to next level,
+ * there's nothing to delete there.
+ */
+ if (xfs_btree_ptr_is_null(cur, &rptr) &&
+ xfs_btree_ptr_is_null(cur, &lptr) &&
+ level == cur->bc_nlevels - 2) {
+ error = xfs_btree_kill_iroot(cur);
+ if (!error)
+ error = xfs_btree_dec_cursor(cur, level, stat);
+ if (error)
+ goto error0;
+ return 0;
+ }
+ }
+
+ ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) ||
+ !xfs_btree_ptr_is_null(cur, &lptr));
+
+ /*
+ * Duplicate the cursor so our btree manipulations here won't
+ * disrupt the next level up.
+ */
+ error = xfs_btree_dup_cursor(cur, &tcur);
+ if (error)
+ goto error0;
+
+ /*
+ * If there's a right sibling, see if it's ok to shift an entry
+ * out of it.
+ */
+ if (!xfs_btree_ptr_is_null(cur, &rptr)) {
+ /*
+ * Move the temp cursor to the last entry in the next block.
+ * Actually any entry but the first would suffice.
+ */
+ i = xfs_btree_lastrec(tcur, level);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+
+ error = xfs_btree_increment(tcur, level, &i);
+ if (error)
+ goto error0;
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+
+ i = xfs_btree_lastrec(tcur, level);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+
+ /* Grab a pointer to the block. */
+ right = xfs_btree_get_block(tcur, level, &rbp);
+#ifdef DEBUG
+ error = xfs_btree_check_block(tcur, right, level, rbp);
+ if (error)
+ goto error0;
+#endif
+ /* Grab the current block number, for future use. */
+ xfs_btree_get_sibling(tcur, right, &cptr, XFS_BB_LEFTSIB);
+
+ /*
+ * If right block is full enough so that removing one entry
+ * won't make it too empty, and left-shifting an entry out
+ * of right to us works, we're done.
+ */
+ if (xfs_btree_get_numrecs(right) - 1 >=
+ cur->bc_ops->get_minrecs(tcur, level)) {
+ error = xfs_btree_lshift(tcur, level, &i);
+ if (error)
+ goto error0;
+ if (i) {
+ ASSERT(xfs_btree_get_numrecs(block) >=
+ cur->bc_ops->get_minrecs(tcur, level));
+
+ xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+ tcur = NULL;
+
+ error = xfs_btree_dec_cursor(cur, level, stat);
+ if (error)
+ goto error0;
+ return 0;
+ }
+ }
+
+ /*
+ * Otherwise, grab the number of records in right for
+ * future reference, and fix up the temp cursor to point
+ * to our block again (last record).
+ */
+ rrecs = xfs_btree_get_numrecs(right);
+ if (!xfs_btree_ptr_is_null(cur, &lptr)) {
+ i = xfs_btree_firstrec(tcur, level);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+
+ error = xfs_btree_decrement(tcur, level, &i);
+ if (error)
+ goto error0;
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ }
+ }
+
+ /*
+ * If there's a left sibling, see if it's ok to shift an entry
+ * out of it.
+ */
+ if (!xfs_btree_ptr_is_null(cur, &lptr)) {
+ /*
+ * Move the temp cursor to the first entry in the
+ * previous block.
+ */
+ i = xfs_btree_firstrec(tcur, level);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+
+ error = xfs_btree_decrement(tcur, level, &i);
+ if (error)
+ goto error0;
+ i = xfs_btree_firstrec(tcur, level);
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+
+ /* Grab a pointer to the block. */
+ left = xfs_btree_get_block(tcur, level, &lbp);
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, left, level, lbp);
+ if (error)
+ goto error0;
+#endif
+ /* Grab the current block number, for future use. */
+ xfs_btree_get_sibling(tcur, left, &cptr, XFS_BB_RIGHTSIB);
+
+ /*
+ * If left block is full enough so that removing one entry
+ * won't make it too empty, and right-shifting an entry out
+ * of left to us works, we're done.
+ */
+ if (xfs_btree_get_numrecs(left) - 1 >=
+ cur->bc_ops->get_minrecs(tcur, level)) {
+ error = xfs_btree_rshift(tcur, level, &i);
+ if (error)
+ goto error0;
+ if (i) {
+ ASSERT(xfs_btree_get_numrecs(block) >=
+ cur->bc_ops->get_minrecs(tcur, level));
+ xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+ tcur = NULL;
+ if (level == 0)
+ cur->bc_ptrs[0]++;
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 1;
+ return 0;
+ }
+ }
+
+ /*
+		 * Otherwise, grab the number of records in left for
+ * future reference.
+ */
+ lrecs = xfs_btree_get_numrecs(left);
+ }
+
+ /* Delete the temp cursor, we're done with it. */
+ xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+ tcur = NULL;
+
+ /* If here, we need to do a join to keep the tree balanced. */
+ ASSERT(!xfs_btree_ptr_is_null(cur, &cptr));
+
+ if (!xfs_btree_ptr_is_null(cur, &lptr) &&
+ lrecs + xfs_btree_get_numrecs(block) <=
+ cur->bc_ops->get_maxrecs(cur, level)) {
+ /*
+ * Set "right" to be the starting block,
+ * "left" to be the left neighbor.
+ */
+ rptr = cptr;
+ right = block;
+ rbp = bp;
+ error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
+ if (error)
+ goto error0;
+
+ /*
+ * If that won't work, see if we can join with the right neighbor block.
+ */
+ } else if (!xfs_btree_ptr_is_null(cur, &rptr) &&
+ rrecs + xfs_btree_get_numrecs(block) <=
+ cur->bc_ops->get_maxrecs(cur, level)) {
+ /*
+ * Set "left" to be the starting block,
+ * "right" to be the right neighbor.
+ */
+ lptr = cptr;
+ left = block;
+ lbp = bp;
+ error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
+ if (error)
+ goto error0;
+
+ /*
+ * Otherwise, we can't fix the imbalance.
+ * Just return. This is probably a logic error, but it's not fatal.
+ */
+ } else {
+ error = xfs_btree_dec_cursor(cur, level, stat);
+ if (error)
+ goto error0;
+ return 0;
+ }
+
+ rrecs = xfs_btree_get_numrecs(right);
+ lrecs = xfs_btree_get_numrecs(left);
+
+ /*
+ * We're now going to join "left" and "right" by moving all the stuff
+ * in "right" to "left" and deleting "right".
+ */
+ XFS_BTREE_STATS_ADD(cur, moves, rrecs);
+ if (level > 0) {
+ /* It's a non-leaf. Move keys and pointers. */
+ union xfs_btree_key *lkp; /* left btree key */
+ union xfs_btree_ptr *lpp; /* left address pointer */
+ union xfs_btree_key *rkp; /* right btree key */
+ union xfs_btree_ptr *rpp; /* right address pointer */
+
+ lkp = xfs_btree_key_addr(cur, lrecs + 1, left);
+ lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left);
+ rkp = xfs_btree_key_addr(cur, 1, right);
+ rpp = xfs_btree_ptr_addr(cur, 1, right);
+#ifdef DEBUG
+ for (i = 1; i < rrecs; i++) {
+ error = xfs_btree_check_ptr(cur, rpp, i, level);
+ if (error)
+ goto error0;
+ }
+#endif
+ xfs_btree_copy_keys(cur, lkp, rkp, rrecs);
+ xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs);
+
+ xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
+ xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
+ } else {
+ /* It's a leaf. Move records. */
+ union xfs_btree_rec *lrp; /* left record pointer */
+ union xfs_btree_rec *rrp; /* right record pointer */
+
+ lrp = xfs_btree_rec_addr(cur, lrecs + 1, left);
+ rrp = xfs_btree_rec_addr(cur, 1, right);
+
+ xfs_btree_copy_recs(cur, lrp, rrp, rrecs);
+ xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
+ }
+
+ XFS_BTREE_STATS_INC(cur, join);
+
+ /*
+ * Fix up the number of records and right block pointer in the
+ * surviving block, and log it.
+ */
+ xfs_btree_set_numrecs(left, lrecs + rrecs);
+	xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB);
+ xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
+ xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
+
+ /* If there is a right sibling, point it to the remaining block. */
+ xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
+ if (!xfs_btree_ptr_is_null(cur, &cptr)) {
+ error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp);
+ if (error)
+ goto error0;
+ xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB);
+ xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
+ }
+
+ /* Free the deleted block. */
+ error = cur->bc_ops->free_block(cur, rbp);
+ if (error)
+ goto error0;
+ XFS_BTREE_STATS_INC(cur, free);
+
+ /*
+ * If we joined with the left neighbor, set the buffer in the
+ * cursor to the left block, and fix up the index.
+ */
+ if (bp != lbp) {
+ cur->bc_bufs[level] = lbp;
+ cur->bc_ptrs[level] += lrecs;
+ cur->bc_ra[level] = 0;
+ }
+ /*
+ * If we joined with the right neighbor and there's a level above
+ * us, increment the cursor at that level.
+ */
+ else if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) ||
+ (level + 1 < cur->bc_nlevels)) {
+ error = xfs_btree_increment(cur, level + 1, &i);
+ if (error)
+ goto error0;
+ }
+
+ /*
+ * Readjust the ptr at this level if it's not a leaf, since it's
+ * still pointing at the deletion point, which makes the cursor
+ * inconsistent. If this makes the ptr 0, the caller fixes it up.
+ * We can't use decrement because it would change the next level up.
+ */
+ if (level > 0)
+ cur->bc_ptrs[level]--;
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ /* Return value means the next level up has something to do. */
+ *stat = 2;
+ return 0;
+
+error0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ if (tcur)
+ xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
+ return error;
+}
+
+/*
+ * Delete the record pointed to by cur.
+ * On return the cursor refers to the place where the record was, i.e. where
+ * it could be re-inserted.
+ */
+int /* error */
+xfs_btree_delete(
+ struct xfs_btree_cur *cur,
+ int *stat) /* success/failure */
+{
+ int error; /* error return value */
+ int level;
+ int i;
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+
+ /*
+ * Go up the tree, starting at leaf level.
+ *
+ * If 2 is returned then a join was done; go to the next level.
+ * Otherwise we are done.
+ */
+ for (level = 0, i = 2; i == 2; level++) {
+ error = xfs_btree_delrec(cur, level, &i);
+ if (error)
+ goto error0;
+ }
+
+ if (i == 0) {
+ for (level = 1; level < cur->bc_nlevels; level++) {
+ if (cur->bc_ptrs[level] == 0) {
+ error = xfs_btree_decrement(cur, level, &i);
+ if (error)
+ goto error0;
+ break;
+ }
+ }
+ }
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = i;
+ return 0;
+error0:
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+}
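+
+/*
+ * Illustrative usage sketch, not part of this change: callers position the
+ * cursor first (for example with an EQ lookup) and treat *stat == 0 as
+ * "record not deleted":
+ *
+ *	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &i);
+ *	if (error)
+ *		goto error0;
+ *	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ *	error = xfs_btree_delete(cur, &i);
+ *	if (error)
+ *		goto error0;
+ *	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+ */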
+
+/*
+ * Get the data from the pointed-to record.
+ */
+int /* error */
+xfs_btree_get_rec(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ union xfs_btree_rec **recp, /* output: btree record */
+ int *stat) /* output: success/failure */
+{
+ struct xfs_btree_block *block; /* btree block */
+ struct xfs_buf *bp; /* buffer pointer */
+ int ptr; /* record number */
+#ifdef DEBUG
+ int error; /* error return value */
+#endif
+
+ ptr = cur->bc_ptrs[0];
+ block = xfs_btree_get_block(cur, 0, &bp);
+
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, 0, bp);
+ if (error)
+ return error;
+#endif
+
+ /*
+ * Off the right end or left end, return failure.
+ */
+ if (ptr > xfs_btree_get_numrecs(block) || ptr <= 0) {
+ *stat = 0;
+ return 0;
+ }
+
+ /*
+ * Point to the record and extract its data.
+ */
+ *recp = xfs_btree_rec_addr(cur, ptr, block);
+ *stat = 1;
+ return 0;
+}
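+
+/*
+ * Illustrative usage sketch, not part of this change: the returned pointer
+ * aims straight into the btree buffer, so callers byte-swap the on-disk
+ * fields they need. Shown with the allocation btree record for
+ * concreteness:
+ *
+ *	union xfs_btree_rec	*rec;
+ *
+ *	error = xfs_btree_get_rec(cur, &rec, &stat);
+ *	if (!error && stat) {
+ *		bno = be32_to_cpu(rec->alloc.ar_startblock);
+ *		len = be32_to_cpu(rec->alloc.ar_blockcount);
+ *	}
+ */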
+
+/*
+ * Change the owner of a btree.
+ *
+ * The mechanism we use here is ordered buffer logging. Because we don't know
+ * how many buffers we are going to need to modify, we don't really want to
+ * have to make transaction reservations for the worst case of every buffer in
+ * a full size btree, as that may be more space than we can fit in the log....
+ *
+ * We do the btree walk in the most efficient manner possible - we have sibling
+ * pointers so we can just walk all the blocks on each level from left to right
+ * in a single pass, and then move to the next level and do the same. We can
+ * also do readahead on the sibling pointers to get IO moving more quickly,
+ * though for slow disks this is unlikely to make much difference to performance
+ * as the amount of CPU work we have to do before moving to the next block is
+ * relatively small.
+ *
+ * For each btree block that we load, modify the owner appropriately, set the
+ * buffer as an ordered buffer and log it appropriately. We need to ensure that
+ * we mark the region we change dirty so that if the buffer is relogged in
+ * a subsequent transaction the changes we make here as an ordered buffer are
+ * correctly relogged in that transaction. If we are in recovery context, then
+ * just queue the modified buffer as a delayed write buffer so the transaction
+ * recovery completion writes the changes to disk.
+ */
+static int
+xfs_btree_block_change_owner(
+ struct xfs_btree_cur *cur,
+ int level,
+ __uint64_t new_owner,
+ struct list_head *buffer_list)
+{
+ struct xfs_btree_block *block;
+ struct xfs_buf *bp;
+ union xfs_btree_ptr rptr;
+
+ /* do right sibling readahead */
+ xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
+
+ /* modify the owner */
+ block = xfs_btree_get_block(cur, level, &bp);
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ block->bb_u.l.bb_owner = cpu_to_be64(new_owner);
+ else
+ block->bb_u.s.bb_owner = cpu_to_be32(new_owner);
+
+ /*
+ * If the block is a root block hosted in an inode, we might not have a
+ * buffer pointer here and we shouldn't attempt to log the change as the
+ * information is already held in the inode and discarded when the root
+ * block is formatted into the on-disk inode fork. We still change it,
+ * though, so everything is consistent in memory.
+ */
+ if (bp) {
+ if (cur->bc_tp) {
+ xfs_trans_ordered_buf(cur->bc_tp, bp);
+ xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
+ } else {
+ xfs_buf_delwri_queue(bp, buffer_list);
+ }
+ } else {
+ ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
+ ASSERT(level == cur->bc_nlevels - 1);
+ }
+
+	/* now read the right sibling block for the next iteration */
+ xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
+ if (xfs_btree_ptr_is_null(cur, &rptr))
+ return ENOENT;
+
+ return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
+}
+
+int
+xfs_btree_change_owner(
+ struct xfs_btree_cur *cur,
+ __uint64_t new_owner,
+ struct list_head *buffer_list)
+{
+ union xfs_btree_ptr lptr;
+ int level;
+ struct xfs_btree_block *block = NULL;
+ int error = 0;
+
+ cur->bc_ops->init_ptr_from_cur(cur, &lptr);
+
+ /* for each level */
+ for (level = cur->bc_nlevels - 1; level >= 0; level--) {
+ /* grab the left hand block */
+ error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
+ if (error)
+ return error;
+
+ /* readahead the left most block for the next level down */
+ if (level > 0) {
+ union xfs_btree_ptr *ptr;
+
+ ptr = xfs_btree_ptr_addr(cur, 1, block);
+ xfs_btree_readahead_ptr(cur, ptr, 1);
+
+ /* save for the next iteration of the loop */
+ lptr = *ptr;
+ }
+
+ /* for each buffer in the level */
+ do {
+ error = xfs_btree_block_change_owner(cur, level,
+ new_owner,
+ buffer_list);
+ } while (!error);
+
+ if (error != ENOENT)
+ return error;
+ }
+
+ return 0;
+}
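+
+/*
+ * Illustrative usage sketch, not part of this change: in a transaction
+ * context cur->bc_tp is set and the modified buffers are logged as ordered
+ * buffers; in log recovery (no transaction) the caller provides a delwri
+ * list and submits it once the walk is done, roughly:
+ *
+ *	LIST_HEAD(buffer_list);
+ *
+ *	error = xfs_btree_change_owner(cur, new_owner, &buffer_list);
+ *	if (!error)
+ *		error = xfs_buf_delwri_submit(&buffer_list);
+ */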
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 7440b78f9ce..a04b69422f6 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -27,6 +27,31 @@ struct xfs_trans;
extern kmem_zone_t *xfs_btree_cur_zone;
/*
+ * Generic key, ptr and record wrapper structures.
+ *
+ * These are disk format structures, and are converted where necessary
+ * by the btree specific code that needs to interpret them.
+ */
+union xfs_btree_ptr {
+ __be32 s; /* short form ptr */
+ __be64 l; /* long form ptr */
+};
+
+union xfs_btree_key {
+ xfs_bmbt_key_t bmbt;
+ xfs_bmdr_key_t bmbr; /* bmbt root block */
+ xfs_alloc_key_t alloc;
+ xfs_inobt_key_t inobt;
+};
+
+union xfs_btree_rec {
+ xfs_bmbt_rec_t bmbt;
+ xfs_bmdr_rec_t bmbr; /* bmbt root block */
+ xfs_alloc_rec_t alloc;
+ xfs_inobt_rec_t inobt;
+};
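+
+/*
+ * Illustrative sketch, not part of this change: each btree's ops interpret
+ * only the union member that belongs to that btree, e.g. an allocation
+ * btree init_key_from_rec() implementation is roughly:
+ *
+ *	key->alloc.ar_startblock = rec->alloc.ar_startblock;
+ *	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
+ */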
+
+/*
* This nonsense is to make -wlint happy.
*/
#define XFS_LOOKUP_EQ ((xfs_lookup_t)XFS_LOOKUP_EQi)
@@ -37,104 +62,128 @@ extern kmem_zone_t *xfs_btree_cur_zone;
#define XFS_BTNUM_CNT ((xfs_btnum_t)XFS_BTNUM_CNTi)
#define XFS_BTNUM_BMAP ((xfs_btnum_t)XFS_BTNUM_BMAPi)
#define XFS_BTNUM_INO ((xfs_btnum_t)XFS_BTNUM_INOi)
+#define XFS_BTNUM_FINO ((xfs_btnum_t)XFS_BTNUM_FINOi)
/*
- * Short form header: space allocation btrees.
+ * For logging record fields.
*/
-typedef struct xfs_btree_sblock {
- __be32 bb_magic; /* magic number for block type */
- __be16 bb_level; /* 0 is a leaf */
- __be16 bb_numrecs; /* current # of data records */
- __be32 bb_leftsib; /* left sibling block or NULLAGBLOCK */
- __be32 bb_rightsib; /* right sibling block or NULLAGBLOCK */
-} xfs_btree_sblock_t;
+#define XFS_BB_MAGIC (1 << 0)
+#define XFS_BB_LEVEL (1 << 1)
+#define XFS_BB_NUMRECS (1 << 2)
+#define XFS_BB_LEFTSIB (1 << 3)
+#define XFS_BB_RIGHTSIB (1 << 4)
+#define XFS_BB_BLKNO (1 << 5)
+#define XFS_BB_LSN (1 << 6)
+#define XFS_BB_UUID (1 << 7)
+#define XFS_BB_OWNER (1 << 8)
+#define XFS_BB_NUM_BITS 5
+#define XFS_BB_ALL_BITS ((1 << XFS_BB_NUM_BITS) - 1)
+#define XFS_BB_NUM_BITS_CRC 9
+#define XFS_BB_ALL_BITS_CRC ((1 << XFS_BB_NUM_BITS_CRC) - 1)
+
+/*
+ * Generic stats interface
+ */
+#define __XFS_BTREE_STATS_INC(type, stat) \
+ XFS_STATS_INC(xs_ ## type ## _2_ ## stat)
+#define XFS_BTREE_STATS_INC(cur, stat) \
+do { \
+ switch (cur->bc_btnum) { \
+ case XFS_BTNUM_BNO: __XFS_BTREE_STATS_INC(abtb, stat); break; \
+ case XFS_BTNUM_CNT: __XFS_BTREE_STATS_INC(abtc, stat); break; \
+ case XFS_BTNUM_BMAP: __XFS_BTREE_STATS_INC(bmbt, stat); break; \
+ case XFS_BTNUM_INO: __XFS_BTREE_STATS_INC(ibt, stat); break; \
+ case XFS_BTNUM_FINO: __XFS_BTREE_STATS_INC(fibt, stat); break; \
+ case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \
+ } \
+} while (0)
+
+#define __XFS_BTREE_STATS_ADD(type, stat, val) \
+ XFS_STATS_ADD(xs_ ## type ## _2_ ## stat, val)
+#define XFS_BTREE_STATS_ADD(cur, stat, val) \
+do { \
+ switch (cur->bc_btnum) { \
+ case XFS_BTNUM_BNO: __XFS_BTREE_STATS_ADD(abtb, stat, val); break; \
+ case XFS_BTNUM_CNT: __XFS_BTREE_STATS_ADD(abtc, stat, val); break; \
+ case XFS_BTNUM_BMAP: __XFS_BTREE_STATS_ADD(bmbt, stat, val); break; \
+ case XFS_BTNUM_INO: __XFS_BTREE_STATS_ADD(ibt, stat, val); break; \
+ case XFS_BTNUM_FINO: __XFS_BTREE_STATS_ADD(fibt, stat, val); break; \
+ case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \
+ } \
+} while (0)
-/*
- * Long form header: bmap btrees.
- */
-typedef struct xfs_btree_lblock {
- __be32 bb_magic; /* magic number for block type */
- __be16 bb_level; /* 0 is a leaf */
- __be16 bb_numrecs; /* current # of data records */
- __be64 bb_leftsib; /* left sibling block or NULLDFSBNO */
- __be64 bb_rightsib; /* right sibling block or NULLDFSBNO */
-} xfs_btree_lblock_t;
+#define XFS_BTREE_MAXLEVELS 8 /* max of all btrees */
-/*
- * Combined header and structure, used by common code.
- */
-typedef struct xfs_btree_hdr
-{
- __be32 bb_magic; /* magic number for block type */
- __be16 bb_level; /* 0 is a leaf */
- __be16 bb_numrecs; /* current # of data records */
-} xfs_btree_hdr_t;
+struct xfs_btree_ops {
+ /* size of the key and record structures */
+ size_t key_len;
+ size_t rec_len;
-typedef struct xfs_btree_block {
- xfs_btree_hdr_t bb_h; /* header */
- union {
- struct {
- __be32 bb_leftsib;
- __be32 bb_rightsib;
- } s; /* short form pointers */
- struct {
- __be64 bb_leftsib;
- __be64 bb_rightsib;
- } l; /* long form pointers */
- } bb_u; /* rest */
-} xfs_btree_block_t;
+ /* cursor operations */
+ struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
+ void (*update_cursor)(struct xfs_btree_cur *src,
+ struct xfs_btree_cur *dst);
-/*
- * For logging record fields.
- */
-#define XFS_BB_MAGIC 0x01
-#define XFS_BB_LEVEL 0x02
-#define XFS_BB_NUMRECS 0x04
-#define XFS_BB_LEFTSIB 0x08
-#define XFS_BB_RIGHTSIB 0x10
-#define XFS_BB_NUM_BITS 5
-#define XFS_BB_ALL_BITS ((1 << XFS_BB_NUM_BITS) - 1)
+ /* update btree root pointer */
+ void (*set_root)(struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *nptr, int level_change);
-/*
- * Boolean to select which form of xfs_btree_block_t.bb_u to use.
- */
-#define XFS_BTREE_LONG_PTRS(btnum) ((btnum) == XFS_BTNUM_BMAP)
+ /* block allocation / freeing */
+ int (*alloc_block)(struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *start_bno,
+ union xfs_btree_ptr *new_bno,
+ int *stat);
+ int (*free_block)(struct xfs_btree_cur *cur, struct xfs_buf *bp);
-/*
- * Magic numbers for btree blocks.
- */
-extern const __uint32_t xfs_magics[];
+ /* update last record information */
+ void (*update_lastrec)(struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ union xfs_btree_rec *rec,
+ int ptr, int reason);
-/*
- * Maximum and minimum records in a btree block.
- * Given block size, type prefix, and leaf flag (0 or 1).
- * The divisor below is equivalent to lf ? (e1) : (e2) but that produces
- * compiler warnings.
- */
-#define XFS_BTREE_BLOCK_MAXRECS(bsz,t,lf) \
- ((int)(((bsz) - (uint)sizeof(t ## _block_t)) / \
- (((lf) * (uint)sizeof(t ## _rec_t)) + \
- ((1 - (lf)) * \
- ((uint)sizeof(t ## _key_t) + (uint)sizeof(t ## _ptr_t))))))
-#define XFS_BTREE_BLOCK_MINRECS(bsz,t,lf) \
- (XFS_BTREE_BLOCK_MAXRECS(bsz,t,lf) / 2)
+ /* records in block/level */
+ int (*get_minrecs)(struct xfs_btree_cur *cur, int level);
+ int (*get_maxrecs)(struct xfs_btree_cur *cur, int level);
+
+	/* records on disk. Matters for the root-in-inode case. */
+ int (*get_dmaxrecs)(struct xfs_btree_cur *cur, int level);
+
+ /* init values of btree structures */
+ void (*init_key_from_rec)(union xfs_btree_key *key,
+ union xfs_btree_rec *rec);
+ void (*init_rec_from_key)(union xfs_btree_key *key,
+ union xfs_btree_rec *rec);
+ void (*init_rec_from_cur)(struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec);
+ void (*init_ptr_from_cur)(struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr);
+
+ /* difference between key value and cursor value */
+ __int64_t (*key_diff)(struct xfs_btree_cur *cur,
+ union xfs_btree_key *key);
+
+ const struct xfs_buf_ops *buf_ops;
+
+#if defined(DEBUG) || defined(XFS_WARN)
+ /* check that k1 is lower than k2 */
+ int (*keys_inorder)(struct xfs_btree_cur *cur,
+ union xfs_btree_key *k1,
+ union xfs_btree_key *k2);
+
+ /* check that r1 is lower than r2 */
+ int (*recs_inorder)(struct xfs_btree_cur *cur,
+ union xfs_btree_rec *r1,
+ union xfs_btree_rec *r2);
+#endif
+};
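+
+/*
+ * Illustrative sketch, not part of this change: each btree type supplies a
+ * static ops table that its cursor-init code hooks up, along the lines of
+ * (allocation btree shown, member list abridged):
+ *
+ *	static const struct xfs_btree_ops xfs_allocbt_ops = {
+ *		.rec_len		= sizeof(xfs_alloc_rec_t),
+ *		.key_len		= sizeof(xfs_alloc_key_t),
+ *		.dup_cursor		= xfs_allocbt_dup_cursor,
+ *		.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
+ *		.key_diff		= xfs_allocbt_key_diff,
+ *		...
+ *	};
+ */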
/*
- * Record, key, and pointer address calculation macros.
- * Given block size, type prefix, block pointer, and index of requested entry
- * (first entry numbered 1).
+ * Reasons for the update_lastrec method to be called.
*/
-#define XFS_BTREE_REC_ADDR(t,bb,i) \
- ((t ## _rec_t *)((char *)(bb) + sizeof(t ## _block_t) + \
- ((i) - 1) * sizeof(t ## _rec_t)))
-#define XFS_BTREE_KEY_ADDR(t,bb,i) \
- ((t ## _key_t *)((char *)(bb) + sizeof(t ## _block_t) + \
- ((i) - 1) * sizeof(t ## _key_t)))
-#define XFS_BTREE_PTR_ADDR(t,bb,i,mxr) \
- ((t ## _ptr_t *)((char *)(bb) + sizeof(t ## _block_t) + \
- (mxr) * sizeof(t ## _key_t) + ((i) - 1) * sizeof(t ## _ptr_t)))
+#define LASTREC_UPDATE 0
+#define LASTREC_INSREC 1
+#define LASTREC_DELREC 2
-#define XFS_BTREE_MAXLEVELS 8 /* max of all btrees */
/*
* Btree cursor structure.
@@ -144,6 +193,8 @@ typedef struct xfs_btree_cur
{
struct xfs_trans *bc_tp; /* transaction we're in, if any */
struct xfs_mount *bc_mp; /* file system mount struct */
+ const struct xfs_btree_ops *bc_ops;
+ uint bc_flags; /* btree features - below */
union {
xfs_alloc_rec_incore_t a;
xfs_bmbt_irec_t b;
@@ -158,8 +209,8 @@ typedef struct xfs_btree_cur
__uint8_t bc_blocklog; /* log2(blocksize) of btree blocks */
xfs_btnum_t bc_btnum; /* identifies which btree type */
union {
- struct { /* needed for BNO, CNT */
- struct xfs_buf *agbp; /* agf buffer pointer */
+ struct { /* needed for BNO, CNT, INO */
+ struct xfs_buf *agbp; /* agf/agi buffer pointer */
xfs_agnumber_t agno; /* ag number */
} a;
struct { /* needed for BMAP */
@@ -172,101 +223,44 @@ typedef struct xfs_btree_cur
char flags; /* flags */
#define XFS_BTCUR_BPRV_WASDEL 1 /* was delayed */
} b;
- struct { /* needed for INO */
- struct xfs_buf *agbp; /* agi buffer pointer */
- xfs_agnumber_t agno; /* ag number */
- } i;
} bc_private; /* per-btree type data */
} xfs_btree_cur_t;
+/* cursor flags */
+#define XFS_BTREE_LONG_PTRS (1<<0) /* pointers are 64bits long */
+#define XFS_BTREE_ROOT_IN_INODE (1<<1) /* root may be variable size */
+#define XFS_BTREE_LASTREC_UPDATE (1<<2) /* track last rec externally */
+#define XFS_BTREE_CRC_BLOCKS (1<<3) /* uses extended btree blocks */
+
+
#define XFS_BTREE_NOERROR 0
#define XFS_BTREE_ERROR 1
/*
* Convert from buffer to btree block header.
*/
-#define XFS_BUF_TO_BLOCK(bp) ((xfs_btree_block_t *)XFS_BUF_PTR(bp))
-#define XFS_BUF_TO_LBLOCK(bp) ((xfs_btree_lblock_t *)XFS_BUF_PTR(bp))
-#define XFS_BUF_TO_SBLOCK(bp) ((xfs_btree_sblock_t *)XFS_BUF_PTR(bp))
-
+#define XFS_BUF_TO_BLOCK(bp) ((struct xfs_btree_block *)((bp)->b_addr))
-#ifdef __KERNEL__
-#ifdef DEBUG
/*
- * Debug routine: check that block header is ok.
+ * Check that block header is ok.
*/
-void
+int
xfs_btree_check_block(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_btree_block_t *block, /* generic btree block pointer */
+ struct xfs_btree_cur *cur, /* btree cursor */
+ struct xfs_btree_block *block, /* generic btree block pointer */
int level, /* level of the btree block */
struct xfs_buf *bp); /* buffer containing block, if any */
/*
- * Debug routine: check that keys are in the right order.
- */
-void
-xfs_btree_check_key(
- xfs_btnum_t btnum, /* btree identifier */
- void *ak1, /* pointer to left (lower) key */
- void *ak2); /* pointer to right (higher) key */
-
-/*
- * Debug routine: check that records are in the right order.
- */
-void
-xfs_btree_check_rec(
- xfs_btnum_t btnum, /* btree identifier */
- void *ar1, /* pointer to left (lower) record */
- void *ar2); /* pointer to right (higher) record */
-#else
-#define xfs_btree_check_block(a,b,c,d)
-#define xfs_btree_check_key(a,b,c)
-#define xfs_btree_check_rec(a,b,c)
-#endif /* DEBUG */
-
-/*
- * Checking routine: check that long form block header is ok.
- */
-int /* error (0 or EFSCORRUPTED) */
-xfs_btree_check_lblock(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_btree_lblock_t *block, /* btree long form block pointer */
- int level, /* level of the btree block */
- struct xfs_buf *bp); /* buffer containing block, if any */
-
-/*
- * Checking routine: check that (long) pointer is ok.
+ * Check that (long) pointer is ok.
*/
int /* error (0 or EFSCORRUPTED) */
xfs_btree_check_lptr(
- xfs_btree_cur_t *cur, /* btree cursor */
+ struct xfs_btree_cur *cur, /* btree cursor */
xfs_dfsbno_t ptr, /* btree block disk address */
int level); /* btree block level */
-#define xfs_btree_check_lptr_disk(cur, ptr, level) \
- xfs_btree_check_lptr(cur, be64_to_cpu(ptr), level)
-
-/*
- * Checking routine: check that short form block header is ok.
- */
-int /* error (0 or EFSCORRUPTED) */
-xfs_btree_check_sblock(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_btree_sblock_t *block, /* btree short form block pointer */
- int level, /* level of the btree block */
- struct xfs_buf *bp); /* buffer containing block */
-
-/*
- * Checking routine: check that (short) pointer is ok.
- */
-int /* error (0 or EFSCORRUPTED) */
-xfs_btree_check_sptr(
- xfs_btree_cur_t *cur, /* btree cursor */
- xfs_agblock_t ptr, /* btree block disk address */
- int level); /* btree block level */
-
/*
* Delete the btree cursor.
*/
@@ -285,15 +279,6 @@ xfs_btree_dup_cursor(
xfs_btree_cur_t **ncur);/* output cursor */
/*
- * Change the cursor to point to the first record in the current block
- * at the given level. Other levels are unaffected.
- */
-int /* success=1, failure=0 */
-xfs_btree_firstrec(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level); /* level to change */
-
-/*
* Get a buffer for the block, return it with no data read.
* Long-form addressing.
*/
@@ -317,20 +302,6 @@ xfs_btree_get_bufs(
uint lock); /* lock flags for get_buf */
/*
- * Allocate a new btree cursor.
- * The cursor is either for allocation (A) or bmap (B).
- */
-xfs_btree_cur_t * /* new btree cursor */
-xfs_btree_init_cursor(
- struct xfs_mount *mp, /* file system mount point */
- struct xfs_trans *tp, /* transaction pointer */
- struct xfs_buf *agbp, /* (A only) buffer for agf structure */
- xfs_agnumber_t agno, /* (A only) allocation group number */
- xfs_btnum_t btnum, /* btree identifier */
- struct xfs_inode *ip, /* (B only) inode owning the btree */
- int whichfork); /* (B only) data/attr fork */
-
-/*
* Check for the cursor referring to the last block at the given level.
*/
int /* 1=is last block, 0=not last block */
@@ -339,15 +310,6 @@ xfs_btree_islastblock(
int level); /* level to check */
/*
- * Change the cursor to point to the last record in the current block
- * at the given level. Other levels are unaffected.
- */
-int /* success=1, failure=0 */
-xfs_btree_lastrec(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level); /* level to change */
-
-/*
* Compute first and last byte offsets for the fields given.
* Interprets the offsets table, which contains struct field offsets.
*/
@@ -370,21 +332,8 @@ xfs_btree_read_bufl(
xfs_fsblock_t fsbno, /* file system block number */
uint lock, /* lock flags for read_buf */
struct xfs_buf **bpp, /* buffer for fsbno */
- int refval);/* ref count value for buffer */
-
-/*
- * Get a buffer for the block, return it read in.
- * Short-form addressing.
- */
-int /* error */
-xfs_btree_read_bufs(
- struct xfs_mount *mp, /* file system mount point */
- struct xfs_trans *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_agblock_t agbno, /* allocation group block number */
- uint lock, /* lock flags for read_buf */
- struct xfs_buf **bpp, /* buffer for agno/agbno */
- int refval);/* ref count value for buffer */
+ int refval, /* ref count value for buffer */
+ const struct xfs_buf_ops *ops);
/*
* Read-ahead the block, don't wait for it, don't return a buffer.
@@ -394,7 +343,8 @@ void /* error */
xfs_btree_reada_bufl(
struct xfs_mount *mp, /* file system mount point */
xfs_fsblock_t fsbno, /* file system block number */
- xfs_extlen_t count); /* count of filesystem blocks */
+ xfs_extlen_t count, /* count of filesystem blocks */
+ const struct xfs_buf_ops *ops);
/*
* Read-ahead the block, don't wait for it, don't return a buffer.
@@ -405,42 +355,79 @@ xfs_btree_reada_bufs(
struct xfs_mount *mp, /* file system mount point */
xfs_agnumber_t agno, /* allocation group number */
xfs_agblock_t agbno, /* allocation group block number */
- xfs_extlen_t count); /* count of filesystem blocks */
+ xfs_extlen_t count, /* count of filesystem blocks */
+ const struct xfs_buf_ops *ops);
/*
- * Read-ahead btree blocks, at the given level.
- * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA.
+ * Initialise a new btree block header
*/
-int /* readahead block count */
-xfs_btree_readahead_core(
- xfs_btree_cur_t *cur, /* btree cursor */
- int lev, /* level in btree */
- int lr); /* left/right bits */
+void
+xfs_btree_init_block(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp,
+ __u32 magic,
+ __u16 level,
+ __u16 numrecs,
+ __u64 owner,
+ unsigned int flags);
-static inline int /* readahead block count */
-xfs_btree_readahead(
- xfs_btree_cur_t *cur, /* btree cursor */
- int lev, /* level in btree */
- int lr) /* left/right bits */
-{
- if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
- return 0;
+void
+xfs_btree_init_block_int(
+ struct xfs_mount *mp,
+ struct xfs_btree_block *buf,
+ xfs_daddr_t blkno,
+ __u32 magic,
+ __u16 level,
+ __u16 numrecs,
+ __u64 owner,
+ unsigned int flags);
- return xfs_btree_readahead_core(cur, lev, lr);
-}
+/*
+ * Common btree core entry points.
+ */
+int xfs_btree_increment(struct xfs_btree_cur *, int, int *);
+int xfs_btree_decrement(struct xfs_btree_cur *, int, int *);
+int xfs_btree_lookup(struct xfs_btree_cur *, xfs_lookup_t, int *);
+int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *);
+int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *);
+int xfs_btree_insert(struct xfs_btree_cur *, int *);
+int xfs_btree_delete(struct xfs_btree_cur *, int *);
+int xfs_btree_get_rec(struct xfs_btree_cur *, union xfs_btree_rec **, int *);
+int xfs_btree_change_owner(struct xfs_btree_cur *cur, __uint64_t new_owner,
+ struct list_head *buffer_list);
+/*
+ * btree block CRC helpers
+ */
+void xfs_btree_lblock_calc_crc(struct xfs_buf *);
+bool xfs_btree_lblock_verify_crc(struct xfs_buf *);
+void xfs_btree_sblock_calc_crc(struct xfs_buf *);
+bool xfs_btree_sblock_verify_crc(struct xfs_buf *);
/*
- * Set the buffer for level "lev" in the cursor to bp, releasing
- * any previous buffer.
+ * Internal btree helpers also used by xfs_bmap.c.
*/
-void
-xfs_btree_setbuf(
- xfs_btree_cur_t *cur, /* btree cursor */
- int lev, /* level in btree */
- struct xfs_buf *bp); /* new buffer to set */
+void xfs_btree_log_block(struct xfs_btree_cur *, struct xfs_buf *, int);
+void xfs_btree_log_recs(struct xfs_btree_cur *, struct xfs_buf *, int, int);
+
+/*
+ * Helpers.
+ */
+static inline int xfs_btree_get_numrecs(struct xfs_btree_block *block)
+{
+ return be16_to_cpu(block->bb_numrecs);
+}
+
+static inline void xfs_btree_set_numrecs(struct xfs_btree_block *block,
+ __uint16_t numrecs)
+{
+ block->bb_numrecs = cpu_to_be16(numrecs);
+}
-#endif /* __KERNEL__ */
+static inline int xfs_btree_get_level(struct xfs_btree_block *block)
+{
+ return be16_to_cpu(block->bb_level);
+}
/*
@@ -459,4 +446,23 @@ xfs_btree_setbuf(
(XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \
XFS_FSB_TO_AGBNO(mp, fsb) < mp->m_sb.sb_agblocks)
+/*
+ * Trace hooks. Currently not implemented as they need to be ported
+ * over to the generic tracing functionality, which is some effort.
+ *
+ * i,j = integer (32 bit)
+ * b = btree block buffer (xfs_buf_t)
+ * p = btree ptr
+ * r = btree record
+ * k = btree key
+ */
+#define XFS_BTREE_TRACE_ARGBI(c, b, i)
+#define XFS_BTREE_TRACE_ARGBII(c, b, i, j)
+#define XFS_BTREE_TRACE_ARGI(c, i)
+#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s)
+#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r)
+#define XFS_BTREE_TRACE_ARGIK(c, i, k)
+#define XFS_BTREE_TRACE_ARGR(c, r)
+#define XFS_BTREE_TRACE_CURSOR(c, t)
+
#endif /* __XFS_BTREE_H__ */
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
new file mode 100644
index 00000000000..7a34a1ae655
--- /dev/null
+++ b/fs/xfs/xfs_buf.c
@@ -0,0 +1,1890 @@
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/bio.h>
+#include <linux/sysctl.h>
+#include <linux/proc_fs.h>
+#include <linux/workqueue.h>
+#include <linux/percpu.h>
+#include <linux/blkdev.h>
+#include <linux/hash.h>
+#include <linux/kthread.h>
+#include <linux/migrate.h>
+#include <linux/backing-dev.h>
+#include <linux/freezer.h>
+
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_trace.h"
+#include "xfs_log.h"
+
+static kmem_zone_t *xfs_buf_zone;
+
+static struct workqueue_struct *xfslogd_workqueue;
+
+#ifdef XFS_BUF_LOCK_TRACKING
+# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
+# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
+# define XB_GET_OWNER(bp) ((bp)->b_last_holder)
+#else
+# define XB_SET_OWNER(bp) do { } while (0)
+# define XB_CLEAR_OWNER(bp) do { } while (0)
+# define XB_GET_OWNER(bp) do { } while (0)
+#endif
+
+#define xb_to_gfp(flags) \
+ ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
+
+
+static inline int
+xfs_buf_is_vmapped(
+ struct xfs_buf *bp)
+{
+ /*
+ * Return true if the buffer is vmapped.
+ *
+ * b_addr is null if the buffer is not mapped, but the code is clever
+ * enough to know it doesn't have to map a single page, so the check has
+ * to be both for b_addr and bp->b_page_count > 1.
+ */
+ return bp->b_addr && bp->b_page_count > 1;
+}
+
+static inline int
+xfs_buf_vmap_len(
+ struct xfs_buf *bp)
+{
+ return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
+}
+
+/*
+ * When we mark a buffer stale, we remove the buffer from the LRU and clear the
+ * b_lru_ref count so that the buffer is freed immediately when the buffer
+ * reference count falls to zero. If the buffer is already on the LRU, we need
+ * to remove the reference that LRU holds on the buffer.
+ *
+ * This prevents build-up of stale buffers on the LRU.
+ */
+void
+xfs_buf_stale(
+ struct xfs_buf *bp)
+{
+ ASSERT(xfs_buf_islocked(bp));
+
+ bp->b_flags |= XBF_STALE;
+
+ /*
+ * Clear the delwri status so that a delwri queue walker will not
+ * flush this buffer to disk now that it is stale. The delwri queue has
+ * a reference to the buffer, so this is safe to do.
+ */
+ bp->b_flags &= ~_XBF_DELWRI_Q;
+
+ spin_lock(&bp->b_lock);
+ atomic_set(&bp->b_lru_ref, 0);
+ if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
+ (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
+ atomic_dec(&bp->b_hold);
+
+ ASSERT(atomic_read(&bp->b_hold) >= 1);
+ spin_unlock(&bp->b_lock);
+}
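+
+/*
+ * Illustrative usage sketch, not part of this change: a buffer is marked
+ * stale while it is locked, typically when the metadata it contains is
+ * being invalidated, e.g.:
+ *
+ *	xfs_buf_lock(bp);
+ *	xfs_buf_stale(bp);
+ *	xfs_buf_relse(bp);
+ */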
+
+static int
+xfs_buf_get_maps(
+ struct xfs_buf *bp,
+ int map_count)
+{
+ ASSERT(bp->b_maps == NULL);
+ bp->b_map_count = map_count;
+
+ if (map_count == 1) {
+ bp->b_maps = &bp->__b_map;
+ return 0;
+ }
+
+ bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
+ KM_NOFS);
+ if (!bp->b_maps)
+ return ENOMEM;
+ return 0;
+}
+
+/*
+ * Frees b_maps if it was allocated.
+ */
+static void
+xfs_buf_free_maps(
+ struct xfs_buf *bp)
+{
+ if (bp->b_maps != &bp->__b_map) {
+ kmem_free(bp->b_maps);
+ bp->b_maps = NULL;
+ }
+}
+
+struct xfs_buf *
+_xfs_buf_alloc(
+ struct xfs_buftarg *target,
+ struct xfs_buf_map *map,
+ int nmaps,
+ xfs_buf_flags_t flags)
+{
+ struct xfs_buf *bp;
+ int error;
+ int i;
+
+ bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
+ if (unlikely(!bp))
+ return NULL;
+
+ /*
+ * We don't want certain flags to appear in b_flags unless they are
+ * specifically set by later operations on the buffer.
+ */
+ flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
+
+ atomic_set(&bp->b_hold, 1);
+ atomic_set(&bp->b_lru_ref, 1);
+ init_completion(&bp->b_iowait);
+ INIT_LIST_HEAD(&bp->b_lru);
+ INIT_LIST_HEAD(&bp->b_list);
+ RB_CLEAR_NODE(&bp->b_rbnode);
+ sema_init(&bp->b_sema, 0); /* held, no waiters */
+ spin_lock_init(&bp->b_lock);
+ XB_SET_OWNER(bp);
+ bp->b_target = target;
+ bp->b_flags = flags;
+
+ /*
+ * Set length and io_length to the same value initially.
+ * I/O routines should use io_length, which will be the same in
+ * most cases but may be reset (e.g. XFS recovery).
+ */
+ error = xfs_buf_get_maps(bp, nmaps);
+ if (error) {
+ kmem_zone_free(xfs_buf_zone, bp);
+ return NULL;
+ }
+
+ bp->b_bn = map[0].bm_bn;
+ bp->b_length = 0;
+ for (i = 0; i < nmaps; i++) {
+ bp->b_maps[i].bm_bn = map[i].bm_bn;
+ bp->b_maps[i].bm_len = map[i].bm_len;
+ bp->b_length += map[i].bm_len;
+ }
+ bp->b_io_length = bp->b_length;
+
+ atomic_set(&bp->b_pin_count, 0);
+ init_waitqueue_head(&bp->b_waiters);
+
+ XFS_STATS_INC(xb_create);
+ trace_xfs_buf_init(bp, _RET_IP_);
+
+ return bp;
+}
+
+/*
+ * Allocate a page array capable of holding a specified number
+ * of pages, and point the page buf at it.
+ */
+STATIC int
+_xfs_buf_get_pages(
+ xfs_buf_t *bp,
+ int page_count)
+{
+ /* Make sure that we have a page list */
+ if (bp->b_pages == NULL) {
+ bp->b_page_count = page_count;
+ if (page_count <= XB_PAGES) {
+ bp->b_pages = bp->b_page_array;
+ } else {
+ bp->b_pages = kmem_alloc(sizeof(struct page *) *
+ page_count, KM_NOFS);
+ if (bp->b_pages == NULL)
+ return -ENOMEM;
+ }
+ memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
+ }
+ return 0;
+}
+
+/*
+ * Frees b_pages if it was allocated.
+ */
+STATIC void
+_xfs_buf_free_pages(
+ xfs_buf_t *bp)
+{
+ if (bp->b_pages != bp->b_page_array) {
+ kmem_free(bp->b_pages);
+ bp->b_pages = NULL;
+ }
+}
+
+/*
+ * Releases the specified buffer.
+ *
+ * The modification state of any associated pages is left unchanged.
+ * The buffer must not be on any hash - use xfs_buf_rele instead for
+ * hashed and refcounted buffers
+ */
+void
+xfs_buf_free(
+ xfs_buf_t *bp)
+{
+ trace_xfs_buf_free(bp, _RET_IP_);
+
+ ASSERT(list_empty(&bp->b_lru));
+
+ if (bp->b_flags & _XBF_PAGES) {
+ uint i;
+
+ if (xfs_buf_is_vmapped(bp))
+ vm_unmap_ram(bp->b_addr - bp->b_offset,
+ bp->b_page_count);
+
+ for (i = 0; i < bp->b_page_count; i++) {
+ struct page *page = bp->b_pages[i];
+
+ __free_page(page);
+ }
+ } else if (bp->b_flags & _XBF_KMEM)
+ kmem_free(bp->b_addr);
+ _xfs_buf_free_pages(bp);
+ xfs_buf_free_maps(bp);
+ kmem_zone_free(xfs_buf_zone, bp);
+}
+
+/*
+ * Allocates all the pages for the buffer in question and builds its page list.
+ */
+STATIC int
+xfs_buf_allocate_memory(
+ xfs_buf_t *bp,
+ uint flags)
+{
+ size_t size;
+ size_t nbytes, offset;
+ gfp_t gfp_mask = xb_to_gfp(flags);
+ unsigned short page_count, i;
+ xfs_off_t start, end;
+ int error;
+
+ /*
+ * for buffers that are contained within a single page, just allocate
+ * the memory from the heap - there's no need for the complexity of
+ * page arrays to keep allocation down to order 0.
+ */
+ size = BBTOB(bp->b_length);
+ if (size < PAGE_SIZE) {
+ bp->b_addr = kmem_alloc(size, KM_NOFS);
+ if (!bp->b_addr) {
+ /* low memory - use alloc_page loop instead */
+ goto use_alloc_page;
+ }
+
+ if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
+ ((unsigned long)bp->b_addr & PAGE_MASK)) {
+ /* b_addr spans two pages - use alloc_page instead */
+ kmem_free(bp->b_addr);
+ bp->b_addr = NULL;
+ goto use_alloc_page;
+ }
+ bp->b_offset = offset_in_page(bp->b_addr);
+ bp->b_pages = bp->b_page_array;
+ bp->b_pages[0] = virt_to_page(bp->b_addr);
+ bp->b_page_count = 1;
+ bp->b_flags |= _XBF_KMEM;
+ return 0;
+ }
+
+use_alloc_page:
+ start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
+ end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
+ >> PAGE_SHIFT;
+ page_count = end - start;
+ error = _xfs_buf_get_pages(bp, page_count);
+ if (unlikely(error))
+ return error;
+
+ offset = bp->b_offset;
+ bp->b_flags |= _XBF_PAGES;
+
+ for (i = 0; i < bp->b_page_count; i++) {
+ struct page *page;
+ uint retries = 0;
+retry:
+ page = alloc_page(gfp_mask);
+ if (unlikely(page == NULL)) {
+ if (flags & XBF_READ_AHEAD) {
+ bp->b_page_count = i;
+ error = ENOMEM;
+ goto out_free_pages;
+ }
+
+ /*
+ * This could deadlock.
+ *
+ * But until all the XFS lowlevel code is revamped to
+ * handle buffer allocation failures we can't do much.
+ */
+ if (!(++retries % 100))
+ xfs_err(NULL,
+ "possible memory allocation deadlock in %s (mode:0x%x)",
+ __func__, gfp_mask);
+
+ XFS_STATS_INC(xb_page_retries);
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
+
+ XFS_STATS_INC(xb_page_found);
+
+ nbytes = min_t(size_t, size, PAGE_SIZE - offset);
+ size -= nbytes;
+ bp->b_pages[i] = page;
+ offset = 0;
+ }
+ return 0;
+
+out_free_pages:
+ for (i = 0; i < bp->b_page_count; i++)
+ __free_page(bp->b_pages[i]);
+ return error;
+}
+
+/*
+ * Map buffer into kernel address-space if necessary.
+ */
+STATIC int
+_xfs_buf_map_pages(
+ xfs_buf_t *bp,
+ uint flags)
+{
+ ASSERT(bp->b_flags & _XBF_PAGES);
+ if (bp->b_page_count == 1) {
+ /* A single page buffer is always mappable */
+ bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
+ } else if (flags & XBF_UNMAPPED) {
+ bp->b_addr = NULL;
+ } else {
+ int retried = 0;
+ unsigned noio_flag;
+
+ /*
+ * vm_map_ram() will allocate auxiliary structures (e.g.
+ * pagetables) with GFP_KERNEL, yet we are likely to be under
+ * GFP_NOFS context here. Hence we need to tell memory reclaim
+ * that we are in such a context via PF_MEMALLOC_NOIO to prevent
+ * memory reclaim re-entering the filesystem here and
+ * potentially deadlocking.
+ */
+ noio_flag = memalloc_noio_save();
+ do {
+ bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+ -1, PAGE_KERNEL);
+ if (bp->b_addr)
+ break;
+ vm_unmap_aliases();
+ } while (retried++ <= 1);
+ memalloc_noio_restore(noio_flag);
+
+ if (!bp->b_addr)
+ return -ENOMEM;
+ bp->b_addr += bp->b_offset;
+ }
+
+ return 0;
+}
+
+/*
+ * Finding and Reading Buffers
+ */
+
+/*
+ * Look up (and create if absent) a lockable buffer for a given range
+ * of an inode. The buffer is returned locked. No I/O is implied by
+ * this call.
+ */
+xfs_buf_t *
+_xfs_buf_find(
+ struct xfs_buftarg *btp,
+ struct xfs_buf_map *map,
+ int nmaps,
+ xfs_buf_flags_t flags,
+ xfs_buf_t *new_bp)
+{
+ size_t numbytes;
+ struct xfs_perag *pag;
+ struct rb_node **rbp;
+ struct rb_node *parent;
+ xfs_buf_t *bp;
+ xfs_daddr_t blkno = map[0].bm_bn;
+ xfs_daddr_t eofs;
+ int numblks = 0;
+ int i;
+
+ for (i = 0; i < nmaps; i++)
+ numblks += map[i].bm_len;
+ numbytes = BBTOB(numblks);
+
+ /* Check for IOs smaller than the sector size / not sector aligned */
+ ASSERT(!(numbytes < btp->bt_meta_sectorsize));
+ ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));
+
+ /*
+ * Corrupted block numbers can get through to here, unfortunately, so we
+ * have to check that the buffer falls within the filesystem bounds.
+ */
+ eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
+ if (blkno >= eofs) {
+ /*
+ * XXX (dgc): we should really be returning EFSCORRUPTED here,
+ * but none of the higher level infrastructure supports
+ * returning a specific error on buffer lookup failures.
+ */
+ xfs_alert(btp->bt_mount,
+ "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
+ __func__, blkno, eofs);
+ WARN_ON(1);
+ return NULL;
+ }
+
+ /* get tree root */
+ pag = xfs_perag_get(btp->bt_mount,
+ xfs_daddr_to_agno(btp->bt_mount, blkno));
+
+ /* walk tree */
+ spin_lock(&pag->pag_buf_lock);
+ rbp = &pag->pag_buf_tree.rb_node;
+ parent = NULL;
+ bp = NULL;
+ while (*rbp) {
+ parent = *rbp;
+ bp = rb_entry(parent, struct xfs_buf, b_rbnode);
+
+ if (blkno < bp->b_bn)
+ rbp = &(*rbp)->rb_left;
+ else if (blkno > bp->b_bn)
+ rbp = &(*rbp)->rb_right;
+ else {
+ /*
+ * found a block number match. If the range doesn't
+ * match, the only way this is allowed is if the buffer
+ * in the cache is stale and the transaction that made
+ * it stale has not yet committed. i.e. we are
+ * reallocating a busy extent. Skip this buffer and
+ * continue searching to the right for an exact match.
+ */
+ if (bp->b_length != numblks) {
+ ASSERT(bp->b_flags & XBF_STALE);
+ rbp = &(*rbp)->rb_right;
+ continue;
+ }
+ atomic_inc(&bp->b_hold);
+ goto found;
+ }
+ }
+
+ /* No match found */
+ if (new_bp) {
+ rb_link_node(&new_bp->b_rbnode, parent, rbp);
+ rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
+ /* the buffer keeps the perag reference until it is freed */
+ new_bp->b_pag = pag;
+ spin_unlock(&pag->pag_buf_lock);
+ } else {
+ XFS_STATS_INC(xb_miss_locked);
+ spin_unlock(&pag->pag_buf_lock);
+ xfs_perag_put(pag);
+ }
+ return new_bp;
+
+found:
+ spin_unlock(&pag->pag_buf_lock);
+ xfs_perag_put(pag);
+
+ if (!xfs_buf_trylock(bp)) {
+ if (flags & XBF_TRYLOCK) {
+ xfs_buf_rele(bp);
+ XFS_STATS_INC(xb_busy_locked);
+ return NULL;
+ }
+ xfs_buf_lock(bp);
+ XFS_STATS_INC(xb_get_locked_waited);
+ }
+
+ /*
+ * if the buffer is stale, clear all the external state associated with
+ * it. We need to keep flags such as how we allocated the buffer memory
+ * intact here.
+ */
+ if (bp->b_flags & XBF_STALE) {
+ ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
+ ASSERT(bp->b_iodone == NULL);
+ bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
+ bp->b_ops = NULL;
+ }
+
+ trace_xfs_buf_find(bp, flags, _RET_IP_);
+ XFS_STATS_INC(xb_get_locked);
+ return bp;
+}
+
+/*
+ * Assembles a buffer covering the specified range. The code is optimised for
+ * cache hits, as metadata intensive workloads will see 3 orders of magnitude
+ * more hits than misses.
+ */
+struct xfs_buf *
+xfs_buf_get_map(
+ struct xfs_buftarg *target,
+ struct xfs_buf_map *map,
+ int nmaps,
+ xfs_buf_flags_t flags)
+{
+ struct xfs_buf *bp;
+ struct xfs_buf *new_bp;
+ int error = 0;
+
+ bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
+ if (likely(bp))
+ goto found;
+
+ new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
+ if (unlikely(!new_bp))
+ return NULL;
+
+ error = xfs_buf_allocate_memory(new_bp, flags);
+ if (error) {
+ xfs_buf_free(new_bp);
+ return NULL;
+ }
+
+ bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
+ if (!bp) {
+ xfs_buf_free(new_bp);
+ return NULL;
+ }
+
+ if (bp != new_bp)
+ xfs_buf_free(new_bp);
+
+found:
+ if (!bp->b_addr) {
+ error = _xfs_buf_map_pages(bp, flags);
+ if (unlikely(error)) {
+ xfs_warn(target->bt_mount,
+ "%s: failed to map pagesn", __func__);
+ xfs_buf_relse(bp);
+ return NULL;
+ }
+ }
+
+ XFS_STATS_INC(xb_get);
+ trace_xfs_buf_get(bp, flags, _RET_IP_);
+ return bp;
+}
+
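+/*
+ * Set up the buffer flags for a read and issue the I/O. Asynchronous reads
+ * return immediately; otherwise we wait for completion and return the
+ * buffer's error state.
+ */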
+STATIC int
+_xfs_buf_read(
+ xfs_buf_t *bp,
+ xfs_buf_flags_t flags)
+{
+ ASSERT(!(flags & XBF_WRITE));
+ ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
+
+ bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
+ bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
+
+ xfs_buf_iorequest(bp);
+ if (flags & XBF_ASYNC)
+ return 0;
+ return xfs_buf_iowait(bp);
+}
+
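+/*
+ * Read a buffer described by the supplied map. If the buffer is already up
+ * to date no I/O is issued; an already-satisfied asynchronous (readahead)
+ * request is simply released and NULL is returned.
+ */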
+xfs_buf_t *
+xfs_buf_read_map(
+ struct xfs_buftarg *target,
+ struct xfs_buf_map *map,
+ int nmaps,
+ xfs_buf_flags_t flags,
+ const struct xfs_buf_ops *ops)
+{
+ struct xfs_buf *bp;
+
+ flags |= XBF_READ;
+
+ bp = xfs_buf_get_map(target, map, nmaps, flags);
+ if (bp) {
+ trace_xfs_buf_read(bp, flags, _RET_IP_);
+
+ if (!XFS_BUF_ISDONE(bp)) {
+ XFS_STATS_INC(xb_get_read);
+ bp->b_ops = ops;
+ _xfs_buf_read(bp, flags);
+ } else if (flags & XBF_ASYNC) {
+ /*
+ * Read ahead call which is already satisfied,
+ * drop the buffer
+ */
+ xfs_buf_relse(bp);
+ return NULL;
+ } else {
+ /* We do not want read in the flags */
+ bp->b_flags &= ~XBF_READ;
+ }
+ }
+
+ return bp;
+}
+
+/*
+ * If we are not low on memory then do the readahead in a deadlock
+ * safe manner.
+ */
+void
+xfs_buf_readahead_map(
+ struct xfs_buftarg *target,
+ struct xfs_buf_map *map,
+ int nmaps,
+ const struct xfs_buf_ops *ops)
+{
+ if (bdi_read_congested(target->bt_bdi))
+ return;
+
+ xfs_buf_read_map(target, map, nmaps,
+ XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
+}
+
+/*
+ * Read an uncached buffer from disk. Allocates and returns a locked
+ * buffer containing the disk contents or nothing.
+ */
+struct xfs_buf *
+xfs_buf_read_uncached(
+ struct xfs_buftarg *target,
+ xfs_daddr_t daddr,
+ size_t numblks,
+ int flags,
+ const struct xfs_buf_ops *ops)
+{
+ struct xfs_buf *bp;
+
+ bp = xfs_buf_get_uncached(target, numblks, flags);
+ if (!bp)
+ return NULL;
+
+ /* set up the buffer for a read IO */
+ ASSERT(bp->b_map_count == 1);
+ bp->b_bn = daddr;
+ bp->b_maps[0].bm_bn = daddr;
+ bp->b_flags |= XBF_READ;
+ bp->b_ops = ops;
+
+ if (XFS_FORCED_SHUTDOWN(target->bt_mount)) {
+ xfs_buf_relse(bp);
+ return NULL;
+ }
+ xfs_buf_iorequest(bp);
+ xfs_buf_iowait(bp);
+ return bp;
+}
+
+/*
+ * Return a buffer allocated as an empty buffer and associated with external
+ * memory via xfs_buf_associate_memory() back to its empty state.
+ */
+void
+xfs_buf_set_empty(
+ struct xfs_buf *bp,
+ size_t numblks)
+{
+ if (bp->b_pages)
+ _xfs_buf_free_pages(bp);
+
+ bp->b_pages = NULL;
+ bp->b_page_count = 0;
+ bp->b_addr = NULL;
+ bp->b_length = numblks;
+ bp->b_io_length = numblks;
+
+ ASSERT(bp->b_map_count == 1);
+ bp->b_bn = XFS_BUF_DADDR_NULL;
+ bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
+ bp->b_maps[0].bm_len = bp->b_length;
+}
+
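+/*
+ * Return the page backing a kernel virtual address, handling both
+ * directly-mapped and vmalloc'd memory.
+ */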
+static inline struct page *
+mem_to_page(
+ void *addr)
+{
+ if ((!is_vmalloc_addr(addr))) {
+ return virt_to_page(addr);
+ } else {
+ return vmalloc_to_page(addr);
+ }
+}
+
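+/*
+ * Associate caller-supplied memory with a buffer. The page list is rebuilt
+ * from the pages backing the supplied region, and the buffer's length and
+ * I/O length are set to cover it.
+ */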
+int
+xfs_buf_associate_memory(
+ xfs_buf_t *bp,
+ void *mem,
+ size_t len)
+{
+ int rval;
+ int i = 0;
+ unsigned long pageaddr;
+ unsigned long offset;
+ size_t buflen;
+ int page_count;
+
+ pageaddr = (unsigned long)mem & PAGE_MASK;
+ offset = (unsigned long)mem - pageaddr;
+ buflen = PAGE_ALIGN(len + offset);
+ page_count = buflen >> PAGE_SHIFT;
+
+ /* Free any previous set of page pointers */
+ if (bp->b_pages)
+ _xfs_buf_free_pages(bp);
+
+ bp->b_pages = NULL;
+ bp->b_addr = mem;
+
+ rval = _xfs_buf_get_pages(bp, page_count);
+ if (rval)
+ return rval;
+
+ bp->b_offset = offset;
+
+ for (i = 0; i < bp->b_page_count; i++) {
+ bp->b_pages[i] = mem_to_page((void *)pageaddr);
+ pageaddr += PAGE_SIZE;
+ }
+
+ bp->b_io_length = BTOBB(len);
+ bp->b_length = BTOBB(buflen);
+
+ return 0;
+}
+
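+/*
+ * Get an uncached buffer. The buffer has no disk address and is never
+ * inserted into the per-AG buffer cache; its pages are allocated directly
+ * and mapped into kernel address space.
+ */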
+xfs_buf_t *
+xfs_buf_get_uncached(
+ struct xfs_buftarg *target,
+ size_t numblks,
+ int flags)
+{
+ unsigned long page_count;
+ int error, i;
+ struct xfs_buf *bp;
+ DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
+
+ bp = _xfs_buf_alloc(target, &map, 1, 0);
+ if (unlikely(bp == NULL))
+ goto fail;
+
+ page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
+ error = _xfs_buf_get_pages(bp, page_count);
+ if (error)
+ goto fail_free_buf;
+
+ for (i = 0; i < page_count; i++) {
+ bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
+ if (!bp->b_pages[i])
+ goto fail_free_mem;
+ }
+ bp->b_flags |= _XBF_PAGES;
+
+ error = _xfs_buf_map_pages(bp, 0);
+ if (unlikely(error)) {
+ xfs_warn(target->bt_mount,
+ "%s: failed to map pages", __func__);
+ goto fail_free_mem;
+ }
+
+ trace_xfs_buf_get_uncached(bp, _RET_IP_);
+ return bp;
+
+ fail_free_mem:
+ while (--i >= 0)
+ __free_page(bp->b_pages[i]);
+ _xfs_buf_free_pages(bp);
+ fail_free_buf:
+ xfs_buf_free_maps(bp);
+ kmem_zone_free(xfs_buf_zone, bp);
+ fail:
+ return NULL;
+}
+
+/*
+ * Increment the reference count on a buffer, to hold the buffer concurrently
+ * with another thread which may release (free) the buffer asynchronously.
+ * The caller must already hold a reference to call this function.
+ */
+void
+xfs_buf_hold(
+ xfs_buf_t *bp)
+{
+ trace_xfs_buf_hold(bp, _RET_IP_);
+ atomic_inc(&bp->b_hold);
+}
+
+/*
+ * Releases a hold on the specified buffer. If this is the last hold,
+ * the buffer is freed via xfs_buf_free.
+ */
+void
+xfs_buf_rele(
+ xfs_buf_t *bp)
+{
+ struct xfs_perag *pag = bp->b_pag;
+
+ trace_xfs_buf_rele(bp, _RET_IP_);
+
+ if (!pag) {
+ ASSERT(list_empty(&bp->b_lru));
+ ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
+ if (atomic_dec_and_test(&bp->b_hold))
+ xfs_buf_free(bp);
+ return;
+ }
+
+ ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
+
+ ASSERT(atomic_read(&bp->b_hold) > 0);
+ if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
+ spin_lock(&bp->b_lock);
+ if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
+ /*
+ * If the buffer is added to the LRU take a new
+ * reference to the buffer for the LRU and clear the
+ * (now stale) dispose list state flag
+ */
+ if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
+ bp->b_state &= ~XFS_BSTATE_DISPOSE;
+ atomic_inc(&bp->b_hold);
+ }
+ spin_unlock(&bp->b_lock);
+ spin_unlock(&pag->pag_buf_lock);
+ } else {
+ /*
+ * most of the time buffers will already be removed from
+ * the LRU, so optimise that case by checking for the
+ * XFS_BSTATE_DISPOSE flag indicating the last list the
+ * buffer was on was the disposal list
+ */
+ if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
+ list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
+ } else {
+ ASSERT(list_empty(&bp->b_lru));
+ }
+ spin_unlock(&bp->b_lock);
+
+ ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
+ rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
+ spin_unlock(&pag->pag_buf_lock);
+ xfs_perag_put(pag);
+ xfs_buf_free(bp);
+ }
+ }
+}
+
+
+/*
+ * Lock a buffer object, if it is not already locked.
+ *
+ * If we come across a stale, pinned, locked buffer, we know that we are
+ * being asked to lock a buffer that has been reallocated. Because it is
+ * pinned, we know that the log has not been pushed to disk and hence it
+ * will still be locked. Rather than continuing to have trylock attempts
+ * fail until someone else pushes the log, push it ourselves before
+ * returning. This means that the xfsaild will not get stuck trying
+ * to push on stale inode buffers.
+ */
+int
+xfs_buf_trylock(
+ struct xfs_buf *bp)
+{
+ int locked;
+
+ locked = down_trylock(&bp->b_sema) == 0;
+ if (locked)
+ XB_SET_OWNER(bp);
+
+ trace_xfs_buf_trylock(bp, _RET_IP_);
+ return locked;
+}
+
+/*
+ * Lock a buffer object.
+ *
+ * If we come across a stale, pinned, locked buffer, we know that we
+ * are being asked to lock a buffer that has been reallocated. Because
+ * it is pinned, we know that the log has not been pushed to disk and
+ * hence it will still be locked. Rather than sleeping until someone
+ * else pushes the log, push it ourselves before trying to get the lock.
+ */
+void
+xfs_buf_lock(
+ struct xfs_buf *bp)
+{
+ trace_xfs_buf_lock(bp, _RET_IP_);
+
+ if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
+ xfs_log_force(bp->b_target->bt_mount, 0);
+ down(&bp->b_sema);
+ XB_SET_OWNER(bp);
+
+ trace_xfs_buf_lock_done(bp, _RET_IP_);
+}
+
+void
+xfs_buf_unlock(
+ struct xfs_buf *bp)
+{
+ XB_CLEAR_OWNER(bp);
+ up(&bp->b_sema);
+
+ trace_xfs_buf_unlock(bp, _RET_IP_);
+}
+
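+/*
+ * Wait for the buffer's pin count to drop to zero before issuing a write,
+ * sleeping on b_waiters until the buffer is unpinned.
+ */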
+STATIC void
+xfs_buf_wait_unpin(
+ xfs_buf_t *bp)
+{
+ DECLARE_WAITQUEUE (wait, current);
+
+ if (atomic_read(&bp->b_pin_count) == 0)
+ return;
+
+ add_wait_queue(&bp->b_waiters, &wait);
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&bp->b_pin_count) == 0)
+ break;
+ io_schedule();
+ }
+ remove_wait_queue(&bp->b_waiters, &wait);
+ set_current_state(TASK_RUNNING);
+}
+
+/*
+ * Buffer Utility Routines
+ */
+
+STATIC void
+xfs_buf_iodone_work(
+ struct work_struct *work)
+{
+ struct xfs_buf *bp =
+ container_of(work, xfs_buf_t, b_iodone_work);
+ bool read = !!(bp->b_flags & XBF_READ);
+
+ bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
+
+ /* only validate buffers that were read without errors */
+ if (read && bp->b_ops && !bp->b_error && (bp->b_flags & XBF_DONE))
+ bp->b_ops->verify_read(bp);
+
+ if (bp->b_iodone)
+ (*(bp->b_iodone))(bp);
+ else if (bp->b_flags & XBF_ASYNC)
+ xfs_buf_relse(bp);
+ else {
+ ASSERT(read && bp->b_ops);
+ complete(&bp->b_iowait);
+ }
+}
+
+void
+xfs_buf_ioend(
+ struct xfs_buf *bp,
+ int schedule)
+{
+ bool read = !!(bp->b_flags & XBF_READ);
+
+ trace_xfs_buf_iodone(bp, _RET_IP_);
+
+ if (bp->b_error == 0)
+ bp->b_flags |= XBF_DONE;
+
+ if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
+ if (schedule) {
+ INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
+ queue_work(xfslogd_workqueue, &bp->b_iodone_work);
+ } else {
+ xfs_buf_iodone_work(&bp->b_iodone_work);
+ }
+ } else {
+ bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
+ complete(&bp->b_iowait);
+ }
+}
+
+void
+xfs_buf_ioerror(
+ xfs_buf_t *bp,
+ int error)
+{
+ ASSERT(error >= 0 && error <= 0xffff);
+ bp->b_error = (unsigned short)error;
+ trace_xfs_buf_ioerror(bp, error, _RET_IP_);
+}
+
+void
+xfs_buf_ioerror_alert(
+ struct xfs_buf *bp,
+ const char *func)
+{
+ xfs_alert(bp->b_target->bt_mount,
+"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
+ (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
+}
+
+/*
+ * Called when we want to stop a buffer from getting written or read.
+ * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
+ * so that the proper iodone callbacks get called.
+ */
+STATIC int
+xfs_bioerror(
+ xfs_buf_t *bp)
+{
+#ifdef XFSERRORDEBUG
+ ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
+#endif
+
+ /*
+ * No need to wait until the buffer is unpinned; we aren't flushing it.
+ */
+ xfs_buf_ioerror(bp, EIO);
+
+ /*
+ * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
+ */
+ XFS_BUF_UNREAD(bp);
+ XFS_BUF_UNDONE(bp);
+ xfs_buf_stale(bp);
+
+ xfs_buf_ioend(bp, 0);
+
+ return EIO;
+}
+
+/*
+ * Same as xfs_bioerror, except that we are releasing the buffer
+ * here ourselves, and avoiding the xfs_buf_ioend call.
+ * This is meant for userdata errors; metadata bufs come with
+ * iodone functions attached, so that we can track down errors.
+ */
+int
+xfs_bioerror_relse(
+ struct xfs_buf *bp)
+{
+ int64_t fl = bp->b_flags;
+ /*
+ * No need to wait until the buffer is unpinned.
+ * We aren't flushing it.
+ *
+ * chunkhold expects B_DONE to be set, whether
+ * we actually finish the I/O or not. We don't want to
+ * change that interface.
+ */
+ XFS_BUF_UNREAD(bp);
+ XFS_BUF_DONE(bp);
+ xfs_buf_stale(bp);
+ bp->b_iodone = NULL;
+ if (!(fl & XBF_ASYNC)) {
+ /*
+ * Mark b_error and B_ERROR _both_.
+ * Lots of chunkcache code assumes that.
+ * There's no reason to mark error for
+ * ASYNC buffers.
+ */
+ xfs_buf_ioerror(bp, EIO);
+ complete(&bp->b_iowait);
+ } else {
+ xfs_buf_relse(bp);
+ }
+
+ return EIO;
+}
+
+STATIC int
+xfs_bdstrat_cb(
+ struct xfs_buf *bp)
+{
+ if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
+ trace_xfs_bdstrat_shut(bp, _RET_IP_);
+ /*
+ * Metadata write that didn't get logged but
+ * written delayed anyway. These aren't associated
+ * with a transaction, and can be ignored.
+ */
+ if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
+ return xfs_bioerror_relse(bp);
+ else
+ return xfs_bioerror(bp);
+ }
+
+ xfs_buf_iorequest(bp);
+ return 0;
+}
+
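+/*
+ * Write out a buffer synchronously and wait for completion. An I/O error
+ * forces a filesystem shutdown.
+ */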
+int
+xfs_bwrite(
+ struct xfs_buf *bp)
+{
+ int error;
+
+ ASSERT(xfs_buf_islocked(bp));
+
+ bp->b_flags |= XBF_WRITE;
+ bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL);
+
+ xfs_bdstrat_cb(bp);
+
+ error = xfs_buf_iowait(bp);
+ if (error) {
+ xfs_force_shutdown(bp->b_target->bt_mount,
+ SHUTDOWN_META_IO_ERROR);
+ }
+ return error;
+}
+
+STATIC void
+_xfs_buf_ioend(
+ xfs_buf_t *bp,
+ int schedule)
+{
+ if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
+ xfs_buf_ioend(bp, schedule);
+}
+
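+/*
+ * Per-bio completion handler. The first error seen is recorded on the
+ * buffer, successfully read vmapped buffers have their kernel mapping
+ * invalidated, and this bio's reference on the outstanding I/O count is
+ * dropped.
+ */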
+STATIC void
+xfs_buf_bio_end_io(
+ struct bio *bio,
+ int error)
+{
+ xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
+
+ /*
+ * don't overwrite existing errors - otherwise we can lose errors on
+ * buffers that require multiple bios to complete.
+ */
+ if (!bp->b_error)
+ xfs_buf_ioerror(bp, -error);
+
+ if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+ invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
+
+ _xfs_buf_ioend(bp, 1);
+ bio_put(bio);
+}
+
+static void
+xfs_buf_ioapply_map(
+ struct xfs_buf *bp,
+ int map,
+ int *buf_offset,
+ int *count,
+ int rw)
+{
+ int page_index;
+ int total_nr_pages = bp->b_page_count;
+ int nr_pages;
+ struct bio *bio;
+ sector_t sector = bp->b_maps[map].bm_bn;
+ int size;
+ int offset;
+
+ total_nr_pages = bp->b_page_count;
+
+ /* skip the pages in the buffer before the start offset */
+ page_index = 0;
+ offset = *buf_offset;
+ while (offset >= PAGE_SIZE) {
+ page_index++;
+ offset -= PAGE_SIZE;
+ }
+
+ /*
+ * Limit the IO size to the length of the current vector, and update the
+ * remaining IO count for the next time around.
+ */
+ size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
+ *count -= size;
+ *buf_offset += size;
+
+next_chunk:
+ atomic_inc(&bp->b_io_remaining);
+ nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
+ if (nr_pages > total_nr_pages)
+ nr_pages = total_nr_pages;
+
+ bio = bio_alloc(GFP_NOIO, nr_pages);
+ bio->bi_bdev = bp->b_target->bt_bdev;
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_end_io = xfs_buf_bio_end_io;
+ bio->bi_private = bp;
+
+ for (; size && nr_pages; nr_pages--, page_index++) {
+ int rbytes, nbytes = PAGE_SIZE - offset;
+
+ if (nbytes > size)
+ nbytes = size;
+
+ rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
+ offset);
+ if (rbytes < nbytes)
+ break;
+
+ offset = 0;
+ sector += BTOBB(nbytes);
+ size -= nbytes;
+ total_nr_pages--;
+ }
+
+ if (likely(bio->bi_iter.bi_size)) {
+ if (xfs_buf_is_vmapped(bp)) {
+ flush_kernel_vmap_range(bp->b_addr,
+ xfs_buf_vmap_len(bp));
+ }
+ submit_bio(rw, bio);
+ if (size)
+ goto next_chunk;
+ } else {
+ /*
+ * This is guaranteed not to be the last io reference count
+ * because the caller (xfs_buf_iorequest) holds a count itself.
+ */
+ atomic_dec(&bp->b_io_remaining);
+ xfs_buf_ioerror(bp, EIO);
+ bio_put(bio);
+ }
+
+}
+
+STATIC void
+_xfs_buf_ioapply(
+ struct xfs_buf *bp)
+{
+ struct blk_plug plug;
+ int rw;
+ int offset;
+ int size;
+ int i;
+
+ /*
+ * Make sure we capture only current IO errors rather than stale errors
+ * left over from previous use of the buffer (e.g. failed readahead).
+ */
+ bp->b_error = 0;
+
+ if (bp->b_flags & XBF_WRITE) {
+ if (bp->b_flags & XBF_SYNCIO)
+ rw = WRITE_SYNC;
+ else
+ rw = WRITE;
+ if (bp->b_flags & XBF_FUA)
+ rw |= REQ_FUA;
+ if (bp->b_flags & XBF_FLUSH)
+ rw |= REQ_FLUSH;
+
+ /*
+ * Run the write verifier callback function if it exists. If
+ * this function fails it will mark the buffer with an error and
+ * the IO should not be dispatched.
+ */
+ if (bp->b_ops) {
+ bp->b_ops->verify_write(bp);
+ if (bp->b_error) {
+ xfs_force_shutdown(bp->b_target->bt_mount,
+ SHUTDOWN_CORRUPT_INCORE);
+ return;
+ }
+ }
+ } else if (bp->b_flags & XBF_READ_AHEAD) {
+ rw = READA;
+ } else {
+ rw = READ;
+ }
+
+ /* we only use the buffer cache for meta-data */
+ rw |= REQ_META;
+
+ /*
+ * Walk all the vectors issuing IO on them. Set up the initial offset
+ * into the buffer and the desired IO size before we start -
+ * xfs_buf_ioapply_map() will modify them appropriately for each
+ * subsequent call.
+ */
+ offset = bp->b_offset;
+ size = BBTOB(bp->b_io_length);
+ blk_start_plug(&plug);
+ for (i = 0; i < bp->b_map_count; i++) {
+ xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
+ if (bp->b_error)
+ break;
+ if (size <= 0)
+ break; /* all done */
+ }
+ blk_finish_plug(&plug);
+}
+
+void
+xfs_buf_iorequest(
+ xfs_buf_t *bp)
+{
+ trace_xfs_buf_iorequest(bp, _RET_IP_);
+
+ ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
+
+ if (bp->b_flags & XBF_WRITE)
+ xfs_buf_wait_unpin(bp);
+ xfs_buf_hold(bp);
+
+ /*
+ * Set the count to 1 initially; this stops an I/O completion
+ * callout that happens before we have started all the I/O from
+ * calling xfs_buf_ioend too early.
+ */
+ atomic_set(&bp->b_io_remaining, 1);
+ _xfs_buf_ioapply(bp);
+ /*
+ * If _xfs_buf_ioapply failed, we'll get back here with
+ * only the reference we took above. _xfs_buf_ioend will
+ * drop it to zero, so we'd better not queue it for later,
+ * or we'll free it before it's done.
+ */
+ _xfs_buf_ioend(bp, bp->b_error ? 0 : 1);
+
+ xfs_buf_rele(bp);
+}
+
+/*
+ * Waits for I/O to complete on the buffer supplied. It returns immediately if
+ * no I/O is pending or there is already a pending error on the buffer, in which
+ * case nothing will ever complete. It returns the I/O error code, if any, or
+ * 0 if there was no error.
+ */
+int
+xfs_buf_iowait(
+ xfs_buf_t *bp)
+{
+ trace_xfs_buf_iowait(bp, _RET_IP_);
+
+ if (!bp->b_error)
+ wait_for_completion(&bp->b_iowait);
+
+ trace_xfs_buf_iowait_done(bp, _RET_IP_);
+ return bp->b_error;
+}
+
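+/*
+ * Return a kernel address for the given byte offset into the buffer,
+ * whether or not the buffer has a contiguous virtual mapping.
+ */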
+xfs_caddr_t
+xfs_buf_offset(
+ xfs_buf_t *bp,
+ size_t offset)
+{
+ struct page *page;
+
+ if (bp->b_addr)
+ return bp->b_addr + offset;
+
+ offset += bp->b_offset;
+ page = bp->b_pages[offset >> PAGE_SHIFT];
+ return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
+}
+
+/*
+ * Move data into or out of a buffer.
+ */
+void
+xfs_buf_iomove(
+ xfs_buf_t *bp, /* buffer to process */
+ size_t boff, /* starting buffer offset */
+ size_t bsize, /* length to copy */
+ void *data, /* data address */
+ xfs_buf_rw_t mode) /* read/write/zero flag */
+{
+ size_t bend;
+
+ bend = boff + bsize;
+ while (boff < bend) {
+ struct page *page;
+ int page_index, page_offset, csize;
+
+ page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
+ page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
+ page = bp->b_pages[page_index];
+ csize = min_t(size_t, PAGE_SIZE - page_offset,
+ BBTOB(bp->b_io_length) - boff);
+
+ ASSERT((csize + page_offset) <= PAGE_SIZE);
+
+ switch (mode) {
+ case XBRW_ZERO:
+ memset(page_address(page) + page_offset, 0, csize);
+ break;
+ case XBRW_READ:
+ memcpy(data, page_address(page) + page_offset, csize);
+ break;
+ case XBRW_WRITE:
+ memcpy(page_address(page) + page_offset, data, csize);
+ }
+
+ boff += csize;
+ data += csize;
+ }
+}
+
+/*
+ * Handling of buffer targets (buftargs).
+ */
+
+/*
+ * Wait for any bufs with callbacks that have been submitted but have not yet
+ * returned. These buffers will have an elevated hold count, so wait on those
+ * while freeing all the buffers only held by the LRU.
+ */
+static enum lru_status
+xfs_buftarg_wait_rele(
+ struct list_head *item,
+ spinlock_t *lru_lock,
+ void *arg)
+{
+ struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
+ struct list_head *dispose = arg;
+
+ if (atomic_read(&bp->b_hold) > 1) {
+ /* need to wait, so skip it this pass */
+ trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
+ return LRU_SKIP;
+ }
+ if (!spin_trylock(&bp->b_lock))
+ return LRU_SKIP;
+
+ /*
+ * clear the LRU reference count so the buffer doesn't get
+ * ignored in xfs_buf_rele().
+ */
+ atomic_set(&bp->b_lru_ref, 0);
+ bp->b_state |= XFS_BSTATE_DISPOSE;
+ list_move(item, dispose);
+ spin_unlock(&bp->b_lock);
+ return LRU_REMOVED;
+}
+
+void
+xfs_wait_buftarg(
+ struct xfs_buftarg *btp)
+{
+ LIST_HEAD(dispose);
+ int loop = 0;
+
+ /* loop until there is nothing left on the lru list. */
+ while (list_lru_count(&btp->bt_lru)) {
+ list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
+ &dispose, LONG_MAX);
+
+ while (!list_empty(&dispose)) {
+ struct xfs_buf *bp;
+ bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
+ list_del_init(&bp->b_lru);
+ if (bp->b_flags & XBF_WRITE_FAIL) {
+ xfs_alert(btp->bt_mount,
+"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
+"Please run xfs_repair to determine the extent of the problem.",
+ (long long)bp->b_bn);
+ }
+ xfs_buf_rele(bp);
+ }
+ if (loop++ != 0)
+ delay(100);
+ }
+}
+
+static enum lru_status
+xfs_buftarg_isolate(
+ struct list_head *item,
+ spinlock_t *lru_lock,
+ void *arg)
+{
+ struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
+ struct list_head *dispose = arg;
+
+ /*
+ * we are inverting the lru lock/bp->b_lock here, so use a trylock.
+ * If we fail to get the lock, just skip it.
+ */
+ if (!spin_trylock(&bp->b_lock))
+ return LRU_SKIP;
+ /*
+ * Decrement the b_lru_ref count unless the value is already
+ * zero. If the value is already zero, we need to reclaim the
+ * buffer, otherwise it gets another trip through the LRU.
+ */
+ if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
+ spin_unlock(&bp->b_lock);
+ return LRU_ROTATE;
+ }
+
+ bp->b_state |= XFS_BSTATE_DISPOSE;
+ list_move(item, dispose);
+ spin_unlock(&bp->b_lock);
+ return LRU_REMOVED;
+}
+
+static unsigned long
+xfs_buftarg_shrink_scan(
+ struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct xfs_buftarg *btp = container_of(shrink,
+ struct xfs_buftarg, bt_shrinker);
+ LIST_HEAD(dispose);
+ unsigned long freed;
+ unsigned long nr_to_scan = sc->nr_to_scan;
+
+ freed = list_lru_walk_node(&btp->bt_lru, sc->nid, xfs_buftarg_isolate,
+ &dispose, &nr_to_scan);
+
+ while (!list_empty(&dispose)) {
+ struct xfs_buf *bp;
+ bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
+ list_del_init(&bp->b_lru);
+ xfs_buf_rele(bp);
+ }
+
+ return freed;
+}
+
+static unsigned long
+xfs_buftarg_shrink_count(
+ struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct xfs_buftarg *btp = container_of(shrink,
+ struct xfs_buftarg, bt_shrinker);
+ return list_lru_count_node(&btp->bt_lru, sc->nid);
+}
+
+void
+xfs_free_buftarg(
+ struct xfs_mount *mp,
+ struct xfs_buftarg *btp)
+{
+ unregister_shrinker(&btp->bt_shrinker);
+ list_lru_destroy(&btp->bt_lru);
+
+ if (mp->m_flags & XFS_MOUNT_BARRIER)
+ xfs_blkdev_issue_flush(btp);
+
+ kmem_free(btp);
+}
+
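+/*
+ * Configure the metadata sector size for the buffer target, propagate it to
+ * the underlying block device, and record the device's logical sector size
+ * and mask.
+ */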
+int
+xfs_setsize_buftarg(
+ xfs_buftarg_t *btp,
+ unsigned int sectorsize)
+{
+ /* Set up metadata sector size info */
+ btp->bt_meta_sectorsize = sectorsize;
+ btp->bt_meta_sectormask = sectorsize - 1;
+
+ if (set_blocksize(btp->bt_bdev, sectorsize)) {
+ char name[BDEVNAME_SIZE];
+
+ bdevname(btp->bt_bdev, name);
+
+ xfs_warn(btp->bt_mount,
+ "Cannot set_blocksize to %u on device %s",
+ sectorsize, name);
+ return EINVAL;
+ }
+
+ /* Set up device logical sector size mask */
+ btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
+ btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;
+
+ return 0;
+}
+
+/*
+ * When allocating the initial buffer target we have not yet
+ * read in the superblock, so don't know what sized sectors
+ * are being used at this early stage. Play safe.
+ */
+STATIC int
+xfs_setsize_buftarg_early(
+ xfs_buftarg_t *btp,
+ struct block_device *bdev)
+{
+ return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
+}
+
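+/*
+ * Allocate and initialise a buffer target for the given block device,
+ * setting up the backing device info, an initial sector size, the buffer
+ * LRU and its memory shrinker.
+ */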
+xfs_buftarg_t *
+xfs_alloc_buftarg(
+ struct xfs_mount *mp,
+ struct block_device *bdev)
+{
+ xfs_buftarg_t *btp;
+
+ btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
+
+ btp->bt_mount = mp;
+ btp->bt_dev = bdev->bd_dev;
+ btp->bt_bdev = bdev;
+ btp->bt_bdi = blk_get_backing_dev_info(bdev);
+ if (!btp->bt_bdi)
+ goto error;
+
+ if (xfs_setsize_buftarg_early(btp, bdev))
+ goto error;
+
+ if (list_lru_init(&btp->bt_lru))
+ goto error;
+
+ btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
+ btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
+ btp->bt_shrinker.seeks = DEFAULT_SEEKS;
+ btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
+ register_shrinker(&btp->bt_shrinker);
+ return btp;
+
+error:
+ kmem_free(btp);
+ return NULL;
+}
+
+/*
+ * Add a buffer to the delayed write list.
+ *
+ * This queues a buffer for writeout if it hasn't already been. Note that
+ * neither this routine nor the buffer list submission functions perform
+ * any internal synchronization. It is expected that the lists are thread-local
+ * to the callers.
+ *
+ * Returns true if we queued up the buffer, or false if it had already
+ * been on the buffer list.
+ */
+bool
+xfs_buf_delwri_queue(
+ struct xfs_buf *bp,
+ struct list_head *list)
+{
+ ASSERT(xfs_buf_islocked(bp));
+ ASSERT(!(bp->b_flags & XBF_READ));
+
+ /*
+ * If the buffer is already marked delwri it has already been queued up
+ * by someone else for immediate writeout. Just ignore it in that
+ * case.
+ */
+ if (bp->b_flags & _XBF_DELWRI_Q) {
+ trace_xfs_buf_delwri_queued(bp, _RET_IP_);
+ return false;
+ }
+
+ trace_xfs_buf_delwri_queue(bp, _RET_IP_);
+
+ /*
+ * If a buffer gets written out synchronously or marked stale while it
+ * is on a delwri list we lazily remove it. To do this, the other party
+ * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
+ * It remains referenced and on the list. In a rare corner case it
+ * might get re-added to a delwri list after the synchronous writeout, in
+ * which case we just need to re-add the flag here.
+ */
+ bp->b_flags |= _XBF_DELWRI_Q;
+ if (list_empty(&bp->b_list)) {
+ atomic_inc(&bp->b_hold);
+ list_add_tail(&bp->b_list, list);
+ }
+
+ return true;
+}
+
+/*
+ * The compare function is more complex than it needs to be because
+ * the return value is only 32 bits and we are doing comparisons
+ * on 64 bit values.
+ */
+static int
+xfs_buf_cmp(
+ void *priv,
+ struct list_head *a,
+ struct list_head *b)
+{
+ struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
+ struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
+ xfs_daddr_t diff;
+
+ diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
+ if (diff < 0)
+ return -1;
+ if (diff > 0)
+ return 1;
+ return 0;
+}
+
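+/*
+ * Submit buffers for write from a delwri list. Buffers that are no longer
+ * marked _XBF_DELWRI_Q are dropped from the list; the remainder are sorted
+ * by disk address and written out under a single plug. Returns the number
+ * of pinned buffers that were skipped in the non-blocking case.
+ */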
+static int
+__xfs_buf_delwri_submit(
+ struct list_head *buffer_list,
+ struct list_head *io_list,
+ bool wait)
+{
+ struct blk_plug plug;
+ struct xfs_buf *bp, *n;
+ int pinned = 0;
+
+ list_for_each_entry_safe(bp, n, buffer_list, b_list) {
+ if (!wait) {
+ if (xfs_buf_ispinned(bp)) {
+ pinned++;
+ continue;
+ }
+ if (!xfs_buf_trylock(bp))
+ continue;
+ } else {
+ xfs_buf_lock(bp);
+ }
+
+ /*
+ * Someone else might have written the buffer synchronously or
+ * marked it stale in the meantime. In that case only the
+ * _XBF_DELWRI_Q flag got cleared, and we have to drop the
+ * reference and remove it from the list here.
+ */
+ if (!(bp->b_flags & _XBF_DELWRI_Q)) {
+ list_del_init(&bp->b_list);
+ xfs_buf_relse(bp);
+ continue;
+ }
+
+ list_move_tail(&bp->b_list, io_list);
+ trace_xfs_buf_delwri_split(bp, _RET_IP_);
+ }
+
+ list_sort(NULL, io_list, xfs_buf_cmp);
+
+ blk_start_plug(&plug);
+ list_for_each_entry_safe(bp, n, io_list, b_list) {
+ bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
+ bp->b_flags |= XBF_WRITE;
+
+ if (!wait) {
+ bp->b_flags |= XBF_ASYNC;
+ list_del_init(&bp->b_list);
+ }
+ xfs_bdstrat_cb(bp);
+ }
+ blk_finish_plug(&plug);
+
+ return pinned;
+}
+
+/*
+ * Write out a buffer list asynchronously.
+ *
+ * This will take the @buffer_list, write all non-locked and non-pinned buffers
+ * out and not wait for I/O completion on any of the buffers. This interface
+ * is only safely usable for callers that can track I/O completion by higher
+ * level means, e.g. AIL pushing as the @buffer_list is consumed in this
+ * function.
+ */
+int
+xfs_buf_delwri_submit_nowait(
+ struct list_head *buffer_list)
+{
+ LIST_HEAD (io_list);
+ return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
+}
+
+/*
+ * Write out a buffer list synchronously.
+ *
+ * This will take the @buffer_list, write all buffers out and wait for I/O
+ * completion on all of the buffers. @buffer_list is consumed by the function,
+ * so callers must have some other way of tracking buffers if they require such
+ * functionality.
+ */
+int
+xfs_buf_delwri_submit(
+ struct list_head *buffer_list)
+{
+ LIST_HEAD (io_list);
+ int error = 0, error2;
+ struct xfs_buf *bp;
+
+ __xfs_buf_delwri_submit(buffer_list, &io_list, true);
+
+ /* Wait for IO to complete. */
+ while (!list_empty(&io_list)) {
+ bp = list_first_entry(&io_list, struct xfs_buf, b_list);
+
+ list_del_init(&bp->b_list);
+ error2 = xfs_buf_iowait(bp);
+ xfs_buf_relse(bp);
+ if (!error)
+ error = error2;
+ }
+
+ return error;
+}
+
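+/*
+ * Set up the global buffer zone and the I/O completion workqueue at module
+ * init time; xfs_buf_terminate() below tears them down again.
+ */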
+int __init
+xfs_buf_init(void)
+{
+ xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
+ KM_ZONE_HWALIGN, NULL);
+ if (!xfs_buf_zone)
+ goto out;
+
+ xfslogd_workqueue = alloc_workqueue("xfslogd",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
+ if (!xfslogd_workqueue)
+ goto out_free_buf_zone;
+
+ return 0;
+
+ out_free_buf_zone:
+ kmem_zone_destroy(xfs_buf_zone);
+ out:
+ return -ENOMEM;
+}
+
+void
+xfs_buf_terminate(void)
+{
+ destroy_workqueue(xfslogd_workqueue);
+ kmem_zone_destroy(xfs_buf_zone);
+}
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
new file mode 100644
index 00000000000..3a7a5523d3d
--- /dev/null
+++ b/fs/xfs/xfs_buf.h
@@ -0,0 +1,393 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __XFS_BUF_H__
+#define __XFS_BUF_H__
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/uio.h>
+#include <linux/list_lru.h>
+
+/*
+ * Base types
+ */
+
+#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
+
+typedef enum {
+ XBRW_READ = 1, /* transfer into target memory */
+ XBRW_WRITE = 2, /* transfer from target memory */
+ XBRW_ZERO = 3, /* Zero target memory */
+} xfs_buf_rw_t;
+
+#define XBF_READ (1 << 0) /* buffer intended for reading from device */
+#define XBF_WRITE (1 << 1) /* buffer intended for writing to device */
+#define XBF_READ_AHEAD (1 << 2) /* asynchronous read-ahead */
+#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */
+#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
+#define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */
+#define XBF_WRITE_FAIL (1 << 24)/* async writes have failed on this buffer */
+
+/* I/O hints for the BIO layer */
+#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */
+#define XBF_FUA (1 << 11)/* force cache write through mode */
+#define XBF_FLUSH (1 << 12)/* flush the disk cache before a write */
+
+/* flags used only as arguments to access routines */
+#define XBF_TRYLOCK (1 << 16)/* lock requested, but do not wait */
+#define XBF_UNMAPPED (1 << 17)/* do not map the buffer */
+
+/* flags used only internally */
+#define _XBF_PAGES (1 << 20)/* backed by refcounted pages */
+#define _XBF_KMEM (1 << 21)/* backed by heap memory */
+#define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */
+#define _XBF_COMPOUND (1 << 23)/* compound buffer */
+
+typedef unsigned int xfs_buf_flags_t;
+
+#define XFS_BUF_FLAGS \
+ { XBF_READ, "READ" }, \
+ { XBF_WRITE, "WRITE" }, \
+ { XBF_READ_AHEAD, "READ_AHEAD" }, \
+ { XBF_ASYNC, "ASYNC" }, \
+ { XBF_DONE, "DONE" }, \
+ { XBF_STALE, "STALE" }, \
+ { XBF_WRITE_FAIL, "WRITE_FAIL" }, \
+ { XBF_SYNCIO, "SYNCIO" }, \
+ { XBF_FUA, "FUA" }, \
+ { XBF_FLUSH, "FLUSH" }, \
+ { XBF_TRYLOCK, "TRYLOCK" }, /* should never be set */\
+ { XBF_UNMAPPED, "UNMAPPED" }, /* ditto */\
+ { _XBF_PAGES, "PAGES" }, \
+ { _XBF_KMEM, "KMEM" }, \
+ { _XBF_DELWRI_Q, "DELWRI_Q" }, \
+ { _XBF_COMPOUND, "COMPOUND" }
+
+
+/*
+ * Internal state flags.
+ */
+#define XFS_BSTATE_DISPOSE (1 << 0) /* buffer being discarded */
+
+/*
+ * The xfs_buftarg contains 2 notions of "sector size" -
+ *
+ * 1) The metadata sector size, which is the minimum unit and
+ * alignment of IO which will be performed by metadata operations.
+ * 2) The device logical sector size
+ *
+ * The first is specified at mkfs time, and is stored on-disk in the
+ * superblock's sb_sectsize.
+ *
+ * The latter is derived from the underlying device, and controls direct IO
+ * alignment constraints.
+ */
+typedef struct xfs_buftarg {
+ dev_t bt_dev;
+ struct block_device *bt_bdev;
+ struct backing_dev_info *bt_bdi;
+ struct xfs_mount *bt_mount;
+ unsigned int bt_meta_sectorsize;
+ size_t bt_meta_sectormask;
+ size_t bt_logical_sectorsize;
+ size_t bt_logical_sectormask;
+
+ /* LRU control structures */
+ struct shrinker bt_shrinker;
+ struct list_lru bt_lru;
+} xfs_buftarg_t;
+
+struct xfs_buf;
+typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
+
+
+#define XB_PAGES 2
+
+struct xfs_buf_map {
+ xfs_daddr_t bm_bn; /* block number for I/O */
+ int bm_len; /* size of I/O */
+};
+
+#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
+ struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
+
+struct xfs_buf_ops {
+ void (*verify_read)(struct xfs_buf *);
+ void (*verify_write)(struct xfs_buf *);
+};
+
+typedef struct xfs_buf {
+ /*
+ * first cacheline holds all the fields needed for an uncontended cache
+ * hit to be fully processed. The semaphore straddles the cacheline
+ * boundary, but the counter and lock sit on the first cacheline,
+ * which is the only bit that is touched if we hit the semaphore
+ * fast-path on locking.
+ */
+ struct rb_node b_rbnode; /* rbtree node */
+ xfs_daddr_t b_bn; /* block number of buffer */
+ int b_length; /* size of buffer in BBs */
+ atomic_t b_hold; /* reference count */
+ atomic_t b_lru_ref; /* lru reclaim ref count */
+ xfs_buf_flags_t b_flags; /* status flags */
+ struct semaphore b_sema; /* semaphore for lockables */
+
+ /*
+ * concurrent access to b_lru and b_lru_flags is protected by
+ * bt_lru_lock and not by b_sema
+ */
+ struct list_head b_lru; /* lru list */
+ spinlock_t b_lock; /* internal state lock */
+ unsigned int b_state; /* internal state flags */
+ wait_queue_head_t b_waiters; /* unpin waiters */
+ struct list_head b_list;
+ struct xfs_perag *b_pag; /* contains rbtree root */
+ xfs_buftarg_t *b_target; /* buffer target (device) */
+ void *b_addr; /* virtual address of buffer */
+ struct work_struct b_iodone_work;
+ xfs_buf_iodone_t b_iodone; /* I/O completion function */
+ struct completion b_iowait; /* queue for I/O waiters */
+ void *b_fspriv;
+ struct xfs_trans *b_transp;
+ struct page **b_pages; /* array of page pointers */
+ struct page *b_page_array[XB_PAGES]; /* inline pages */
+ struct xfs_buf_map *b_maps; /* compound buffer map */
+ struct xfs_buf_map __b_map; /* inline compound buffer map */
+ int b_map_count;
+ int b_io_length; /* IO size in BBs */
+ atomic_t b_pin_count; /* pin count */
+ atomic_t b_io_remaining; /* #outstanding I/O requests */
+ unsigned int b_page_count; /* size of page array */
+ unsigned int b_offset; /* page offset in first page */
+ unsigned short b_error; /* error code on I/O */
+ const struct xfs_buf_ops *b_ops;
+
+#ifdef XFS_BUF_LOCK_TRACKING
+ int b_last_holder;
+#endif
+} xfs_buf_t;
+
+/* Finding and Reading Buffers */
+struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target,
+ struct xfs_buf_map *map, int nmaps,
+ xfs_buf_flags_t flags, struct xfs_buf *new_bp);
+
+static inline struct xfs_buf *
+xfs_incore(
+ struct xfs_buftarg *target,
+ xfs_daddr_t blkno,
+ size_t numblks,
+ xfs_buf_flags_t flags)
+{
+ DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
+ return _xfs_buf_find(target, &map, 1, flags, NULL);
+}
+
+struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
+ struct xfs_buf_map *map, int nmaps,
+ xfs_buf_flags_t flags);
+
+static inline struct xfs_buf *
+xfs_buf_alloc(
+ struct xfs_buftarg *target,
+ xfs_daddr_t blkno,
+ size_t numblks,
+ xfs_buf_flags_t flags)
+{
+ DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
+ return _xfs_buf_alloc(target, &map, 1, flags);
+}
+
+struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
+ struct xfs_buf_map *map, int nmaps,
+ xfs_buf_flags_t flags);
+struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
+ struct xfs_buf_map *map, int nmaps,
+ xfs_buf_flags_t flags,
+ const struct xfs_buf_ops *ops);
+void xfs_buf_readahead_map(struct xfs_buftarg *target,
+ struct xfs_buf_map *map, int nmaps,
+ const struct xfs_buf_ops *ops);
+
+static inline struct xfs_buf *
+xfs_buf_get(
+ struct xfs_buftarg *target,
+ xfs_daddr_t blkno,
+ size_t numblks,
+ xfs_buf_flags_t flags)
+{
+ DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
+ return xfs_buf_get_map(target, &map, 1, flags);
+}
+
+static inline struct xfs_buf *
+xfs_buf_read(
+ struct xfs_buftarg *target,
+ xfs_daddr_t blkno,
+ size_t numblks,
+ xfs_buf_flags_t flags,
+ const struct xfs_buf_ops *ops)
+{
+ DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
+ return xfs_buf_read_map(target, &map, 1, flags, ops);
+}
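+
+/*
+ * Typical cached read usage (an illustrative sketch only; mp, blkno, numblks,
+ * ops and the error handling here are caller specific):
+ *
+ *	bp = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0, ops);
+ *	if (!bp)
+ *		return ENOMEM;
+ *	if (bp->b_error) {
+ *		error = bp->b_error;
+ *		xfs_buf_relse(bp);
+ *		return error;
+ *	}
+ *	... use bp->b_addr, then xfs_buf_relse(bp) to unlock and release ...
+ */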
+
+static inline void
+xfs_buf_readahead(
+ struct xfs_buftarg *target,
+ xfs_daddr_t blkno,
+ size_t numblks,
+ const struct xfs_buf_ops *ops)
+{
+ DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
+ return xfs_buf_readahead_map(target, &map, 1, ops);
+}
+
+struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks);
+void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
+int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);
+
+struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
+ int flags);
+struct xfs_buf *xfs_buf_read_uncached(struct xfs_buftarg *target,
+ xfs_daddr_t daddr, size_t numblks, int flags,
+ const struct xfs_buf_ops *ops);
+void xfs_buf_hold(struct xfs_buf *bp);
+
+/* Releasing Buffers */
+extern void xfs_buf_free(xfs_buf_t *);
+extern void xfs_buf_rele(xfs_buf_t *);
+
+/* Locking and Unlocking Buffers */
+extern int xfs_buf_trylock(xfs_buf_t *);
+extern void xfs_buf_lock(xfs_buf_t *);
+extern void xfs_buf_unlock(xfs_buf_t *);
+#define xfs_buf_islocked(bp) \
+ ((bp)->b_sema.count <= 0)
+
+/* Buffer Read and Write Routines */
+extern int xfs_bwrite(struct xfs_buf *bp);
+extern void xfs_buf_ioend(xfs_buf_t *, int);
+extern void xfs_buf_ioerror(xfs_buf_t *, int);
+extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
+extern void xfs_buf_iorequest(xfs_buf_t *);
+extern int xfs_buf_iowait(xfs_buf_t *);
+extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
+ xfs_buf_rw_t);
+#define xfs_buf_zero(bp, off, len) \
+ xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
+
+extern int xfs_bioerror_relse(struct xfs_buf *);
+
+/* Buffer Utility Routines */
+extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
+
+/* Delayed Write Buffer Routines */
+extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
+extern int xfs_buf_delwri_submit(struct list_head *);
+extern int xfs_buf_delwri_submit_nowait(struct list_head *);
+
+/* Buffer Daemon Setup Routines */
+extern int xfs_buf_init(void);
+extern void xfs_buf_terminate(void);
+
+#define XFS_BUF_ZEROFLAGS(bp) \
+ ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
+ XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \
+ XBF_WRITE_FAIL))
+
+void xfs_buf_stale(struct xfs_buf *bp);
+#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE)
+#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XBF_STALE)
+
+#define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE)
+#define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE)
+#define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE)
+
+#define XFS_BUF_ASYNC(bp) ((bp)->b_flags |= XBF_ASYNC)
+#define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC)
+#define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC)
+
+#define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ)
+#define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ)
+#define XFS_BUF_ISREAD(bp) ((bp)->b_flags & XBF_READ)
+
+#define XFS_BUF_WRITE(bp) ((bp)->b_flags |= XBF_WRITE)
+#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE)
+#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE)
+
+/*
+ * These macros use the IO block map rather than b_bn. b_bn is now really
+ * just for the buffer cache index for cached buffers. As IO does not use b_bn
+ * anymore, uncached buffers do not use b_bn at all and hence must modify the IO
+ * map directly. Uncached buffers are not allowed to be discontiguous, so this
+ * is safe to do.
+ *
+ * In future, uncached buffers will pass the block number directly to the io
+ * request function and hence these macros will go away at that point.
+ */
+#define XFS_BUF_ADDR(bp) ((bp)->b_maps[0].bm_bn)
+#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))
+
+static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
+{
+ atomic_set(&bp->b_lru_ref, lru_ref);
+}
+
+static inline int xfs_buf_ispinned(struct xfs_buf *bp)
+{
+ return atomic_read(&bp->b_pin_count);
+}
+
+static inline void xfs_buf_relse(xfs_buf_t *bp)
+{
+ xfs_buf_unlock(bp);
+ xfs_buf_rele(bp);
+}
+
+static inline int
+xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
+{
+ return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
+ cksum_offset);
+}
+
+static inline void
+xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
+{
+ xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
+ cksum_offset);
+}
+
+/*
+ * Handling of buftargs.
+ */
+extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
+ struct block_device *);
+extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
+extern void xfs_wait_buftarg(xfs_buftarg_t *);
+extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);
+
+#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
+#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
+
+#endif /* __XFS_BUF_H__ */
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 53a71c62025..4654338b03f 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -17,126 +17,36 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
+#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
+#include "xfs_log.h"
kmem_zone_t *xfs_buf_item_zone;
-#ifdef XFS_TRANS_DEBUG
-/*
- * This function uses an alternate strategy for tracking the bytes
- * that the user requests to be logged. This can then be used
- * in conjunction with the bli_orig array in the buf log item to
- * catch bugs in our callers' code.
- *
- * We also double check the bits set in xfs_buf_item_log using a
- * simple algorithm to check that every byte is accounted for.
- */
-STATIC void
-xfs_buf_item_log_debug(
- xfs_buf_log_item_t *bip,
- uint first,
- uint last)
+static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
- uint x;
- uint byte;
- uint nbytes;
- uint chunk_num;
- uint word_num;
- uint bit_num;
- uint bit_set;
- uint *wordp;
-
- ASSERT(bip->bli_logged != NULL);
- byte = first;
- nbytes = last - first + 1;
- bfset(bip->bli_logged, first, nbytes);
- for (x = 0; x < nbytes; x++) {
- chunk_num = byte >> XFS_BLI_SHIFT;
- word_num = chunk_num >> BIT_TO_WORD_SHIFT;
- bit_num = chunk_num & (NBWORD - 1);
- wordp = &(bip->bli_format.blf_data_map[word_num]);
- bit_set = *wordp & (1 << bit_num);
- ASSERT(bit_set);
- byte++;
- }
+ return container_of(lip, struct xfs_buf_log_item, bli_item);
}
-/*
- * This function is called when we flush something into a buffer without
- * logging it. This happens for things like inodes which are logged
- * separately from the buffer.
- */
-void
-xfs_buf_item_flush_log_debug(
- xfs_buf_t *bp,
- uint first,
- uint last)
-{
- xfs_buf_log_item_t *bip;
- uint nbytes;
-
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
- if ((bip == NULL) || (bip->bli_item.li_type != XFS_LI_BUF)) {
- return;
- }
-
- ASSERT(bip->bli_logged != NULL);
- nbytes = last - first + 1;
- bfset(bip->bli_logged, first, nbytes);
-}
+STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp);
-/*
- * This function is called to verify that our callers have logged
- * all the bytes that they changed.
- *
- * It does this by comparing the original copy of the buffer stored in
- * the buf log item's bli_orig array to the current copy of the buffer
- * and ensuring that all bytes which mismatch are set in the bli_logged
- * array of the buf log item.
- */
-STATIC void
-xfs_buf_item_log_check(
- xfs_buf_log_item_t *bip)
+static inline int
+xfs_buf_log_format_size(
+ struct xfs_buf_log_format *blfp)
{
- char *orig;
- char *buffer;
- int x;
- xfs_buf_t *bp;
-
- ASSERT(bip->bli_orig != NULL);
- ASSERT(bip->bli_logged != NULL);
-
- bp = bip->bli_buf;
- ASSERT(XFS_BUF_COUNT(bp) > 0);
- ASSERT(XFS_BUF_PTR(bp) != NULL);
- orig = bip->bli_orig;
- buffer = XFS_BUF_PTR(bp);
- for (x = 0; x < XFS_BUF_COUNT(bp); x++) {
- if (orig[x] != buffer[x] && !btst(bip->bli_logged, x))
- cmn_err(CE_PANIC,
- "xfs_buf_item_log_check bip %x buffer %x orig %x index %d",
- bip, bp, orig, x);
- }
+ return offsetof(struct xfs_buf_log_format, blf_data_map) +
+ (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
-#else
-#define xfs_buf_item_log_debug(x,y,z)
-#define xfs_buf_item_log_check(x)
-#endif
-
-STATIC void xfs_buf_error_relse(xfs_buf_t *bp);
-STATIC void xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip);
/*
* This returns the number of log iovecs needed to log the
@@ -148,34 +58,28 @@ STATIC void xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip);
*
* If the XFS_BLI_STALE flag has been set, then log nothing.
*/
-STATIC uint
-xfs_buf_item_size(
- xfs_buf_log_item_t *bip)
+STATIC void
+xfs_buf_item_size_segment(
+ struct xfs_buf_log_item *bip,
+ struct xfs_buf_log_format *blfp,
+ int *nvecs,
+ int *nbytes)
{
- uint nvecs;
- int next_bit;
- int last_bit;
- xfs_buf_t *bp;
+ struct xfs_buf *bp = bip->bli_buf;
+ int next_bit;
+ int last_bit;
- ASSERT(atomic_read(&bip->bli_refcount) > 0);
- if (bip->bli_flags & XFS_BLI_STALE) {
- /*
- * The buffer is stale, so all we need to log
- * is the buf log format structure with the
- * cancel flag in it.
- */
- xfs_buf_item_trace("SIZE STALE", bip);
- ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
- return 1;
- }
+ last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
+ if (last_bit == -1)
+ return;
+
+ /*
+ * initial count for a dirty buffer is 2 vectors - the format structure
+ * and the first dirty region.
+ */
+ *nvecs += 2;
+ *nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;
- bp = bip->bli_buf;
- ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
- nvecs = 1;
- last_bit = xfs_next_bit(bip->bli_format.blf_data_map,
- bip->bli_format.blf_map_size, 0);
- ASSERT(last_bit != -1);
- nvecs++;
while (last_bit != -1) {
/*
* This takes the bit number to start looking from and
@@ -183,75 +87,164 @@ xfs_buf_item_size(
* if there are no more bits set or the start bit is
* beyond the end of the bitmap.
*/
- next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
- bip->bli_format.blf_map_size,
- last_bit + 1);
+ next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
+ last_bit + 1);
/*
* If we run out of bits, leave the loop,
* else if we find a new set of bits bump the number of vecs,
* else keep scanning the current set of bits.
*/
if (next_bit == -1) {
- last_bit = -1;
+ break;
} else if (next_bit != last_bit + 1) {
last_bit = next_bit;
- nvecs++;
- } else if (xfs_buf_offset(bp, next_bit * XFS_BLI_CHUNK) !=
- (xfs_buf_offset(bp, last_bit * XFS_BLI_CHUNK) +
- XFS_BLI_CHUNK)) {
+ (*nvecs)++;
+ } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
+ (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
+ XFS_BLF_CHUNK)) {
last_bit = next_bit;
- nvecs++;
+ (*nvecs)++;
} else {
last_bit++;
}
+ *nbytes += XFS_BLF_CHUNK;
}
-
- xfs_buf_item_trace("SIZE NORM", bip);
- return nvecs;
}
/*
- * This is called to fill in the vector of log iovecs for the
- * given log buf item. It fills the first entry with a buf log
- * format structure, and the rest point to contiguous chunks
- * within the buffer.
+ * This returns the number of log iovecs needed to log the given buf log item.
+ *
+ * It calculates this as 1 iovec for the buf log format structure and 1 for each
+ * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged
+ * in a single iovec.
+ *
+ * Discontiguous buffers need a format structure per region that is being
+ * logged. This makes the changes in the buffer appear to log recovery as though
+ * they came from separate buffers, just as would occur if multiple buffers
+ * were used instead of a single discontiguous buffer. This enables
+ * discontiguous buffers to be in-memory constructs, completely transparent to
+ * what ends up on disk.
+ *
+ * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
+ * format structures.
*/
STATIC void
-xfs_buf_item_format(
- xfs_buf_log_item_t *bip,
- xfs_log_iovec_t *log_vector)
+xfs_buf_item_size(
+ struct xfs_log_item *lip,
+ int *nvecs,
+ int *nbytes)
{
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+ int i;
+
+ ASSERT(atomic_read(&bip->bli_refcount) > 0);
+ if (bip->bli_flags & XFS_BLI_STALE) {
+ /*
+ * The buffer is stale, so all we need to log
+ * is the buf log format structure with the
+ * cancel flag in it.
+ */
+ trace_xfs_buf_item_size_stale(bip);
+ ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
+ *nvecs += bip->bli_format_count;
+ for (i = 0; i < bip->bli_format_count; i++) {
+ *nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
+ }
+ return;
+ }
+
+ ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
+
+ if (bip->bli_flags & XFS_BLI_ORDERED) {
+ /*
+ * The buffer has been logged just to order it.
+ * It is not being included in the transaction
+ * commit, so no vectors are used at all.
+ */
+ trace_xfs_buf_item_size_ordered(bip);
+ *nvecs = XFS_LOG_VEC_ORDERED;
+ return;
+ }
+
+ /*
+ * the vector count is based on the number of buffer vectors we have
+ * dirty bits in. This will only be greater than one when we have a
+ * compound buffer with more than one segment dirty. Hence for compound
+ * buffers we need to track which segment the dirty bits correspond to,
+ * and when we move from one segment to the next increment the vector
+ * count for the extra buf log format structure that will need to be
+ * written.
+ */
+ for (i = 0; i < bip->bli_format_count; i++) {
+ xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
+ nvecs, nbytes);
+ }
+ trace_xfs_buf_item_size(bip);
+}
+
+static inline void
+xfs_buf_item_copy_iovec(
+ struct xfs_log_vec *lv,
+ struct xfs_log_iovec **vecp,
+ struct xfs_buf *bp,
+ uint offset,
+ int first_bit,
+ uint nbits)
+{
+ offset += first_bit * XFS_BLF_CHUNK;
+ xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
+ xfs_buf_offset(bp, offset),
+ nbits * XFS_BLF_CHUNK);
+}
+
+static inline bool
+xfs_buf_item_straddle(
+ struct xfs_buf *bp,
+ uint offset,
+ int next_bit,
+ int last_bit)
+{
+ return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
+ (xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
+ XFS_BLF_CHUNK);
+}
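As a hedged illustration of the helper above: with 128 byte chunks, two bits
that are adjacent in the dirty bitmap may still refer to chunks that are not
adjacent in memory, because a discontiguous buffer is backed by separate pages.
In that case

	xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
		xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) + XFS_BLF_CHUNK

so xfs_buf_item_straddle() returns true and the format loop below starts a new
iovec for the next chunk instead of extending the current one.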
+
+static void
+xfs_buf_item_format_segment(
+ struct xfs_buf_log_item *bip,
+ struct xfs_log_vec *lv,
+ struct xfs_log_iovec **vecp,
+ uint offset,
+ struct xfs_buf_log_format *blfp)
+{
+ struct xfs_buf *bp = bip->bli_buf;
uint base_size;
- uint nvecs;
- xfs_log_iovec_t *vecp;
- xfs_buf_t *bp;
int first_bit;
int last_bit;
int next_bit;
uint nbits;
- uint buffer_offset;
- ASSERT(atomic_read(&bip->bli_refcount) > 0);
- ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
- (bip->bli_flags & XFS_BLI_STALE));
- bp = bip->bli_buf;
- vecp = log_vector;
+ /* copy the flags across from the base format item */
+ blfp->blf_flags = bip->__bli_format.blf_flags;
/*
- * The size of the base structure is the size of the
- * declared structure plus the space for the extra words
- * of the bitmap. We subtract one from the map size, because
- * the first element of the bitmap is accounted for in the
- * size of the base structure.
+ * Base size is the actual size of the ondisk structure - it reflects
+ * the actual size of the dirty bitmap rather than the size of the in
+ * memory structure.
*/
- base_size =
- (uint)(sizeof(xfs_buf_log_format_t) +
- ((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
- vecp->i_addr = (xfs_caddr_t)&bip->bli_format;
- vecp->i_len = base_size;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BFORMAT);
- vecp++;
- nvecs = 1;
+ base_size = xfs_buf_log_format_size(blfp);
+
+ first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
+ if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
+ /*
+ * If the map is not dirty in the transaction, mark
+ * the size as zero and do not advance the vector pointer.
+ */
+ return;
+ }
+
+ blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
+ blfp->blf_size = 1;
if (bip->bli_flags & XFS_BLI_STALE) {
/*
@@ -259,18 +252,15 @@ xfs_buf_item_format(
* is the buf log format structure with the
* cancel flag in it.
*/
- xfs_buf_item_trace("FORMAT STALE", bip);
- ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
- bip->bli_format.blf_size = nvecs;
+ trace_xfs_buf_item_format_stale(bip);
+ ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
return;
}
+
/*
* Fill in an iovec for each set of contiguous chunks.
*/
- first_bit = xfs_next_bit(bip->bli_format.blf_data_map,
- bip->bli_format.blf_map_size, 0);
- ASSERT(first_bit != -1);
last_bit = first_bit;
nbits = 1;
for (;;) {
@@ -280,48 +270,25 @@ xfs_buf_item_format(
* if there are no more bits set or the start bit is
* beyond the end of the bitmap.
*/
- next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
- bip->bli_format.blf_map_size,
- (uint)last_bit + 1);
+ next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
+ (uint)last_bit + 1);
/*
- * If we run out of bits fill in the last iovec and get
- * out of the loop.
- * Else if we start a new set of bits then fill in the
- * iovec for the series we were looking at and start
- * counting the bits in the new one.
- * Else we're still in the same set of bits so just
- * keep counting and scanning.
+ * If we run out of bits fill in the last iovec and get out of
+ * the loop. Else if we start a new set of bits then fill in
+ * the iovec for the series we were looking at and start
+ * counting the bits in the new one. Else we're still in the
+ * same set of bits so just keep counting and scanning.
*/
if (next_bit == -1) {
- buffer_offset = first_bit * XFS_BLI_CHUNK;
- vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
- vecp->i_len = nbits * XFS_BLI_CHUNK;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
- nvecs++;
+ xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
+ first_bit, nbits);
+ blfp->blf_size++;
break;
- } else if (next_bit != last_bit + 1) {
- buffer_offset = first_bit * XFS_BLI_CHUNK;
- vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
- vecp->i_len = nbits * XFS_BLI_CHUNK;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
- nvecs++;
- vecp++;
- first_bit = next_bit;
- last_bit = next_bit;
- nbits = 1;
- } else if (xfs_buf_offset(bp, next_bit << XFS_BLI_SHIFT) !=
- (xfs_buf_offset(bp, last_bit << XFS_BLI_SHIFT) +
- XFS_BLI_CHUNK)) {
- buffer_offset = first_bit * XFS_BLI_CHUNK;
- vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
- vecp->i_len = nbits * XFS_BLI_CHUNK;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
-/* You would think we need to bump the nvecs here too, but we do not
- * this number is used by recovery, and it gets confused by the boundary
- * split here
- * nvecs++;
- */
- vecp++;
+ } else if (next_bit != last_bit + 1 ||
+ xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
+ xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
+ first_bit, nbits);
+ blfp->blf_size++;
first_bit = next_bit;
last_bit = next_bit;
nbits = 1;
@@ -330,269 +297,363 @@ xfs_buf_item_format(
nbits++;
}
}
- bip->bli_format.blf_size = nvecs;
+}
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given log buf item. It fills the first entry with a buf log
+ * format structure, and the rest point to contiguous chunks
+ * within the buffer.
+ */
+STATIC void
+xfs_buf_item_format(
+ struct xfs_log_item *lip,
+ struct xfs_log_vec *lv)
+{
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+ struct xfs_buf *bp = bip->bli_buf;
+ struct xfs_log_iovec *vecp = NULL;
+ uint offset = 0;
+ int i;
+
+ ASSERT(atomic_read(&bip->bli_refcount) > 0);
+ ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
+ (bip->bli_flags & XFS_BLI_STALE));
+
+ /*
+ * If it is an inode buffer, transfer the in-memory state to the
+ * format flags and clear the in-memory state.
+ *
+ * For buffer-based inode allocation, we do not transfer
+ * this state if the inode buffer allocation has not yet been committed
+ * to the log, as setting the XFS_BLI_INODE_BUF flag will prevent
+ * correct replay of the inode allocation.
+ *
+ * For icreate item based inode allocation, the buffers aren't written
+ * to the journal during allocation, and hence we should always tag the
+ * buffer as an inode buffer so that the correct unlinked list replay
+ * occurs during recovery.
+ */
+ if (bip->bli_flags & XFS_BLI_INODE_BUF) {
+ if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
+ !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
+ xfs_log_item_in_current_chkpt(lip)))
+ bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
+ bip->bli_flags &= ~XFS_BLI_INODE_BUF;
+ }
+
+ if ((bip->bli_flags & (XFS_BLI_ORDERED|XFS_BLI_STALE)) ==
+ XFS_BLI_ORDERED) {
+ /*
+ * The buffer has been logged just to order it. It is not being
+ * included in the transaction commit, so don't format it.
+ */
+ trace_xfs_buf_item_format_ordered(bip);
+ return;
+ }
+
+ for (i = 0; i < bip->bli_format_count; i++) {
+ xfs_buf_item_format_segment(bip, lv, &vecp, offset,
+ &bip->bli_formats[i]);
+ offset += bp->b_maps[i].bm_len;
+ }
/*
* Check to make sure everything is consistent.
*/
- xfs_buf_item_trace("FORMAT NORM", bip);
- xfs_buf_item_log_check(bip);
+ trace_xfs_buf_item_format(bip);
}
/*
- * This is called to pin the buffer associated with the buf log
- * item in memory so it cannot be written out. Simply call bpin()
- * on the buffer to do this.
+ * This is called to pin the buffer associated with the buf log item in memory
+ * so it cannot be written out.
+ *
+ * We also always take a reference to the buffer log item here so that the bli
+ * is held while the item is pinned in memory. This means that we can
+ * unconditionally drop the reference count a transaction holds when the
+ * transaction is completed.
*/
STATIC void
xfs_buf_item_pin(
- xfs_buf_log_item_t *bip)
+ struct xfs_log_item *lip)
{
- xfs_buf_t *bp;
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
- bp = bip->bli_buf;
- ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
+ (bip->bli_flags & XFS_BLI_ORDERED) ||
(bip->bli_flags & XFS_BLI_STALE));
- xfs_buf_item_trace("PIN", bip);
- xfs_buftrace("XFS_PIN", bp);
- xfs_bpin(bp);
-}
+ trace_xfs_buf_item_pin(bip);
+
+ atomic_inc(&bip->bli_refcount);
+ atomic_inc(&bip->bli_buf->b_pin_count);
+}
/*
* This is called to unpin the buffer associated with the buf log
* item which was previously pinned with a call to xfs_buf_item_pin().
- * Just call bunpin() on the buffer to do this.
*
* Also drop the reference to the buf item for the current transaction.
* If the XFS_BLI_STALE flag is set and we are the last reference,
* then free up the buf log item and unlock the buffer.
+ *
+ * If the remove flag is set we are called from uncommit in the
+ * forced-shutdown path. If that is true and the reference count on
+ * the log item is going to drop to zero we need to free the item's
+ * descriptor in the transaction.
*/
STATIC void
xfs_buf_item_unpin(
- xfs_buf_log_item_t *bip,
- int stale)
+ struct xfs_log_item *lip,
+ int remove)
{
- xfs_mount_t *mp;
- xfs_buf_t *bp;
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+ xfs_buf_t *bp = bip->bli_buf;
+ struct xfs_ail *ailp = lip->li_ailp;
+ int stale = bip->bli_flags & XFS_BLI_STALE;
int freed;
- bp = bip->bli_buf;
- ASSERT(bp != NULL);
- ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
+ ASSERT(bp->b_fspriv == bip);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
- xfs_buf_item_trace("UNPIN", bip);
- xfs_buftrace("XFS_UNPIN", bp);
+
+ trace_xfs_buf_item_unpin(bip);
freed = atomic_dec_and_test(&bip->bli_refcount);
- mp = bip->bli_item.li_mountp;
- xfs_bunpin(bp);
+
+ if (atomic_dec_and_test(&bp->b_pin_count))
+ wake_up_all(&bp->b_waiters);
+
if (freed && stale) {
ASSERT(bip->bli_flags & XFS_BLI_STALE);
- ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
- ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
+ ASSERT(xfs_buf_islocked(bp));
ASSERT(XFS_BUF_ISSTALE(bp));
- ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
- xfs_buf_item_trace("UNPIN STALE", bip);
- xfs_buftrace("XFS_UNPIN STALE", bp);
+ ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
+
+ trace_xfs_buf_item_unpin_stale(bip);
+
+ if (remove) {
+ /*
+ * If we are in a transaction context, we have to
+ * remove the log item from the transaction as we are
+ * about to release our reference to the buffer. If we
+ * don't, the unlock that occurs later in
+ * xfs_trans_uncommit() will try to reference the
+ * buffer which we no longer have a hold on.
+ */
+ if (lip->li_desc)
+ xfs_trans_del_item(lip);
+
+ /*
+ * Since the transaction no longer refers to the buffer,
+ * the buffer should no longer refer to the transaction.
+ */
+ bp->b_transp = NULL;
+ }
+
/*
* If we get called here because of an IO error, we may
- * or may not have the item on the AIL. xfs_trans_delete_ail()
+ * or may not have the item on the AIL. xfs_trans_ail_delete()
* will take care of that situation.
- * xfs_trans_delete_ail() drops the AIL lock.
+ * xfs_trans_ail_delete() drops the AIL lock.
*/
if (bip->bli_flags & XFS_BLI_STALE_INODE) {
- xfs_buf_do_callbacks(bp, (xfs_log_item_t *)bip);
- XFS_BUF_SET_FSPRIVATE(bp, NULL);
- XFS_BUF_CLR_IODONE_FUNC(bp);
+ xfs_buf_do_callbacks(bp);
+ bp->b_fspriv = NULL;
+ bp->b_iodone = NULL;
} else {
- spin_lock(&mp->m_ail_lock);
- xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip);
+ spin_lock(&ailp->xa_lock);
+ xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
xfs_buf_item_relse(bp);
- ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL);
+ ASSERT(bp->b_fspriv == NULL);
}
xfs_buf_relse(bp);
- }
-}
-
-/*
- * this is called from uncommit in the forced-shutdown path.
- * we need to check to see if the reference count on the log item
- * is going to drop to zero. If so, unpin will free the log item
- * so we need to free the item's descriptor (that points to the item)
- * in the transaction.
- */
-STATIC void
-xfs_buf_item_unpin_remove(
- xfs_buf_log_item_t *bip,
- xfs_trans_t *tp)
-{
- xfs_buf_t *bp;
- xfs_log_item_desc_t *lidp;
- int stale = 0;
-
- bp = bip->bli_buf;
- /*
- * will xfs_buf_item_unpin() call xfs_buf_item_relse()?
- */
- if ((atomic_read(&bip->bli_refcount) == 1) &&
- (bip->bli_flags & XFS_BLI_STALE)) {
- ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0);
- xfs_buf_item_trace("UNPIN REMOVE", bip);
- xfs_buftrace("XFS_UNPIN_REMOVE", bp);
+ } else if (freed && remove) {
/*
- * yes -- clear the xaction descriptor in-use flag
- * and free the chunk if required. We can safely
- * do some work here and then call buf_item_unpin
- * to do the rest because if the if is true, then
- * we are holding the buffer locked so no one else
- * will be able to bump up the refcount.
+ * There are currently two references to the buffer - the active
+ * LRU reference and the buf log item. What we are about to do
+ * here - simulate a failed IO completion - requires 3
+ * references.
+ *
+ * The LRU reference is removed by the xfs_buf_stale() call. The
+ * buf item reference is removed by the xfs_buf_iodone()
+ * callback that is run by xfs_buf_do_callbacks() during ioend
+ * processing (via the bp->b_iodone callback), and then finally
+ * the ioend processing will drop the IO reference if the buffer
+ * is marked XBF_ASYNC.
+ *
+ * Hence we need to take an additional reference here so that IO
+ * completion processing doesn't free the buffer prematurely.
*/
- lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) bip);
- stale = lidp->lid_flags & XFS_LID_BUF_STALE;
- xfs_trans_free_item(tp, lidp);
- /*
- * Since the transaction no longer refers to the buffer,
- * the buffer should no longer refer to the transaction.
- */
- XFS_BUF_SET_FSPRIVATE2(bp, NULL);
+ xfs_buf_lock(bp);
+ xfs_buf_hold(bp);
+ bp->b_flags |= XBF_ASYNC;
+ xfs_buf_ioerror(bp, EIO);
+ XFS_BUF_UNDONE(bp);
+ xfs_buf_stale(bp);
+ xfs_buf_ioend(bp, 0);
}
-
- xfs_buf_item_unpin(bip, stale);
-
- return;
}
/*
- * This is called to attempt to lock the buffer associated with this
- * buf log item. Don't sleep on the buffer lock. If we can't get
- * the lock right away, return 0. If we can get the lock, pull the
- * buffer from the free list, mark it busy, and return 1.
+ * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
+ * seconds so as to not spam logs too much on repeated detection of the same
+ * buffer being bad.
*/
+
+DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
+
STATIC uint
-xfs_buf_item_trylock(
- xfs_buf_log_item_t *bip)
+xfs_buf_item_push(
+ struct xfs_log_item *lip,
+ struct list_head *buffer_list)
{
- xfs_buf_t *bp;
-
- bp = bip->bli_buf;
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+ struct xfs_buf *bp = bip->bli_buf;
+ uint rval = XFS_ITEM_SUCCESS;
- if (XFS_BUF_ISPINNED(bp)) {
+ if (xfs_buf_ispinned(bp))
return XFS_ITEM_PINNED;
- }
-
- if (!XFS_BUF_CPSEMA(bp)) {
+ if (!xfs_buf_trylock(bp)) {
+ /*
+ * If we have just raced with a buffer being pinned and it has
+ * been marked stale, we could end up stalling until someone else
+ * issues a log force to unpin the stale buffer. Check for the
+ * race condition here so xfsaild recognizes the buffer is pinned
+ * and queues a log force to move it along.
+ */
+ if (xfs_buf_ispinned(bp))
+ return XFS_ITEM_PINNED;
return XFS_ITEM_LOCKED;
}
- /*
- * Remove the buffer from the free list. Only do this
- * if it's on the free list. Private buffers like the
- * superblock buffer are not.
- */
- XFS_BUF_HOLD(bp);
-
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- xfs_buf_item_trace("TRYLOCK SUCCESS", bip);
- return XFS_ITEM_SUCCESS;
+
+ trace_xfs_buf_item_push(bip);
+
+ /* has a previous flush failed due to IO errors? */
+ if ((bp->b_flags & XBF_WRITE_FAIL) &&
+ ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
+ xfs_warn(bp->b_target->bt_mount,
+"Detected failing async write on buffer block 0x%llx. Retrying async write.\n",
+ (long long)bp->b_bn);
+ }
+
+ if (!xfs_buf_delwri_queue(bp, buffer_list))
+ rval = XFS_ITEM_FLUSHING;
+ xfs_buf_unlock(bp);
+ return rval;
}
/*
- * Release the buffer associated with the buf log item.
- * If there is no dirty logged data associated with the
- * buffer recorded in the buf log item, then free the
- * buf log item and remove the reference to it in the
- * buffer.
+ * Release the buffer associated with the buf log item. If there is no dirty
+ * logged data associated with the buffer recorded in the buf log item, then
+ * free the buf log item and remove the reference to it in the buffer.
+ *
+ * This call ignores the recursion count. It is only called when the buffer
+ * should REALLY be unlocked, regardless of the recursion count.
*
- * This call ignores the recursion count. It is only called
- * when the buffer should REALLY be unlocked, regardless
- * of the recursion count.
+ * We unconditionally drop the transaction's reference to the log item. If the
+ * item was logged, then another reference was taken when it was pinned, so we
+ * can safely drop the transaction reference now. This also allows us to avoid
+ * potential races with the unpin code freeing the bli by not referencing the
+ * bli after we've dropped the reference count.
*
- * If the XFS_BLI_HOLD flag is set in the buf log item, then
- * free the log item if necessary but do not unlock the buffer.
- * This is for support of xfs_trans_bhold(). Make sure the
- * XFS_BLI_HOLD field is cleared if we don't free the item.
+ * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
+ * if necessary but do not unlock the buffer. This is for support of
+ * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
+ * free the item.
*/
STATIC void
xfs_buf_item_unlock(
- xfs_buf_log_item_t *bip)
+ struct xfs_log_item *lip)
{
- int aborted;
- xfs_buf_t *bp;
- uint hold;
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+ struct xfs_buf *bp = bip->bli_buf;
+ bool clean;
+ bool aborted;
+ int flags;
- bp = bip->bli_buf;
- xfs_buftrace("XFS_UNLOCK", bp);
+ /* Clear the buffer's association with this transaction. */
+ bp->b_transp = NULL;
/*
- * Clear the buffer's association with this transaction.
+ * If this is a transaction abort, don't return early. Instead, allow
+ * the brelse to happen. Normally it would be done for stale
+ * (cancelled) buffers at unpin time, but we'll never go through the
+ * pin/unpin cycle if we abort inside commit.
*/
- XFS_BUF_SET_FSPRIVATE2(bp, NULL);
-
+ aborted = (lip->li_flags & XFS_LI_ABORTED) ? true : false;
/*
- * If this is a transaction abort, don't return early.
- * Instead, allow the brelse to happen.
- * Normally it would be done for stale (cancelled) buffers
- * at unpin time, but we'll never go through the pin/unpin
- * cycle if we abort inside commit.
+ * Before possibly freeing the buf item, copy the per-transaction state
+ * so we can reference it safely later after clearing it from the
+ * buffer log item.
*/
- aborted = (bip->bli_item.li_flags & XFS_LI_ABORTED) != 0;
+ flags = bip->bli_flags;
+ bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
/*
- * If the buf item is marked stale, then don't do anything.
- * We'll unlock the buffer and free the buf item when the
- * buffer is unpinned for the last time.
+ * If the buf item is marked stale, then don't do anything. We'll
+ * unlock the buffer and free the buf item when the buffer is unpinned
+ * for the last time.
*/
- if (bip->bli_flags & XFS_BLI_STALE) {
- bip->bli_flags &= ~XFS_BLI_LOGGED;
- xfs_buf_item_trace("UNLOCK STALE", bip);
- ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
- if (!aborted)
+ if (flags & XFS_BLI_STALE) {
+ trace_xfs_buf_item_unlock_stale(bip);
+ ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
+ if (!aborted) {
+ atomic_dec(&bip->bli_refcount);
return;
+ }
}
- /*
- * Drop the transaction's reference to the log item if
- * it was not logged as part of the transaction. Otherwise
- * we'll drop the reference in xfs_buf_item_unpin() when
- * the transaction is really through with the buffer.
- */
- if (!(bip->bli_flags & XFS_BLI_LOGGED)) {
- atomic_dec(&bip->bli_refcount);
- } else {
- /*
- * Clear the logged flag since this is per
- * transaction state.
- */
- bip->bli_flags &= ~XFS_BLI_LOGGED;
- }
+ trace_xfs_buf_item_unlock(bip);
/*
- * Before possibly freeing the buf item, determine if we should
- * release the buffer at the end of this routine.
+ * If the buf item isn't tracking any data, free it; otherwise drop the
+ * reference we hold to it. If we are aborting the transaction, this may
+ * be the only reference to the buf item, so we free it regardless of
+ * whether it is dirty. A dirty abort implies a shutdown, anyway.
+ *
+ * Ordered buffers are dirty but may have no recorded changes, so ensure
+ * we only release clean items here.
*/
- hold = bip->bli_flags & XFS_BLI_HOLD;
- xfs_buf_item_trace("UNLOCK", bip);
+ clean = (flags & XFS_BLI_DIRTY) ? false : true;
+ if (clean) {
+ int i;
+ for (i = 0; i < bip->bli_format_count; i++) {
+ if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
+ bip->bli_formats[i].blf_map_size)) {
+ clean = false;
+ break;
+ }
+ }
+ }
/*
- * If the buf item isn't tracking any data, free it.
- * Otherwise, if XFS_BLI_HOLD is set clear it.
+ * Clean buffers, by definition, cannot be in the AIL. However, aborted
+ * buffers may be dirty and hence in the AIL. Therefore if we are
+ * aborting a buffer and we've just taken the last reference away, we
+ * have to check if it is in the AIL before freeing it. We need to free
+ * it in this case, because an aborted transaction has already shut the
+ * filesystem down and this is the last chance we will have to do so.
*/
- if (xfs_bitmap_empty(bip->bli_format.blf_data_map,
- bip->bli_format.blf_map_size)) {
- xfs_buf_item_relse(bp);
- } else if (hold) {
- bip->bli_flags &= ~XFS_BLI_HOLD;
+ if (atomic_dec_and_test(&bip->bli_refcount)) {
+ if (clean)
+ xfs_buf_item_relse(bp);
+ else if (aborted) {
+ ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
+ if (lip->li_flags & XFS_LI_IN_AIL) {
+ spin_lock(&lip->li_ailp->xa_lock);
+ xfs_trans_ail_delete(lip->li_ailp, lip,
+ SHUTDOWN_LOG_IO_ERROR);
+ }
+ xfs_buf_item_relse(bp);
+ }
}
- /*
- * Release the buffer if XFS_BLI_HOLD was not set.
- */
- if (!hold) {
+ if (!(flags & XFS_BLI_HOLD))
xfs_buf_relse(bp);
- }
}
/*
@@ -615,74 +676,68 @@ xfs_buf_item_unlock(
*/
STATIC xfs_lsn_t
xfs_buf_item_committed(
- xfs_buf_log_item_t *bip,
+ struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
- xfs_buf_item_trace("COMMITTED", bip);
- if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
- (bip->bli_item.li_lsn != 0)) {
- return bip->bli_item.li_lsn;
- }
- return (lsn);
-}
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
-/*
- * This is called to asynchronously write the buffer associated with this
- * buf log item out to disk. The buffer will already have been locked by
- * a successful call to xfs_buf_item_trylock(). If the buffer still has
- * B_DELWRI set, then get it going out to disk with a call to bawrite().
- * If not, then just release the buffer.
- */
-STATIC void
-xfs_buf_item_push(
- xfs_buf_log_item_t *bip)
-{
- xfs_buf_t *bp;
+ trace_xfs_buf_item_committed(bip);
- ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- xfs_buf_item_trace("PUSH", bip);
-
- bp = bip->bli_buf;
-
- if (XFS_BUF_ISDELAYWRITE(bp)) {
- int error;
- error = xfs_bawrite(bip->bli_item.li_mountp, bp);
- if (error)
- xfs_fs_cmn_err(CE_WARN, bip->bli_item.li_mountp,
- "xfs_buf_item_push: pushbuf error %d on bip %p, bp %p",
- error, bip, bp);
- } else {
- xfs_buf_relse(bp);
- }
+ if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
+ return lip->li_lsn;
+ return lsn;
}
-/* ARGSUSED */
STATIC void
-xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn)
+xfs_buf_item_committing(
+ struct xfs_log_item *lip,
+ xfs_lsn_t commit_lsn)
{
}
/*
* This is the ops vector shared by all buf log items.
*/
-static struct xfs_item_ops xfs_buf_item_ops = {
- .iop_size = (uint(*)(xfs_log_item_t*))xfs_buf_item_size,
- .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
- xfs_buf_item_format,
- .iop_pin = (void(*)(xfs_log_item_t*))xfs_buf_item_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*, int))xfs_buf_item_unpin,
- .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
- xfs_buf_item_unpin_remove,
- .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_buf_item_trylock,
- .iop_unlock = (void(*)(xfs_log_item_t*))xfs_buf_item_unlock,
- .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_buf_item_committed,
- .iop_push = (void(*)(xfs_log_item_t*))xfs_buf_item_push,
- .iop_pushbuf = NULL,
- .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_buf_item_committing
+static const struct xfs_item_ops xfs_buf_item_ops = {
+ .iop_size = xfs_buf_item_size,
+ .iop_format = xfs_buf_item_format,
+ .iop_pin = xfs_buf_item_pin,
+ .iop_unpin = xfs_buf_item_unpin,
+ .iop_unlock = xfs_buf_item_unlock,
+ .iop_committed = xfs_buf_item_committed,
+ .iop_push = xfs_buf_item_push,
+ .iop_committing = xfs_buf_item_committing
};
+STATIC int
+xfs_buf_item_get_format(
+ struct xfs_buf_log_item *bip,
+ int count)
+{
+ ASSERT(bip->bli_formats == NULL);
+ bip->bli_format_count = count;
+
+ if (count == 1) {
+ bip->bli_formats = &bip->__bli_format;
+ return 0;
+ }
+
+ bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
+ KM_SLEEP);
+ if (!bip->bli_formats)
+ return ENOMEM;
+ return 0;
+}
+
+STATIC void
+xfs_buf_item_free_format(
+ struct xfs_buf_log_item *bip)
+{
+ if (bip->bli_formats != &bip->__bli_format) {
+ kmem_free(bip->bli_formats);
+ bip->bli_formats = NULL;
+ }
+}
/*
* Allocate a new buf log item to go with the given buffer.
@@ -696,10 +751,12 @@ xfs_buf_item_init(
xfs_buf_t *bp,
xfs_mount_t *mp)
{
- xfs_log_item_t *lip;
+ xfs_log_item_t *lip = bp->b_fspriv;
xfs_buf_log_item_t *bip;
int chunks;
int map_size;
+ int error;
+ int i;
/*
* Check to see if there is already a buf log item for
@@ -707,62 +764,45 @@ xfs_buf_item_init(
* the first. If we do already have one, there is
* nothing to do here so return.
*/
- if (XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *) != mp)
- XFS_BUF_SET_FSPRIVATE3(bp, mp);
- XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb);
- if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
- lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
- if (lip->li_type == XFS_LI_BUF) {
- return;
- }
- }
+ ASSERT(bp->b_target->bt_mount == mp);
+ if (lip != NULL && lip->li_type == XFS_LI_BUF)
+ return;
- /*
- * chunks is the number of XFS_BLI_CHUNK size pieces
- * the buffer can be divided into. Make sure not to
- * truncate any pieces. map_size is the size of the
- * bitmap needed to describe the chunks of the buffer.
- */
- chunks = (int)((XFS_BUF_COUNT(bp) + (XFS_BLI_CHUNK - 1)) >> XFS_BLI_SHIFT);
- map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT);
-
- bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
- KM_SLEEP);
- bip->bli_item.li_type = XFS_LI_BUF;
- bip->bli_item.li_ops = &xfs_buf_item_ops;
- bip->bli_item.li_mountp = mp;
+ bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
+ xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
bip->bli_buf = bp;
- bip->bli_format.blf_type = XFS_LI_BUF;
- bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
- bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
- bip->bli_format.blf_map_size = map_size;
-#ifdef XFS_BLI_TRACE
- bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_SLEEP);
-#endif
-
-#ifdef XFS_TRANS_DEBUG
+ xfs_buf_hold(bp);
+
/*
- * Allocate the arrays for tracking what needs to be logged
- * and what our callers request to be logged. bli_orig
- * holds a copy of the original, clean buffer for comparison
- * against, and bli_logged keeps a 1 bit flag per byte in
- * the buffer to indicate which bytes the callers have asked
- * to have logged.
+ * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
+ * can be divided into. Make sure not to truncate any pieces.
+ * map_size is the size of the bitmap needed to describe the
+ * chunks of the buffer.
+ *
+ * Discontiguous buffer support follows the layout of the underlying
+ * buffer. This makes the implementation as simple as possible.
*/
- bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP);
- memcpy(bip->bli_orig, XFS_BUF_PTR(bp), XFS_BUF_COUNT(bp));
- bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP);
-#endif
+ error = xfs_buf_item_get_format(bip, bp->b_map_count);
+ ASSERT(error == 0);
+
+ for (i = 0; i < bip->bli_format_count; i++) {
+ chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
+ XFS_BLF_CHUNK);
+ map_size = DIV_ROUND_UP(chunks, NBWORD);
+
+ bip->bli_formats[i].blf_type = XFS_LI_BUF;
+ bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
+ bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
+ bip->bli_formats[i].blf_map_size = map_size;
+ }
/*
* Put the buf item into the list of items attached to the
* buffer at the front.
*/
- if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
- bip->bli_item.li_bio_list =
- XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
- }
- XFS_BUF_SET_FSPRIVATE(bp, bip);
+ if (bp->b_fspriv)
+ bip->bli_item.li_bio_list = bp->b_fspriv;
+ bp->b_fspriv = bip;
}
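A worked example of the per-map sizing in the loop above (assuming 512 byte
basic blocks, XFS_BLF_CHUNK == 128 and NBWORD == 32): a buffer with a single
4096 byte map has bm_len == 8, so

	chunks   = DIV_ROUND_UP(BBTOB(8), 128) = DIV_ROUND_UP(4096, 128) = 32
	map_size = DIV_ROUND_UP(32, 32) = 1

i.e. a single 32-bit word in blf_data_map is enough to track all 32 loggable
128 byte chunks of that map.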
@@ -770,11 +810,11 @@ xfs_buf_item_init(
* Mark bytes first through last inclusive as dirty in the buf
* item's bitmap.
*/
-void
-xfs_buf_item_log(
- xfs_buf_log_item_t *bip,
+static void
+xfs_buf_item_log_segment(
uint first,
- uint last)
+ uint last,
+ uint *map)
{
uint first_bit;
uint last_bit;
@@ -787,16 +827,10 @@ xfs_buf_item_log(
uint mask;
/*
- * Mark the item as having some dirty data for
- * quick reference in xfs_buf_item_dirty.
- */
- bip->bli_flags |= XFS_BLI_DIRTY;
-
- /*
* Convert byte offsets to bit numbers.
*/
- first_bit = first >> XFS_BLI_SHIFT;
- last_bit = last >> XFS_BLI_SHIFT;
+ first_bit = first >> XFS_BLF_SHIFT;
+ last_bit = last >> XFS_BLF_SHIFT;
/*
* Calculate the total number of bits to be set.
@@ -808,7 +842,7 @@ xfs_buf_item_log(
* to set a bit in.
*/
word_num = first_bit >> BIT_TO_WORD_SHIFT;
- wordp = &(bip->bli_format.blf_data_map[word_num]);
+ wordp = &map[word_num];
/*
* Calculate the starting bit in the first word.
@@ -851,13 +885,50 @@ xfs_buf_item_log(
mask = (1 << end_bit) - 1;
*wordp |= mask;
}
+}
- xfs_buf_item_log_debug(bip, first, last);
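A worked example of the byte-to-bit conversion performed by
xfs_buf_item_log_segment() (assuming XFS_BLF_SHIFT == 7, i.e. 128 byte chunks,
and 32 bit bitmap words): logging bytes 256 through 511 of a segment gives

	first_bit = 256 >> 7 = 2
	last_bit  = 511 >> 7 = 3
	word_num  = 2 >> 5 = 0

so two bits are set, and bits 2 and 3 of blf_data_map[0] mark the third and
fourth 128 byte chunks of the segment dirty.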
+/*
+ * Mark bytes first through last inclusive as dirty in the buf
+ * item's bitmap.
+ */
+void
+xfs_buf_item_log(
+ xfs_buf_log_item_t *bip,
+ uint first,
+ uint last)
+{
+ int i;
+ uint start;
+ uint end;
+ struct xfs_buf *bp = bip->bli_buf;
+
+ /*
+ * walk each buffer segment and mark them dirty appropriately.
+ */
+ start = 0;
+ for (i = 0; i < bip->bli_format_count; i++) {
+ if (start > last)
+ break;
+ end = start + BBTOB(bp->b_maps[i].bm_len);
+ if (first > end) {
+ start += BBTOB(bp->b_maps[i].bm_len);
+ continue;
+ }
+ if (first < start)
+ first = start;
+ if (end > last)
+ end = last;
+
+ xfs_buf_item_log_segment(first, end,
+ &bip->bli_formats[i].blf_data_map[0]);
+
+ start += bp->b_maps[i].bm_len;
+ }
}
/*
- * Return 1 if the buffer has some data that has been logged (at any
+ * Return 1 if the buffer has been logged or ordered in a transaction (at any
* point, not just the current transaction) and 0 if not.
*/
uint
@@ -867,6 +938,14 @@ xfs_buf_item_dirty(
return (bip->bli_flags & XFS_BLI_DIRTY);
}
+STATIC void
+xfs_buf_item_free(
+ xfs_buf_log_item_t *bip)
+{
+ xfs_buf_item_free_format(bip);
+ kmem_zone_free(xfs_buf_item_zone, bip);
+}
+
/*
* This is called when the buf log item is no longer needed. It should
* free the buf log item associated with the given buffer and clear
@@ -878,27 +957,17 @@ void
xfs_buf_item_relse(
xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip;
+ xfs_buf_log_item_t *bip = bp->b_fspriv;
- xfs_buftrace("XFS_RELSE", bp);
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
- XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list);
- if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) &&
- (XFS_BUF_IODONE_FUNC(bp) != NULL)) {
- XFS_BUF_CLR_IODONE_FUNC(bp);
- }
+ trace_xfs_buf_item_relse(bp, _RET_IP_);
+ ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
-#ifdef XFS_TRANS_DEBUG
- kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp));
- bip->bli_orig = NULL;
- kmem_free(bip->bli_logged, XFS_BUF_COUNT(bp) / NBBY);
- bip->bli_logged = NULL;
-#endif /* XFS_TRANS_DEBUG */
+ bp->b_fspriv = bip->bli_item.li_bio_list;
+ if (bp->b_fspriv == NULL)
+ bp->b_iodone = NULL;
-#ifdef XFS_BLI_TRACE
- ktrace_free(bip->bli_trace);
-#endif
- kmem_zone_free(xfs_buf_item_zone, bip);
+ xfs_buf_rele(bp);
+ xfs_buf_item_free(bip);
}
@@ -919,32 +988,42 @@ xfs_buf_attach_iodone(
{
xfs_log_item_t *head_lip;
- ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+ ASSERT(xfs_buf_islocked(bp));
lip->li_cb = cb;
- if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
- head_lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+ head_lip = bp->b_fspriv;
+ if (head_lip) {
lip->li_bio_list = head_lip->li_bio_list;
head_lip->li_bio_list = lip;
} else {
- XFS_BUF_SET_FSPRIVATE(bp, lip);
+ bp->b_fspriv = lip;
}
- ASSERT((XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks) ||
- (XFS_BUF_IODONE_FUNC(bp) == NULL));
- XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
+ ASSERT(bp->b_iodone == NULL ||
+ bp->b_iodone == xfs_buf_iodone_callbacks);
+ bp->b_iodone = xfs_buf_iodone_callbacks;
}
+/*
+ * We can have many callbacks on a buffer. Running the callbacks individually
+ * can cause a lot of contention on the AIL lock, so we allow for a single
+ * callback to be able to scan the remaining lip->li_bio_list for other items
+ * of the same type and callback so they can be processed in the first call.
+ *
+ * As a result, the loop walking the callback list below will also modify the
+ * list. It removes the first item from the list and then runs the callback.
+ * The loop then restarts from the new head of the list. This allows the
+ * callback to scan and modify the list attached to the buffer and we don't
+ * have to care about maintaining a next item pointer.
+ */
STATIC void
xfs_buf_do_callbacks(
- xfs_buf_t *bp,
- xfs_log_item_t *lip)
+ struct xfs_buf *bp)
{
- xfs_log_item_t *nlip;
+ struct xfs_log_item *lip;
- while (lip != NULL) {
- nlip = lip->li_bio_list;
+ while ((lip = bp->b_fspriv) != NULL) {
+ bp->b_fspriv = lip->li_bio_list;
ASSERT(lip->li_cb != NULL);
/*
* Clear the next pointer so we don't have any
@@ -954,7 +1033,6 @@ xfs_buf_do_callbacks(
*/
lip->li_bio_list = NULL;
lip->li_cb(bp, lip);
- lip = nlip;
}
}
@@ -967,141 +1045,78 @@ xfs_buf_do_callbacks(
*/
void
xfs_buf_iodone_callbacks(
- xfs_buf_t *bp)
+ struct xfs_buf *bp)
{
- xfs_log_item_t *lip;
- static ulong lasttime;
- static xfs_buftarg_t *lasttarg;
- xfs_mount_t *mp;
+ struct xfs_log_item *lip = bp->b_fspriv;
+ struct xfs_mount *mp = lip->li_mountp;
+ static ulong lasttime;
+ static xfs_buftarg_t *lasttarg;
- ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
- lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+ if (likely(!bp->b_error))
+ goto do_callbacks;
- if (XFS_BUF_GETERROR(bp) != 0) {
- /*
- * If we've already decided to shutdown the filesystem
- * because of IO errors, there's no point in giving this
- * a retry.
- */
- mp = lip->li_mountp;
- if (XFS_FORCED_SHUTDOWN(mp)) {
- ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
- XFS_BUF_SUPER_STALE(bp);
- xfs_buftrace("BUF_IODONE_CB", bp);
- xfs_buf_do_callbacks(bp, lip);
- XFS_BUF_SET_FSPRIVATE(bp, NULL);
- XFS_BUF_CLR_IODONE_FUNC(bp);
+ /*
+ * If we've already decided to shut down the filesystem because of
+ * I/O errors, there's no point in giving this a retry.
+ */
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ xfs_buf_stale(bp);
+ XFS_BUF_DONE(bp);
+ trace_xfs_buf_item_iodone(bp, _RET_IP_);
+ goto do_callbacks;
+ }
- /*
- * XFS_SHUT flag gets set when we go thru the
- * entire buffer cache and deliberately start
- * throwing away delayed write buffers.
- * Since there's no biowait done on those,
- * we should just brelse them.
- */
- if (XFS_BUF_ISSHUT(bp)) {
- XFS_BUF_UNSHUT(bp);
- xfs_buf_relse(bp);
- } else {
- xfs_biodone(bp);
- }
+ if (bp->b_target != lasttarg ||
+ time_after(jiffies, (lasttime + 5*HZ))) {
+ lasttime = jiffies;
+ xfs_buf_ioerror_alert(bp, __func__);
+ }
+ lasttarg = bp->b_target;
- return;
- }
+ /*
+ * If the write was asynchronous then no one will be looking for the
+ * error. Clear the error state and write the buffer out again.
+ *
+ * XXX: This helps against transient write errors, but we need to find
+ * a way to shut the filesystem down if the writes keep failing.
+ *
+ * In practice we'll shut the filesystem down soon, as non-transient
+ * errors tend to affect the whole device and a failing log write
+ * will make us give up. But we really ought to do better here.
+ */
+ if (XFS_BUF_ISASYNC(bp)) {
+ ASSERT(bp->b_iodone != NULL);
- if ((XFS_BUF_TARGET(bp) != lasttarg) ||
- (time_after(jiffies, (lasttime + 5*HZ)))) {
- lasttime = jiffies;
- cmn_err(CE_ALERT, "Device %s, XFS metadata write error"
- " block 0x%llx in %s",
- XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
- (__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname);
- }
- lasttarg = XFS_BUF_TARGET(bp);
+ trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
- if (XFS_BUF_ISASYNC(bp)) {
- /*
- * If the write was asynchronous then noone will be
- * looking for the error. Clear the error state
- * and write the buffer out again delayed write.
- *
- * XXXsup This is OK, so long as we catch these
- * before we start the umount; we don't want these
- * DELWRI metadata bufs to be hanging around.
- */
- XFS_BUF_ERROR(bp,0); /* errno of 0 unsets the flag */
+ xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
- if (!(XFS_BUF_ISSTALE(bp))) {
- XFS_BUF_DELAYWRITE(bp);
- XFS_BUF_DONE(bp);
- XFS_BUF_SET_START(bp);
- }
- ASSERT(XFS_BUF_IODONE_FUNC(bp));
- xfs_buftrace("BUF_IODONE ASYNC", bp);
- xfs_buf_relse(bp);
+ if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
+ bp->b_flags |= XBF_WRITE | XBF_ASYNC |
+ XBF_DONE | XBF_WRITE_FAIL;
+ xfs_buf_iorequest(bp);
} else {
- /*
- * If the write of the buffer was not asynchronous,
- * then we want to make sure to return the error
- * to the caller of bwrite(). Because of this we
- * cannot clear the B_ERROR state at this point.
- * Instead we install a callback function that
- * will be called when the buffer is released, and
- * that routine will clear the error state and
- * set the buffer to be written out again after
- * some delay.
- */
- /* We actually overwrite the existing b-relse
- function at times, but we're gonna be shutting down
- anyway. */
- XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse);
- XFS_BUF_DONE(bp);
- XFS_BUF_V_IODONESEMA(bp);
+ xfs_buf_relse(bp);
}
+
return;
}
-#ifdef XFSERRORDEBUG
- xfs_buftrace("XFS BUFCB NOERR", bp);
-#endif
- xfs_buf_do_callbacks(bp, lip);
- XFS_BUF_SET_FSPRIVATE(bp, NULL);
- XFS_BUF_CLR_IODONE_FUNC(bp);
- xfs_biodone(bp);
-}
-/*
- * This is a callback routine attached to a buffer which gets an error
- * when being written out synchronously.
- */
-STATIC void
-xfs_buf_error_relse(
- xfs_buf_t *bp)
-{
- xfs_log_item_t *lip;
- xfs_mount_t *mp;
-
- lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
- mp = (xfs_mount_t *)lip->li_mountp;
- ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
-
- XFS_BUF_STALE(bp);
- XFS_BUF_DONE(bp);
- XFS_BUF_UNDELAYWRITE(bp);
- XFS_BUF_ERROR(bp,0);
- xfs_buftrace("BUF_ERROR_RELSE", bp);
- if (! XFS_FORCED_SHUTDOWN(mp))
- xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
/*
- * We have to unpin the pinned buffers so do the
- * callbacks.
+ * If the write of the buffer was synchronous, we want to make
+ * sure to return the error to the caller of xfs_bwrite().
*/
- xfs_buf_do_callbacks(bp, lip);
- XFS_BUF_SET_FSPRIVATE(bp, NULL);
- XFS_BUF_CLR_IODONE_FUNC(bp);
- XFS_BUF_SET_BRELSE_FUNC(bp,NULL);
- xfs_buf_relse(bp);
-}
+ xfs_buf_stale(bp);
+ XFS_BUF_DONE(bp);
+
+ trace_xfs_buf_error_relse(bp, _RET_IP_);
+do_callbacks:
+ xfs_buf_do_callbacks(bp);
+ bp->b_fspriv = NULL;
+ bp->b_iodone = NULL;
+ xfs_buf_ioend(bp, 0);
+}
/*
* This is the iodone() function for buffers which have been
@@ -1110,73 +1125,27 @@ xfs_buf_error_relse(
* It is called by xfs_buf_iodone_callbacks() above which will take
* care of cleaning up the buffer itself.
*/
-/* ARGSUSED */
void
xfs_buf_iodone(
- xfs_buf_t *bp,
- xfs_buf_log_item_t *bip)
+ struct xfs_buf *bp,
+ struct xfs_log_item *lip)
{
- struct xfs_mount *mp;
+ struct xfs_ail *ailp = lip->li_ailp;
- ASSERT(bip->bli_buf == bp);
+ ASSERT(BUF_ITEM(lip)->bli_buf == bp);
- mp = bip->bli_item.li_mountp;
+ xfs_buf_rele(bp);
/*
* If we are forcibly shutting down, this may well be
* off the AIL already. That's because we simulate the
* log-committed callbacks to unpin these buffers. Or we may never
* have put this item on AIL because of the transaction was
- * aborted forcibly. xfs_trans_delete_ail() takes care of these.
+ * aborted forcibly. xfs_trans_ail_delete() takes care of these.
*
* Either way, AIL is useless if we're forcing a shutdown.
*/
- spin_lock(&mp->m_ail_lock);
- /*
- * xfs_trans_delete_ail() drops the AIL lock.
- */
- xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip);
-
-#ifdef XFS_TRANS_DEBUG
- kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp));
- bip->bli_orig = NULL;
- kmem_free(bip->bli_logged, XFS_BUF_COUNT(bp) / NBBY);
- bip->bli_logged = NULL;
-#endif /* XFS_TRANS_DEBUG */
-
-#ifdef XFS_BLI_TRACE
- ktrace_free(bip->bli_trace);
-#endif
- kmem_zone_free(xfs_buf_item_zone, bip);
-}
-
-#if defined(XFS_BLI_TRACE)
-void
-xfs_buf_item_trace(
- char *id,
- xfs_buf_log_item_t *bip)
-{
- xfs_buf_t *bp;
- ASSERT(bip->bli_trace != NULL);
-
- bp = bip->bli_buf;
- ktrace_enter(bip->bli_trace,
- (void *)id,
- (void *)bip->bli_buf,
- (void *)((unsigned long)bip->bli_flags),
- (void *)((unsigned long)bip->bli_recur),
- (void *)((unsigned long)atomic_read(&bip->bli_refcount)),
- (void *)((unsigned long)
- (0xFFFFFFFF & XFS_BUF_ADDR(bp) >> 32)),
- (void *)((unsigned long)(0xFFFFFFFF & XFS_BUF_ADDR(bp))),
- (void *)((unsigned long)XFS_BUF_COUNT(bp)),
- (void *)((unsigned long)XFS_BUF_BFLAGS(bp)),
- XFS_BUF_FSPRIVATE(bp, void *),
- XFS_BUF_FSPRIVATE2(bp, void *),
- (void *)(unsigned long)XFS_BUF_ISPINNED(bp),
- (void *)XFS_BUF_IODONE_FUNC(bp),
- (void *)((unsigned long)(XFS_BUF_VALUSEMA(bp))),
- (void *)bip->bli_item.li_desc,
- (void *)((unsigned long)bip->bli_item.li_flags));
+ spin_lock(&ailp->xa_lock);
+ xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
+ xfs_buf_item_free(BUF_ITEM(lip));
}
-#endif /* XFS_BLI_TRACE */
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 5a41c348bb1..3f3455a4151 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -18,74 +18,33 @@
#ifndef __XFS_BUF_ITEM_H__
#define __XFS_BUF_ITEM_H__
-extern kmem_zone_t *xfs_buf_item_zone;
-
-/*
- * This is the structure used to lay out a buf log item in the
- * log. The data map describes which 128 byte chunks of the buffer
- * have been logged.
- * For 6.2 and beyond, this is XFS_LI_BUF. We use this to log everything.
- */
-typedef struct xfs_buf_log_format_t {
- unsigned short blf_type; /* buf log item type indicator */
- unsigned short blf_size; /* size of this item */
- ushort blf_flags; /* misc state */
- ushort blf_len; /* number of blocks in this buf */
- __int64_t blf_blkno; /* starting blkno of this buf */
- unsigned int blf_map_size; /* size of data bitmap in words */
- unsigned int blf_data_map[1];/* variable size bitmap of */
- /* regions of buffer in this item */
-} xfs_buf_log_format_t;
-
-/*
- * This flag indicates that the buffer contains on disk inodes
- * and requires special recovery handling.
- */
-#define XFS_BLI_INODE_BUF 0x1
-/*
- * This flag indicates that the buffer should not be replayed
- * during recovery because its blocks are being freed.
- */
-#define XFS_BLI_CANCEL 0x2
-/*
- * This flag indicates that the buffer contains on disk
- * user or group dquots and may require special recovery handling.
- */
-#define XFS_BLI_UDQUOT_BUF 0x4
-#define XFS_BLI_PDQUOT_BUF 0x8
-#define XFS_BLI_GDQUOT_BUF 0x10
+/* kernel only definitions */
-#define XFS_BLI_CHUNK 128
-#define XFS_BLI_SHIFT 7
-#define BIT_TO_WORD_SHIFT 5
-#define NBWORD (NBBY * sizeof(unsigned int))
-
-/*
- * buf log item flags
- */
+/* buf log item flags */
#define XFS_BLI_HOLD 0x01
#define XFS_BLI_DIRTY 0x02
#define XFS_BLI_STALE 0x04
#define XFS_BLI_LOGGED 0x08
#define XFS_BLI_INODE_ALLOC_BUF 0x10
#define XFS_BLI_STALE_INODE 0x20
+#define XFS_BLI_INODE_BUF 0x40
+#define XFS_BLI_ORDERED 0x80
+#define XFS_BLI_FLAGS \
+ { XFS_BLI_HOLD, "HOLD" }, \
+ { XFS_BLI_DIRTY, "DIRTY" }, \
+ { XFS_BLI_STALE, "STALE" }, \
+ { XFS_BLI_LOGGED, "LOGGED" }, \
+ { XFS_BLI_INODE_ALLOC_BUF, "INODE_ALLOC" }, \
+ { XFS_BLI_STALE_INODE, "STALE_INODE" }, \
+ { XFS_BLI_INODE_BUF, "INODE_BUF" }, \
+ { XFS_BLI_ORDERED, "ORDERED" }
-#ifdef __KERNEL__
struct xfs_buf;
-struct ktrace;
struct xfs_mount;
struct xfs_buf_log_item;
-#if defined(XFS_BLI_TRACE)
-#define XFS_BLI_TRACE_SIZE 32
-
-void xfs_buf_item_trace(char *, struct xfs_buf_log_item *);
-#else
-#define xfs_buf_item_trace(id, bip)
-#endif
-
/*
* This is the in core log item structure used to track information
* needed to log buffers. It tracks how many times the lock has been
@@ -97,27 +56,11 @@ typedef struct xfs_buf_log_item {
unsigned int bli_flags; /* misc flags */
unsigned int bli_recur; /* lock recursion count */
atomic_t bli_refcount; /* cnt of tp refs */
-#ifdef XFS_BLI_TRACE
- struct ktrace *bli_trace; /* event trace buf */
-#endif
-#ifdef XFS_TRANS_DEBUG
- char *bli_orig; /* original buffer copy */
- char *bli_logged; /* bytes logged (bitmap) */
-#endif
- xfs_buf_log_format_t bli_format; /* in-log header */
+ int bli_format_count; /* count of headers */
+ struct xfs_buf_log_format *bli_formats; /* array of in-log header ptrs */
+ struct xfs_buf_log_format __bli_format; /* embedded in-log header */
} xfs_buf_log_item_t;
-/*
- * This structure is used during recovery to record the buf log
- * items which have been canceled and should not be replayed.
- */
-typedef struct xfs_buf_cancel {
- xfs_daddr_t bc_blkno;
- uint bc_len;
- int bc_refcount;
- struct xfs_buf_cancel *bc_next;
-} xfs_buf_cancel_t;
-
void xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
void xfs_buf_item_relse(struct xfs_buf *);
void xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint);
@@ -126,18 +69,8 @@ void xfs_buf_attach_iodone(struct xfs_buf *,
void(*)(struct xfs_buf *, xfs_log_item_t *),
xfs_log_item_t *);
void xfs_buf_iodone_callbacks(struct xfs_buf *);
-void xfs_buf_iodone(struct xfs_buf *, xfs_buf_log_item_t *);
+void xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
-#ifdef XFS_TRANS_DEBUG
-void
-xfs_buf_item_flush_log_debug(
- struct xfs_buf *bp,
- uint first,
- uint last);
-#else
-#define xfs_buf_item_flush_log_debug(bp, first, last)
-#endif
-
-#endif /* __KERNEL__ */
+extern kmem_zone_t *xfs_buf_item_zone;
#endif /* __XFS_BUF_ITEM_H__ */
diff --git a/fs/xfs/xfs_cksum.h b/fs/xfs/xfs_cksum.h
new file mode 100644
index 00000000000..fad1676ad8c
--- /dev/null
+++ b/fs/xfs/xfs_cksum.h
@@ -0,0 +1,63 @@
+#ifndef _XFS_CKSUM_H
+#define _XFS_CKSUM_H 1
+
+#define XFS_CRC_SEED (~(__uint32_t)0)
+
+/*
+ * Calculate the intermediate checksum for a buffer that has the CRC field
+ * inside it. The offset of the 32 bit crc field is passed as the
+ * cksum_offset parameter.
+ */
+static inline __uint32_t
+xfs_start_cksum(char *buffer, size_t length, unsigned long cksum_offset)
+{
+ __uint32_t zero = 0;
+ __uint32_t crc;
+
+ /* Calculate CRC up to the checksum. */
+ crc = crc32c(XFS_CRC_SEED, buffer, cksum_offset);
+
+ /* Skip checksum field */
+ crc = crc32c(crc, &zero, sizeof(__u32));
+
+ /* Calculate the rest of the CRC. */
+ return crc32c(crc, &buffer[cksum_offset + sizeof(__be32)],
+ length - (cksum_offset + sizeof(__be32)));
+}
+
+/*
+ * Convert the intermediate checksum to the final ondisk format.
+ *
+ * The CRC32c calculation uses LE format even on BE machines, but returns the
+ * result in host endian format. Hence we need to byte swap it back to LE format
+ * so that it is consistent on disk.
+ */
+static inline __le32
+xfs_end_cksum(__uint32_t crc)
+{
+ return ~cpu_to_le32(crc);
+}
+
+/*
+ * Helper to generate the checksum for a buffer.
+ */
+static inline void
+xfs_update_cksum(char *buffer, size_t length, unsigned long cksum_offset)
+{
+ __uint32_t crc = xfs_start_cksum(buffer, length, cksum_offset);
+
+ *(__le32 *)(buffer + cksum_offset) = xfs_end_cksum(crc);
+}
+
+/*
+ * Helper to verify the checksum for a buffer.
+ */
+static inline int
+xfs_verify_cksum(char *buffer, size_t length, unsigned long cksum_offset)
+{
+ __uint32_t crc = xfs_start_cksum(buffer, length, cksum_offset);
+
+ return *(__le32 *)(buffer + cksum_offset) == xfs_end_cksum(crc);
+}
+
+#endif /* _XFS_CKSUM_H */
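A hedged usage sketch of the helpers above; the structure and function names
here are invented for illustration and are not part of XFS. An on-disk header
with an embedded CRC field is typically checksummed immediately before it is
written and verified right after it is read:

struct example_hdr {
	__be32		h_magic;
	__le32		h_crc;		/* CRC covers the whole header */
	__be64		h_blkno;
};

static inline void
example_hdr_calc_crc(struct example_hdr *hdr)
{
	xfs_update_cksum((char *)hdr, sizeof(*hdr),
			 offsetof(struct example_hdr, h_crc));
}

static inline int
example_hdr_verify_crc(struct example_hdr *hdr)
{
	return xfs_verify_cksum((char *)hdr, sizeof(*hdr),
				offsetof(struct example_hdr, h_crc));
}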
diff --git a/fs/xfs/xfs_clnt.h b/fs/xfs/xfs_clnt.h
deleted file mode 100644
index d5d1e60ee22..00000000000
--- a/fs/xfs/xfs_clnt.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_CLNT_H__
-#define __XFS_CLNT_H__
-
-/*
- * XFS arguments structure, constructed from the arguments we
- * are passed via the mount system call.
- *
- * NOTE: The mount system call is handled differently between
- * Linux and IRIX. In IRIX we worked work with a binary data
- * structure coming in across the syscall interface from user
- * space (the mount userspace knows about each filesystem type
- * and the set of valid options for it, and converts the users
- * argument string into a binary structure _before_ making the
- * system call), and the ABI issues that this implies.
- *
- * In Linux, we are passed a comma separated set of options;
- * ie. a NULL terminated string of characters. Userspace mount
- * code does not have any knowledge of mount options expected by
- * each filesystem type and so each filesystem parses its mount
- * options in kernel space.
- *
- * For the Linux port, we kept this structure pretty much intact
- * and use it internally (because the existing code groks it).
- */
-struct xfs_mount_args {
- int flags; /* flags -> see XFSMNT_... macros below */
- int flags2; /* flags -> see XFSMNT2_... macros below */
- int logbufs; /* Number of log buffers, -1 to default */
- int logbufsize; /* Size of log buffers, -1 to default */
- char fsname[MAXNAMELEN+1]; /* data device name */
- char rtname[MAXNAMELEN+1]; /* realtime device filename */
- char logname[MAXNAMELEN+1]; /* journal device filename */
- char mtpt[MAXNAMELEN+1]; /* filesystem mount point */
- int sunit; /* stripe unit (BBs) */
- int swidth; /* stripe width (BBs), multiple of sunit */
- uchar_t iosizelog; /* log2 of the preferred I/O size */
- int ihashsize; /* inode hash table size (buckets) */
-};
-
-/*
- * XFS mount option flags -- args->flags1
- */
-#define XFSMNT_ATTR2 0x00000001 /* allow ATTR2 EA format */
-#define XFSMNT_WSYNC 0x00000002 /* safe mode nfs mount
- * compatible */
-#define XFSMNT_INO64 0x00000004 /* move inode numbers up
- * past 2^32 */
-#define XFSMNT_UQUOTA 0x00000008 /* user quota accounting */
-#define XFSMNT_PQUOTA 0x00000010 /* IRIX prj quota accounting */
-#define XFSMNT_UQUOTAENF 0x00000020 /* user quota limit
- * enforcement */
-#define XFSMNT_PQUOTAENF 0x00000040 /* IRIX project quota limit
- * enforcement */
-#define XFSMNT_QUIET 0x00000080 /* don't report mount errors */
-#define XFSMNT_NOALIGN 0x00000200 /* don't allocate at
- * stripe boundaries*/
-#define XFSMNT_RETERR 0x00000400 /* return error to user */
-#define XFSMNT_NORECOVERY 0x00000800 /* no recovery, implies
- * read-only mount */
-#define XFSMNT_SHARED 0x00001000 /* shared XFS mount */
-#define XFSMNT_IOSIZE 0x00002000 /* optimize for I/O size */
-#define XFSMNT_OSYNCISOSYNC 0x00004000 /* o_sync is REALLY o_sync */
- /* (osyncisdsync is default) */
-#define XFSMNT_32BITINODES 0x00200000 /* restrict inodes to 32
- * bits of address space */
-#define XFSMNT_GQUOTA 0x00400000 /* group quota accounting */
-#define XFSMNT_GQUOTAENF 0x00800000 /* group quota limit
- * enforcement */
-#define XFSMNT_NOUUID 0x01000000 /* Ignore fs uuid */
-#define XFSMNT_DMAPI 0x02000000 /* enable dmapi/xdsm */
-#define XFSMNT_BARRIER 0x04000000 /* use write barriers */
-#define XFSMNT_IKEEP 0x08000000 /* inode cluster delete */
-#define XFSMNT_SWALLOC 0x10000000 /* turn on stripe width
- * allocation */
-#define XFSMNT_DIRSYNC 0x40000000 /* sync creat,link,unlink,rename
- * symlink,mkdir,rmdir,mknod */
-#define XFSMNT_FLAGS2 0x80000000 /* more flags set in flags2 */
-
-/*
- * XFS mount option flags -- args->flags2
- */
-#define XFSMNT2_COMPAT_IOSIZE 0x00000001 /* don't report large preferred
- * I/O size in stat(2) */
-#define XFSMNT2_FILESTREAMS 0x00000002 /* enable the filestreams
- * allocator */
-
-#endif /* __XFS_CLNT_H__ */
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c