author		Linus Torvalds <torvalds@linux-foundation.org>	2010-03-03 08:53:17 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-03 08:53:17 -0800
commit		eca281aad0c293e7698edea5834c252dd8108afa (patch)
tree		a78f3d0c092cf181bd13cffccece68e786ac56a9 /fs
parent		7f5b09c15ab989ed5ce4adda0be42c1302df70b7 (diff)
parent		9df5778ecee8b301b447fc05706792d5f447ace5 (diff)
Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2
* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2: (36 commits)
Ocfs2: Move ocfs2 ioctl definitions from ocfs2_fs.h to newly added ocfs2_ioctl.h
ocfs2: send SIGXFSZ if new filesize exceeds limit -v2
ocfs2/userdlm: Add tracing in userdlm
ocfs2: Use a separate masklog for AST and BASTs
dlm: allow dlm do recovery during shutdown
ocfs2: Only bug out in direct io write for reflinked extent.
ocfs2: fix warning in ocfs2_file_aio_write()
ocfs2_dlmfs: Enable the use of user cluster stacks.
ocfs2_dlmfs: Use the stackglue.
ocfs2_dlmfs: Don't honor truncate. The size of a dlmfs file is LVB_LEN
ocfs2: Pass the locking protocol into ocfs2_cluster_connect().
ocfs2: Remove the ast pointers from ocfs2_stack_plugins
ocfs2: Hang the locking proto on the cluster conn and use it in asts.
ocfs2: Attach the connection to the lksb
ocfs2: Pass lksbs back from stackglue ast/bast functions.
ocfs2_dlmfs: Move to its own directory
ocfs2_dlmfs: Use poll() to signify BASTs.
ocfs2_dlmfs: Add capabilities parameter.
ocfs2: Handle errors while setting external xattr values.
ocfs2: Set inline xattr entries with ocfs2_xa_set()
...
Diffstat (limited to 'fs')
31 files changed, 2014 insertions, 1592 deletions
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index 600d2d2ade1..791c0886c06 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -46,6 +46,7 @@ ocfs2_stackglue-objs := stackglue.o
 ocfs2_stack_o2cb-objs := stack_o2cb.o
 ocfs2_stack_user-objs := stack_user.o
 
+obj-$(CONFIG_OCFS2_FS) += dlmfs/
 # cluster/ is always needed when OCFS2_FS for masklog support
 obj-$(CONFIG_OCFS2_FS) += cluster/
 obj-$(CONFIG_OCFS2_FS_O2CB) += dlm/
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index d17bdc718f7..2bbe1ecc08c 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1050,7 +1050,8 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle,
 		strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
 		eb->h_blkno = cpu_to_le64(first_blkno);
 		eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
-		eb->h_suballoc_slot = cpu_to_le16(osb->slot_num);
+		eb->h_suballoc_slot =
+			cpu_to_le16(meta_ac->ac_alloc_slot);
 		eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
 		eb->h_list.l_count =
 			cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));
@@ -6037,7 +6038,7 @@ static void ocfs2_truncate_log_worker(struct work_struct *work)
 	if (status < 0)
 		mlog_errno(status);
 	else
-		ocfs2_init_inode_steal_slot(osb);
+		ocfs2_init_steal_slots(osb);
 
 	mlog_exit(status);
 }
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 7e9df11260f..4c2a6d282c4 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -577,8 +577,9 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
 		goto bail;
 	}
 
-	/* We should already CoW the refcounted extent. */
-	BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
+	/* We should already CoW the refcounted extent in case of create. */
+	BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED));
+
 	/*
 	 * get_more_blocks() expects us to describe a hole by clearing
 	 * the mapped bit on bh_result().
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
index 1cd2934de61..b39da877b12 100644
--- a/fs/ocfs2/cluster/masklog.c
+++ b/fs/ocfs2/cluster/masklog.c
@@ -112,6 +112,7 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
 	define_mask(XATTR),
 	define_mask(QUOTA),
 	define_mask(REFCOUNT),
+	define_mask(BASTS),
 	define_mask(ERROR),
 	define_mask(NOTICE),
 	define_mask(KTHREAD),
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 9b4d11726cf..3dfddbec32f 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -114,6 +114,7 @@
 #define ML_XATTR	0x0000000020000000ULL /* ocfs2 extended attributes */
 #define ML_QUOTA	0x0000000040000000ULL /* ocfs2 quota operations */
 #define ML_REFCOUNT	0x0000000080000000ULL /* refcount tree operations */
+#define ML_BASTS	0x0000001000000000ULL /* dlmglue asts and basts */
 /* bits that are infrequently given and frequently matched in the high word */
 #define ML_ERROR	0x0000000100000000ULL /* sent to KERN_ERR */
 #define ML_NOTICE	0x0000000200000000ULL /* setn to KERN_NOTICE */
@@ -194,9 +195,9 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;
  * previous token if args expands to nothing.
  */
 #define __mlog_printk(level, fmt, args...)				\
-	printk(level "(%u,%lu):%s:%d " fmt, task_pid_nr(current),	\
-	       __mlog_cpu_guess, __PRETTY_FUNCTION__, __LINE__ ,	\
-	       ##args)
+	printk(level "(%s,%u,%lu):%s:%d " fmt, current->comm,		\
+	       task_pid_nr(current), __mlog_cpu_guess,			\
+	       __PRETTY_FUNCTION__, __LINE__ , ##args)
 
 #define mlog(mask, fmt, args...) do {					\
 	u64 __m = MLOG_MASK_PREFIX | (mask);				\
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 28c3ec23879..765d66c7098 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2439,7 +2439,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
 	dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
 	memset(dx_root, 0, osb->sb->s_blocksize);
 	strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
-	dx_root->dr_suballoc_slot = cpu_to_le16(osb->slot_num);
+	dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
 	dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
 	dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation);
 	dx_root->dr_blkno = cpu_to_le64(dr_blkno);
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
index 19036137570..dcebf0d920f 100644
--- a/fs/ocfs2/dlm/Makefile
+++ b/fs/ocfs2/dlm/Makefile
@@ -1,8 +1,7 @@
 EXTRA_CFLAGS += -Ifs/ocfs2
 
-obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o ocfs2_dlmfs.o
+obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
 
 ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \
 	dlmmaster.o dlmast.o dlmconvert.o dlmlock.o dlmunlock.o dlmver.o
 
-ocfs2_dlmfs-objs := userdlm.o dlmfs.o dlmfsver.o
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 344bcf90cbf..b4f99de2caf 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -310,7 +310,7 @@ static int dlm_recovery_thread(void *data)
 	mlog(0, "dlm thread running for %s...\n", dlm->name);
 
 	while (!kthread_should_stop()) {
-		if (dlm_joined(dlm)) {
+		if (dlm_domain_fully_joined(dlm)) {
 			status = dlm_do_recovery(dlm);
 			if (status == -EAGAIN) {
 				/* do not sleep, recheck immediately. */
diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile
new file mode 100644
index 00000000000..df69b4856d0
--- /dev/null
+++ b/fs/ocfs2/dlmfs/Makefile
@@ -0,0 +1,5 @@
+EXTRA_CFLAGS += -Ifs/ocfs2
+
+obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
+
+ocfs2_dlmfs-objs := userdlm.o dlmfs.o dlmfsver.o
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 02bf17808bd..1b0de157a08 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -43,24 +43,17 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/backing-dev.h>
+#include <linux/poll.h>
 
 #include <asm/uaccess.h>
 
-
-#include "cluster/nodemanager.h"
-#include "cluster/heartbeat.h"
-#include "cluster/tcp.h"
-
-#include "dlmapi.h"
-
+#include "stackglue.h"
 #include "userdlm.h"
-
 #include "dlmfsver.h"
 
 #define MLOG_MASK_PREFIX ML_DLMFS
 #include "cluster/masklog.h"
 
-#include "ocfs2_lockingver.h"
 
 static const struct super_operations dlmfs_ops;
 static const struct file_operations dlmfs_file_operations;
@@ -71,15 +64,46 @@ static struct kmem_cache *dlmfs_inode_cache;
 
 struct workqueue_struct *user_dlm_worker;
 
+
+
 /*
- * This is the userdlmfs locking protocol version.
+ * These are the ABI capabilities of dlmfs.
+ *
+ * Over time, dlmfs has added some features that were not part of the
+ * initial ABI.  Unfortunately, some of these features are not detectable
+ * via standard usage.  For example, Linux's default poll always returns
+ * POLLIN, so there is no way for a caller of poll(2) to know when dlmfs
+ * added poll support.  Instead, we provide this list of new capabilities.
+ *
+ * Capabilities is a read-only attribute.  We do it as a module parameter
+ * so we can discover it whether dlmfs is built in, loaded, or even not
+ * loaded.
  *
- * See fs/ocfs2/dlmglue.c for more details on locking versions.
+ * The ABI features are local to this machine's dlmfs mount.  This is
+ * distinct from the locking protocol, which is concerned with inter-node
+ * interaction.
+ *
+ * Capabilities:
+ * - bast	: POLLIN against the file descriptor of a held lock
+ *		  signifies a bast fired on the lock.
  */
-static const struct dlm_protocol_version user_locking_protocol = {
-	.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
-	.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
-};
+#define DLMFS_CAPABILITIES "bast stackglue"
+extern int param_set_dlmfs_capabilities(const char *val,
+					struct kernel_param *kp)
+{
+	printk(KERN_ERR "%s: readonly parameter\n", kp->name);
+	return -EINVAL;
+}
+static int param_get_dlmfs_capabilities(char *buffer,
+					struct kernel_param *kp)
+{
+	return strlcpy(buffer, DLMFS_CAPABILITIES,
+		       strlen(DLMFS_CAPABILITIES) + 1);
+}
+module_param_call(capabilities, param_set_dlmfs_capabilities,
+		  param_get_dlmfs_capabilities, NULL, 0444);
+MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES);
+
 
 /*
  * decodes a set of open flags into a valid lock level and a set of flags.
@@ -179,13 +203,46 @@ static int dlmfs_file_release(struct inode *inode,
 	return 0;
 }
 
+/*
+ * We do ->setattr() just to override size changes.  Our size is the size
+ * of the LVB and nothing else.
+ */
+static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	int error;
+	struct inode *inode = dentry->d_inode;
+
+	attr->ia_valid &= ~ATTR_SIZE;
+	error = inode_change_ok(inode, attr);
+	if (!error)
+		error = inode_setattr(inode, attr);
+
+	return error;
+}
+
+static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
+{
+	int event = 0;
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct dlmfs_inode_private *ip = DLMFS_I(inode);
+
+	poll_wait(file, &ip->ip_lockres.l_event, wait);
+
+	spin_lock(&ip->ip_lockres.l_lock);
+	if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
+		event = POLLIN | POLLRDNORM;
+	spin_unlock(&ip->ip_lockres.l_lock);
+
+	return event;
+}
+
 static ssize_t dlmfs_file_read(struct file *filp,
 			       char __user *buf,
 			       size_t count,
 			       loff_t *ppos)
 {
 	int bytes_left;
-	ssize_t readlen;
+	ssize_t readlen, got;
 	char *lvb_buf;
 	struct inode *inode = filp->f_path.dentry->d_inode;
 
@@ -211,9 +268,13 @@ static ssize_t dlmfs_file_read(struct file *filp,
 	if (!lvb_buf)
 		return -ENOMEM;
 
-	user_dlm_read_lvb(inode, lvb_buf, readlen);
-	bytes_left = __copy_to_user(buf, lvb_buf, readlen);
-	readlen -= bytes_left;
+	got = user_dlm_read_lvb(inode, lvb_buf, readlen);
+	if (got) {
+		BUG_ON(got != readlen);
+		bytes_left = __copy_to_user(buf, lvb_buf, readlen);
+		readlen -= bytes_left;
+	} else
+		readlen = 0;
 
 	kfree(lvb_buf);
 
@@ -272,7 +333,7 @@ static void dlmfs_init_once(void *foo)
 	struct dlmfs_inode_private *ip =
 		(struct dlmfs_inode_private *) foo;
 
-	ip->ip_dlm = NULL;
+	ip->ip_conn = NULL;
 	ip->ip_parent = NULL;
 
 	inode_init_once(&ip->ip_vfs_inode);
@@ -314,14 +375,14 @@ static void dlmfs_clear_inode(struct inode *inode)
 		goto clear_fields;
 	}
 
-	mlog(0, "we're a directory, ip->ip_dlm = 0x%p\n", ip->ip_dlm);
+	mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);
 	/* we must be a directory. If required, lets unregister the
 	 * dlm context now. */
-	if (ip->ip_dlm)
-		user_dlm_unregister_context(ip->ip_dlm);
+	if (ip->ip_conn)
+		user_dlm_unregister(ip->ip_conn);
 clear_fields:
 	ip->ip_parent = NULL;
-	ip->ip_dlm = NULL;
+	ip->ip_conn = NULL;
 }
 
 static struct backing_dev_info dlmfs_backing_dev_info = {
@@ -371,7 +432,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
 	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 
 	ip = DLMFS_I(inode);
-	ip->ip_dlm = DLMFS_I(parent)->ip_dlm;
+	ip->ip_conn = DLMFS_I(parent)->ip_conn;
 
 	switch (mode & S_IFMT) {
 	default:
@@ -425,13 +486,12 @@ static int dlmfs_mkdir(struct inode * dir,
 	struct inode *inode = NULL;
 	struct qstr *domain = &dentry->d_name;
 	struct dlmfs_inode_private *ip;
-	struct dlm_ctxt *dlm;
-	struct dlm_protocol_version proto = user_locking_protocol;
+	struct ocfs2_cluster_connection *conn;
 
 	mlog(0, "mkdir %.*s\n", domain->len, domain->name);
 
 	/* verify that we have a proper domain */
-	if (domain->len >= O2NM_MAX_NAME_LEN) {
+	if (domain->len >= GROUP_NAME_MAX) {
 		status = -EINVAL;
 		mlog(ML_ERROR, "invalid domain name for directory.\n");
 		goto bail;
@@ -446,14 +506,14 @@ static int dlmfs_mkdir(struct inode * dir,
 
 	ip = DLMFS_I(inode);
 
-	dlm = user_dlm_register_context(domain, &proto);
-	if (IS_ERR(dlm)) {
-		status = PTR_ERR(dlm);
+	conn = user_dlm_register(domain);
+	if (IS_ERR(conn)) {
+		status = PTR_ERR(conn);
 		mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
 		     status, domain->len, domain->name);
 		goto bail;
 	}
-	ip->ip_dlm = dlm;
+	ip->ip_conn = conn;
 
 	inc_nlink(dir);
 	d_instantiate(dentry, inode);
@@ -549,6 +609,7 @@ static int dlmfs_fill_super(struct super_block * sb,
 static const struct file_operations dlmfs_file_operations = {
 	.open = dlmfs_file_open,
 	.release = dlmfs_file_release,
+	.poll = dlmfs_file_poll,
 	.read = dlmfs_file_read,
 	.write = dlmfs_file_write,
 };
@@ -576,6 +637,7 @@ static const struct super_operations dlmfs_ops = {
 
 static const struct inode_operations dlmfs_file_inode_operations = {
 	.getattr = simple_getattr,
+	.setattr = dlmfs_file_setattr,
 };
 
 static int dlmfs_get_sb(struct file_system_type *fs_type,
@@ -620,6 +682,7 @@ static int __init init_dlmfs_fs(void)
 	}
 	cleanup_worker = 1;
 
+	user_dlm_set_locking_protocol();
 	status = register_filesystem(&dlmfs_fs_type);
 bail:
 	if (status) {
diff --git a/fs/ocfs2/dlm/dlmfsver.c b/fs/ocfs2/dlmfs/dlmfsver.c
index a733b3321f8..a733b3321f8 100644
--- a/fs/ocfs2/dlm/dlmfsver.c
+++ b/fs/ocfs2/dlmfs/dlmfsver.c
diff --git a/fs/ocfs2/dlm/dlmfsver.h b/fs/ocfs2/dlmfs/dlmfsver.h
index f35eadbed25..f35eadbed25 100644
--- a/fs/ocfs2/dlm/dlmfsver.h
+++ b/fs/ocfs2/dlmfs/dlmfsver.h
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c
index 4cb1d3dae25..0499e3fb7bd 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlmfs/userdlm.c
@@ -34,18 +34,19 @@
 #include <linux/types.h>
 #include <linux/crc32.h>
 
-
-#include "cluster/nodemanager.h"
-#include "cluster/heartbeat.h"
-#include "cluster/tcp.h"
-
-#include "dlmapi.h"
-
+#include "ocfs2_lockingver.h"
+#include "stackglue.h"
 #include "userdlm.h"
 
 #define MLOG_MASK_PREFIX ML_DLMFS
 #include "cluster/masklog.h"
 
+
+static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
+{
+	return container_of(lksb, struct user_lock_res, l_lksb);
+}
+
 static inline int user_check_wait_flag(struct user_lock_res *lockres,
 				       int flag)
 {
@@ -73,15 +74,15 @@ static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
 }
 
 /* I heart container_of... */
-static inline struct dlm_ctxt *
-dlm_ctxt_from_user_lockres(struct user_lock_res *lockres)
+static inline struct ocfs2_cluster_connection *
+cluster_connection_from_user_lockres(struct user_lock_res *lockres)
 {
 	struct dlmfs_inode_private *ip;
 
 	ip = container_of(lockres,
 			  struct dlmfs_inode_private,
 			  ip_lockres);
-	return ip->ip_dlm;
+	return ip->ip_conn;
 }
 
 static struct inode *
@@ -103,9 +104,9 @@ static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
 }
 
 #define user_log_dlm_error(_func, _stat, _lockres) do {			\
-	mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on "		\
-	     "resource %.*s: %s\n", dlm_errname(_stat), _func,		\
-	     _lockres->l_namelen, _lockres->l_name, dlm_errmsg(_stat));	\
+	mlog(ML_ERROR, "Dlm error %d while calling %s on "		\
+	     "resource %.*s\n", _stat, _func,				\
+	     _lockres->l_namelen, _lockres->l_name);			\
 } while (0)
 
 /* WARNING: This function lives in a world where the only three lock
@@ -113,34 +114,35 @@ static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
  * lock types are added. */
 static inline int user_highest_compat_lock_level(int level)
 {
-	int new_level = LKM_EXMODE;
+	int new_level = DLM_LOCK_EX;
 
-	if (level == LKM_EXMODE)
-		new_level = LKM_NLMODE;
-	else if (level == LKM_PRMODE)
-		new_level = LKM_PRMODE;
+	if (level == DLM_LOCK_EX)
+		new_level = DLM_LOCK_NL;
+	else if (level == DLM_LOCK_PR)
+		new_level = DLM_LOCK_PR;
 
 	return new_level;
 }
 
-static void user_ast(void *opaque)
+static void user_ast(struct ocfs2_dlm_lksb *lksb)
 {
-	struct user_lock_res *lockres = opaque;
-	struct dlm_lockstatus *lksb;
+	struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
+	int status;
 
-	mlog(0, "AST fired for lockres %.*s\n", lockres->l_namelen,
-	     lockres->l_name);
+	mlog(ML_BASTS, "AST fired for lockres %.*s, level %d => %d\n",
+	     lockres->l_namelen, lockres->l_name, lockres->l_level,
+	     lockres->l_requested);
 
 	spin_lock(&lockres->l_lock);
 
-	lksb = &(lockres->l_lksb);
-	if (lksb->status != DLM_NORMAL) {
+	status = ocfs2_dlm_lock_status(&lockres->l_lksb);
+	if (status) {
 		mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n",
-		     lksb->status, lockres->l_namelen, lockres->l_name);
+		     status, lockres->l_namelen, lockres->l_name);
 		spin_unlock(&lockres->l_lock);
 		return;
 	}
 
-	mlog_bug_on_msg(lockres->l_requested == LKM_IVMODE,
+	mlog_bug_on_msg(lockres->l_requested == DLM_LOCK_IV,
 			"Lockres %.*s, requested ivmode. flags 0x%x\n",
 			lockres->l_namelen, lockres->l_name, lockres->l_flags);
@@ -148,13 +150,13 @@ static void user_ast(void *opaque)
 	if (lockres->l_requested < lockres->l_level) {
 		if (lockres->l_requested <=
 		    user_highest_compat_lock_level(lockres->l_blocking)) {
-			lockres->l_blocking = LKM_NLMODE;
+			lockres->l_blocking = DLM_LOCK_NL;
 			lockres->l_flags &= ~USER_LOCK_BLOCKED;
 		}
 	}
 
 	lockres->l_level = lockres->l_requested;
-	lockres->l_requested = LKM_IVMODE;
+	lockres->l_requested = DLM_LOCK_IV;
 	lockres->l_flags |= USER_LOCK_ATTACHED;
 	lockres->l_flags &= ~USER_LOCK_BUSY;
 
@@ -193,11 +195,11 @@ static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
 		return;
 
 	switch (lockres->l_blocking) {
-	case LKM_EXMODE:
+	case DLM_LOCK_EX:
 		if (!lockres->l_ex_holders && !lockres->l_ro_holders)
 			queue = 1;
 		break;
-	case LKM_PRMODE:
+	case DLM_LOCK_PR:
 		if (!lockres->l_ex_holders)
 			queue = 1;
 		break;
@@ -209,12 +211,12 @@ static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
 		__user_dlm_queue_lockres(lockres);
 }
 
-static void user_bast(void *opaque, int level)
+static void user_bast(struct ocfs2_dlm_lksb *lksb, int level)
 {
-	struct user_lock_res *lockres = opaque;
+	struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
 
-	mlog(0, "Blocking AST fired for lockres %.*s. Blocking level %d\n",
-	     lockres->l_namelen, lockres->l_name, level);
+	mlog(ML_BASTS, "BAST fired for lockres %.*s, blocking %d, level %d\n",
+	     lockres->l_namelen, lockres->l_name, level, lockres->l_level);
 
 	spin_lock(&lockres->l_lock);
 	lockres->l_flags |= USER_LOCK_BLOCKED;
@@ -227,15 +229,15 @@ static void user_bast(void *opaque, int level)
 	wake_up(&lockres->l_event);
 }
 
-static void user_unlock_ast(void *opaque, enum dlm_status status)
+static void user_unlock_ast(struct ocfs2_dlm_lksb *lksb, int status)
 {
-	struct user_lock_res *lockres = opaque;
+	struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
 
-	mlog(0, "UNLOCK AST called on lock %.*s\n", lockres->l_namelen,
-	     lockres->l_name);
+	mlog(ML_BASTS, "UNLOCK AST fired for lockres %.*s, flags 0x%x\n",
+	     lockres->l_namelen, lockres->l_name, lockres->l_flags);
 
-	if (status != DLM_NORMAL && status != DLM_CANCELGRANT)
-		mlog(ML_ERROR, "Dlm returns status %d\n", status);
+	if (status)
+		mlog(ML_ERROR, "dlm returns status %d\n", status);
 
 	spin_lock(&lockres->l_lock);
 	/* The teardown flag gets set early during the unlock process,
@@ -243,7 +245,7 @@ static void user_unlock_ast(void *opaque, enum dlm_status status)
 	 * for a concurrent cancel. */
 	if (lockres->l_flags & USER_LOCK_IN_TEARDOWN
 	    && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
-		lockres->l_level = LKM_IVMODE;
+		lockres->l_level = DLM_LOCK_IV;
 	} else if (status == DLM_CANCELGRANT) {
 		/* We tried to cancel a convert request, but it was
 		 * already granted. Don't clear the busy flag - the
@@ -254,7 +256,7 @@ static void user_unlock_ast(void *opaque, enum dlm_status status)
 	} else {
 		BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
 		/* Cancel succeeded, we want to re-queue */
-		lockres->l_requested = LKM_IVMODE; /* cancel an
+		lockres->l_requested = DLM_LOCK_IV; /* cancel an
 						    * upconvert
 						    * request. */
 		lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
@@ -271,6 +273,21 @@ out_noclear:
 	wake_up(&lockres->l_event);
 }
 
+/*
+ * This is the userdlmfs locking protocol version.
+ *
+ * See fs/ocfs2/dlmglue.c for more details on locking versions.
+ */
+static struct ocfs2_locking_protocol user_dlm_lproto = {
+	.lp_max_version = {
+		.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
+		.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
+	},
+	.lp_lock_ast = user_ast,
+	.lp_blocking_ast = user_bast,
+	.lp_unlock_ast = user_unlock_ast,
+};
+
 static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
 {
 	struct inode *inode;
@@ -283,10 +300,10 @@ static void user_dlm_unblock_lock(struct work_struct *work)
 	int new_level, status;
 	struct user_lock_res *lockres =
 		container_of(work, struct user_lock_res, l_work);
-	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+	struct ocfs2_cluster_connection *conn =
+		cluster_connection_from_user_lockres(lockres);
 
-	mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
-	     lockres->l_name);
+	mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
 
 	spin_lock(&lockres->l_lock);
 
@@ -304,17 +321,23 @@ static void user_dlm_unblock_lock(struct work_struct *work)
 	 * flag, and finally we might get another bast which re-queues
 	 * us before our ast for the downconvert is called. */
 	if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
+		mlog(ML_BASTS, "lockres %.*s USER_LOCK_BLOCKED\n",
+		     lockres->l_namelen, lockres->l_name);
 		spin_unlock(&lockres->l_lock);
 		goto drop_ref;
 	}
 
 	if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
+		mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_TEARDOWN\n",
+		     lockres->l_namelen, lockres->l_name);
 		spin_unlock(&lockres->l_lock);
 		goto drop_ref;
 	}
 
 	if (lockres->l_flags & USER_LOCK_BUSY) {
 		if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
+			mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_CANCEL\n",
+			     lockres->l_namelen, lockres->l_name);
 			spin_unlock(&lockres->l_lock);
 			goto drop_ref;
 		}
@@ -322,32 +345,31 @@ static void user_dlm_unblock_lock(struct work_struct *work)
 		lockres->l_flags |= USER_LOCK_IN_CANCEL;
 		spin_unlock(&lockres->l_lock);
 
-		status = dlmunlock(dlm,
-				   &lockres->l_lksb,
-				   LKM_CANCEL,
-				   user_unlock_ast,
-				   lockres);
-		if (status != DLM_NORMAL)
-			user_log_dlm_error("dlmunlock", status, lockres);
+		status = ocfs2_dlm_unlock(conn, &lockres->l_lksb,
+					  DLM_LKF_CANCEL);
+		if (status)
+			user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
 		goto drop_ref;
 	}
 
 	/* If there are still incompat holders, we can exit safely
 	 * without worrying about re-queueing this lock as that will
 	 * happen on the last call to user_cluster_unlock. */
-	if ((lockres->l_blocking == LKM_EXMODE)
+	if ((lockres->l_blocking == DLM_LOCK_EX)
 	    && (lockres->l_ex_holders || lockres->l_ro_holders)) {
 		spin_unlock(&lockres->l_lock);
-		mlog(0, "can't downconvert for ex: ro = %u, ex = %u\n",
-		     lockres->l_ro_holders, lockres->l_ex_holders);
+		mlog(ML_BASTS, "lockres %.*s, EX/PR Holders %u,%u\n",
+		     lockres->l_namelen, lockres->l_name,
+		     lockres->l_ex_holders, lockres->l_ro_holders);
 		goto drop_ref;
 	}
 
-	if ((lockres->l_blocking == LKM_PRMODE)
+	if ((lockres->l_blocking == DLM_LOCK_PR)
 	    && lockres->l_ex_holders) {
 		spin_unlock(&lockres->l_lock);
-		mlog(0, "can't downconvert for pr: ex = %u\n",
-		     lockres->l_ex_holders);
+		mlog(ML_BASTS, "lockres %.*s, EX Holders %u\n",
+		     lockres->l_namelen, lockres->l_name,
+		     lockres->l_ex_holders);
 		goto drop_ref;
 	}
 
@@ -355,22 +377,17 @@ static void user_dlm_unblock_lock(struct work_struct *work)
 	new_level = user_highest_compat_lock_level(lockres->l_blocking);
 	lockres->l_requested = new_level;
 	lockres->l_flags |= USER_LOCK_BUSY;
-	mlog(0, "Downconvert lock from %d to %d\n",
-	     lockres->l_level, new_level);
+	mlog(ML_BASTS, "lockres %.*s, downconvert %d => %d\n",
+	     lockres->l_namelen, lockres->l_name, lockres->l_level, new_level);
 	spin_unlock(&lockres->l_lock);
 
 	/* need lock downconvert request now... */
-	status = dlmlock(dlm,
-			 new_level,
-			 &lockres->l_lksb,
-			 LKM_CONVERT|LKM_VALBLK,
-			 lockres->l_name,
-			 lockres->l_namelen,
-			 user_ast,
-			 lockres,
-			 user_bast);
-	if (status != DLM_NORMAL) {
-		user_log_dlm_error("dlmlock", status, lockres);
+	status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb,
+				DLM_LKF_CONVERT|DLM_LKF_VALBLK,
+				lockres->l_name,
+				lockres->l_namelen);
+	if (status) {
+		user_log_dlm_error("ocfs2_dlm_lock", status, lockres);
 		user_recover_from_dlm_error(lockres);
 	}
 
@@ -382,10 +399,10 @@ static inline void user_dlm_inc_holders(struct user_lock_res *lockres,
 					int level)
 {
 	switch(level) {
-	case LKM_EXMODE:
+	case DLM_LOCK_EX:
 		lockres->l_ex_holders++;
 		break;
-	case LKM_PRMODE:
+	case DLM_LOCK_PR:
 		lockres->l_ro_holders++;
 		break;
 	default:
@@ -410,20 +427,19 @@ int user_dlm_cluster_lock(struct user_lock_res *lockres,
 			  int lkm_flags)
 {
 	int status, local_flags;
-	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+	struct ocfs2_cluster_connection *conn =
+		cluster_connection_from_user_lockres(lockres);
 
-	if (level != LKM_EXMODE &&
-	    level != LKM_PRMODE) {
+	if (level != DLM_LOCK_EX &&
+	    level != DLM_LOCK_PR) {
 		mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
 		     lockres->l_namelen, lockres->l_name);
 		status = -EINVAL;
 		goto bail;
 	}
 
-	mlog(0, "lockres %.*s: asking for %s lock, passed flags = 0x%x\n",
-	     lockres->l_namelen, lockres->l_name,
-	     (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE",
-	     lkm_flags);
+	mlog(ML_BASTS, "lockres %.*s, level %d, flags = 0x%x\n",
+	     lockres->l_namelen, lockres->l_name, level, lkm_flags);
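For readers coming at this merge from userspace, the capabilities comment added to dlmfs.c above is the ABI-visible part: the feature list is exported as a read-only module parameter, and the new "bast" capability means POLLIN on the file descriptor of a held lock signals a blocking AST. The sketch below is not part of the patch; it only illustrates, under stated assumptions, how a consumer might use those two facts. It assumes the conventional /dlm mount point, a domain directory already created with mkdir, made-up domain and lock names, and the standard /sys/module/ocfs2_dlmfs/parameters/ path that module parameters appear under.

/* Hedged example, not from the kernel tree: check the dlmfs "bast"
 * capability, take a lock, and wait for a blocking AST via poll(2).
 * The /dlm mount point and the domain/lock names are illustrative. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>

int main(void)
{
	char caps[64] = "";
	struct pollfd pfd;
	FILE *f;
	int fd;

	/* The capability list is a read-only module parameter, so it can
	 * be inspected whether or not dlmfs is currently mounted. */
	f = fopen("/sys/module/ocfs2_dlmfs/parameters/capabilities", "r");
	if (f) {
		if (!fgets(caps, sizeof(caps), f))
			caps[0] = '\0';
		fclose(f);
	}
	if (!strstr(caps, "bast")) {
		fprintf(stderr, "dlmfs lacks the bast capability (\"%s\")\n", caps);
		return 1;
	}

	/* Opening a lock file takes the lock; O_RDWR requests an
	 * exclusive (EX) level, O_RDONLY a protected-read (PR) level. */
	fd = open("/dlm/exampledomain/examplelock", O_RDWR | O_CREAT, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* With the bast capability, POLLIN on a held lock means a
	 * blocking AST fired: another node wants this lock. */
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
		close(fd);	/* drop the lock so the blocked node can proceed */

	return 0;
}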