Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
-rw-r--r--	drivers/infiniband/hw/mlx5/mr.c	461
1 file changed, 352 insertions, 109 deletions
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index bd41df95b6f..afa873bd028 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -35,11 +35,16 @@
 #include <linux/random.h>
 #include <linux/debugfs.h>
 #include <linux/export.h>
+#include <linux/delay.h>
 #include <rdma/ib_umem.h>
 #include "mlx5_ib.h"
 
 enum {
-	DEF_CACHE_SIZE	= 10,
+	MAX_PENDING_REG_MR = 8,
+};
+
+enum {
+	MLX5_UMR_ALIGN	= 2048
 };
 
 static __be64 *mr_align(__be64 *ptr, int align)
@@ -59,15 +64,67 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
 		return order - cache->ent[0].order;
 }
 
+static void reg_mr_callback(int status, void *context)
+{
+	struct mlx5_ib_mr *mr = context;
+	struct mlx5_ib_dev *dev = mr->dev;
+	struct mlx5_mr_cache *cache = &dev->cache;
+	int c = order2idx(dev, mr->order);
+	struct mlx5_cache_ent *ent = &cache->ent[c];
+	u8 key;
+	unsigned long flags;
+	struct mlx5_mr_table *table = &dev->mdev.priv.mr_table;
+	int err;
+
+	spin_lock_irqsave(&ent->lock, flags);
+	ent->pending--;
+	spin_unlock_irqrestore(&ent->lock, flags);
+	if (status) {
+		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
+		kfree(mr);
+		dev->fill_delay = 1;
+		mod_timer(&dev->delay_timer, jiffies + HZ);
+		return;
+	}
+
+	if (mr->out.hdr.status) {
+		mlx5_ib_warn(dev, "failed - status %d, syndorme 0x%x\n",
+			     mr->out.hdr.status,
+			     be32_to_cpu(mr->out.hdr.syndrome));
+		kfree(mr);
+		dev->fill_delay = 1;
+		mod_timer(&dev->delay_timer, jiffies + HZ);
+		return;
+	}
+
+	spin_lock_irqsave(&dev->mdev.priv.mkey_lock, flags);
+	key = dev->mdev.priv.mkey_key++;
+	spin_unlock_irqrestore(&dev->mdev.priv.mkey_lock, flags);
+	mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
+
+	cache->last_add = jiffies;
+
+	spin_lock_irqsave(&ent->lock, flags);
+	list_add_tail(&mr->list, &ent->head);
+	ent->cur++;
+	ent->size++;
+	spin_unlock_irqrestore(&ent->lock, flags);
+
+	write_lock_irqsave(&table->lock, flags);
+	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
+				&mr->mmr);
+	if (err)
+		pr_err("Error inserting to mr tree. 0x%x\n", -err);
+	write_unlock_irqrestore(&table->lock, flags);
+}
+
 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_create_mkey_mbox_in *in;
 	struct mlx5_ib_mr *mr;
 	int npages = 1 << ent->order;
-	int size = sizeof(u64) * npages;
 	int err = 0;
 	int i;
 
@@ -76,87 +133,66 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 		return -ENOMEM;
 
 	for (i = 0; i < num; i++) {
+		if (ent->pending >= MAX_PENDING_REG_MR) {
+			err = -EAGAIN;
+			break;
+		}
+
 		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 		if (!mr) {
 			err = -ENOMEM;
-			goto out;
+			break;
 		}
 		mr->order = ent->order;
 		mr->umred = 1;
-		mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
-		if (!mr->pas) {
-			kfree(mr);
-			err = -ENOMEM;
-			goto out;
-		}
-		mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
-					 DMA_TO_DEVICE);
-		if (dma_mapping_error(ddev, mr->dma)) {
-			kfree(mr->pas);
-			kfree(mr);
-			err = -ENOMEM;
-			goto out;
-		}
-
+		mr->dev = dev;
 		in->seg.status = 1 << 6;
 		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
 		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
 		in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
 		in->seg.log2_page_size = 12;
 
+		spin_lock_irq(&ent->lock);
+		ent->pending++;
+		spin_unlock_irq(&ent->lock);
 		err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
-					    sizeof(*in));
+					    sizeof(*in), reg_mr_callback,
+					    mr, &mr->out);
 		if (err) {
 			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
 			kfree(mr);
-			goto out;
+			break;
 		}
-		cache->last_add = jiffies;
-
-		spin_lock(&ent->lock);
-		list_add_tail(&mr->list, &ent->head);
-		ent->cur++;
-		ent->size++;
-		spin_unlock(&ent->lock);
 	}
 
-out:
 	kfree(in);
 	return err;
 }
 
 static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_ib_mr *mr;
-	int size;
 	int err;
 	int i;
 
 	for (i = 0; i < num; i++) {
-		spin_lock(&ent->lock);
+		spin_lock_irq(&ent->lock);
 		if (list_empty(&ent->head)) {
-			spin_unlock(&ent->lock);
+			spin_unlock_irq(&ent->lock);
 			return;
 		}
 		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
 		list_del(&mr->list);
 		ent->cur--;
 		ent->size--;
-		spin_unlock(&ent->lock);
+		spin_unlock_irq(&ent->lock);
 		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-		if (err) {
+		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
-		} else {
-			size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
+		else
 			kfree(mr);
-		}
 	}
 }
 
@@ -183,9 +219,13 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
 		return -EINVAL;
 
 	if (var > ent->size) {
-		err = add_keys(dev, c, var - ent->size);
-		if (err)
-			return err;
+		do {
+			err = add_keys(dev, c, var - ent->size);
+			if (err && err != -EAGAIN)
+				return err;
+
+			usleep_range(3000, 5000);
+		} while (err);
 	} else if (var < ent->size) {
 		remove_keys(dev, c, ent->size - var);
 	}
@@ -301,23 +341,37 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
 	struct mlx5_ib_dev *dev = ent->dev;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	int i = order2idx(dev, ent->order);
+	int err;
 
 	if (cache->stopped)
 		return;
 
 	ent = &dev->cache.ent[i];
-	if (ent->cur < 2 * ent->limit) {
-		add_keys(dev, i, 1);
-		if (ent->cur < 2 * ent->limit)
-			queue_work(cache->wq, &ent->work);
+	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
+		err = add_keys(dev, i, 1);
+		if (ent->cur < 2 * ent->limit) {
+			if (err == -EAGAIN) {
+				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
+					    i + 2);
+				queue_delayed_work(cache->wq, &ent->dwork,
+						   msecs_to_jiffies(3));
+			} else if (err) {
+				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
+					     i + 2, err);
+				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
+			} else {
+				queue_work(cache->wq, &ent->work);
+			}
+		}
 	} else if (ent->cur > 2 * ent->limit) {
 		if (!someone_adding(cache) &&
-		    time_after(jiffies, cache->last_add + 60 * HZ)) {
+		    time_after(jiffies, cache->last_add + 300 * HZ)) {
 			remove_keys(dev, i, 1);
 			if (ent->cur > ent->limit)
 				queue_work(cache->wq, &ent->work);
 		} else {
-			queue_delayed_work(cache->wq, &ent->dwork, 60 * HZ);
+			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
 		}
 	}
 }
@@ -357,18 +411,18 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
 
 		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
 
-		spin_lock(&ent->lock);
+		spin_lock_irq(&ent->lock);
 		if (!list_empty(&ent->head)) {
 			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
 					      list);
 			list_del(&mr->list);
 			ent->cur--;
-			spin_unlock(&ent->lock);
+			spin_unlock_irq(&ent->lock);
 			if (ent->cur < ent->limit)
 				queue_work(cache->wq, &ent->work);
 			break;
 		}
-		spin_unlock(&ent->lock);
+		spin_unlock_irq(&ent->lock);
 
 		queue_work(cache->wq, &ent->work);
 
@@ -395,12 +449,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		return;
 	}
 	ent = &cache->ent[c];
-	spin_lock(&ent->lock);
+	spin_lock_irq(&ent->lock);
 	list_add_tail(&mr->list, &ent->head);
 	ent->cur++;
 	if (ent->cur > 2 * ent->limit)
 		shrink = 1;
-	spin_unlock(&ent->lock);
+	spin_unlock_irq(&ent->lock);
 
 	if (shrink)
 		queue_work(cache->wq, &ent->work);
@@ -408,33 +462,28 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_ib_mr *mr;
-	int size;
 	int err;
 
+	cancel_delayed_work(&ent->dwork);
 	while (1) {
-		spin_lock(&ent->lock);
+		spin_lock_irq(&ent->lock);
 		if (list_empty(&ent->head)) {
-			spin_unlock(&ent->lock);
+			spin_unlock_irq(&ent->lock);
 			return;
 		}
 		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
 		list_del(&mr->list);
 		ent->cur--;
 		ent->size--;
-		spin_unlock(&ent->lock);
+		spin_unlock_irq(&ent->lock);
 		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-		if (err) {
+		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
-		} else {
-			size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
+		else
 			kfree(mr);
-		}
 	}
 }
 
@@ -490,12 +539,18 @@ static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
 	debugfs_remove_recursive(dev->cache.root);
 }
 
+static void delay_time_func(unsigned long ctx)
+{
+	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
+
+	dev->fill_delay = 0;
+}
+
 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent;
 	int limit;
-	int size;
 	int err;
 	int i;
 
@@ -505,6 +560,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		return -ENOMEM;
 	}
 
+	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
 		INIT_LIST_HEAD(&cache->ent[i].head);
 		spin_lock_init(&cache->ent[i].lock);
@@ -515,13 +571,11 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		ent->order = i + 2;
 		ent->dev = dev;
 
-		if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE) {
-			size = dev->mdev.profile->mr_cache[i].size;
+		if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE)
 			limit = dev->mdev.profile->mr_cache[i].limit;
-		} else {
-			size = DEF_CACHE_SIZE;
+		else
 			limit = 0;
-		}
+
 		INIT_WORK(&ent->work, cache_work_func);
 		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
 		ent->limit = limit;
@@ -540,13 +594,16 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 	int i;
 
 	dev->cache.stopped = 1;
-	destroy_workqueue(dev->cache.wq);
+	flush_workqueue(dev->cache.wq);
 
 	mlx5_mr_cache_debugfs_cleanup(dev);
 
 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
 		clean_keys(dev, i);
 
+	destroy_workqueue(dev->cache.wq);
+	del_timer_sync(&dev->delay_timer);
+
 	return 0;
 }
 
@@ -575,7 +632,8 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
 	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
 	seg->start_addr = 0;
 
-	err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in));
+	err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
+				    NULL);
 	if (err)
 		goto err_in;
 
@@ -650,7 +708,7 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
 
 void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
 {
-	struct mlx5_ib_mr *mr;
+	struct mlx5_ib_umr_context *context;
 	struct ib_wc wc;
 	int err;
 
@@ -663,9 +721,9 @@ void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
 		if (err == 0)
 			break;
 
-		mr = (struct mlx5_ib_mr *)(unsigned long)wc.wr_id;
-		mr->status = wc.status;
-		complete(&mr->done);
+		context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
+		context->status = wc.status;
+		complete(&context->done);
 	}
 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 }
@@ -675,21 +733,24 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 				  int page_shift, int order, int access_flags)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	struct device *ddev = dev->ib_dev.dma_device;
 	struct umr_common *umrc = &dev->umrc;
+	struct mlx5_ib_umr_context umr_context;
 	struct ib_send_wr wr, *bad;
 	struct mlx5_ib_mr *mr;
 	struct ib_sge sg;
-	int err;
+	int size = sizeof(u64) * npages;
+	int err = 0;
 	int i;
 
-	for (i = 0; i < 10; i++) {
+	for (i = 0; i < 1; i++) {
 		mr = alloc_cached_mr(dev, order);
 		if (mr)
 			break;
 
 		err = add_keys(dev, order2idx(dev, order), 1);
-		if (err) {
-			mlx5_ib_warn(dev, "add_keys failed\n");
+		if (err && err != -EAGAIN) {
+			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
 			break;
 		}
 	}
@@ -697,38 +758,58 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	if (!mr)
 		return ERR_PTR(-EAGAIN);
 
-	mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1);
+	mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+	if (!mr->pas) {
+		err = -ENOMEM;
+		goto free_mr;
+	}
+
+	mlx5_ib_populate_pas(dev, umem, page_shift,
+			     mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
+
+	mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
+				 DMA_TO_DEVICE);
+	if (dma_mapping_error(ddev, mr->dma)) {
+		err = -ENOMEM;
+		goto free_pas;
+	}
 
 	memset(&wr, 0, sizeof(wr));
-	wr.wr_id = (u64)(unsigned long)mr;
+	wr.wr_id = (u64)(unsigned long)&umr_context;
 	prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags);
 
-	/* We serialize polls so one process does not kidnap another's
-	 * completion. This is not a problem since wr is completed in
-	 * around 1 usec
-	 */
+	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
-	init_completion(&mr->done);
 	err = ib_post_send(umrc->qp, &wr, &bad);
 	if (err) {
 		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
-		up(&umrc->sem);
-		goto error;
+		goto unmap_dma;
+	} else {
+		wait_for_completion(&umr_context.done);
+		if (umr_context.status != IB_WC_SUCCESS) {
+			mlx5_ib_warn(dev, "reg umr failed\n");
+			err = -EFAULT;
+		}
 	}
-	wait_for_completion(&mr->done);
+
+	mr->mmr.iova = virt_addr;
+	mr->mmr.size = len;
+	mr->mmr.pd = to_mpd(pd)->pdn;
+
+unmap_dma:
 	up(&umrc->sem);
+	dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
 
-	if (mr->status != IB_WC_SUCCESS) {
-		mlx5_ib_warn(dev, "reg umr failed\n");
-		err = -EFAULT;
-		goto error;
+free_pas:
+	kfree(mr->pas);
+
+free_mr:
+	if (err) {
+		free_cached_mr(dev, mr);
+		return ERR_PTR(err);
 	}
 
 	return mr;
-
-error:
-	free_cached_mr(dev, mr);
-	return ERR_PTR(err);
 }
 
 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
@@ -763,8 +844,10 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
 	in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
 	in->seg.log2_page_size = page_shift;
 	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
-	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen);
+	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
+							 1 << page_shift));
+	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen, NULL,
+				    NULL, NULL);
 	if (err) {
 		mlx5_ib_warn(dev, "create mkey failed\n");
 		goto err_2;
@@ -855,24 +938,26 @@ error:
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	struct umr_common *umrc = &dev->umrc;
+	struct mlx5_ib_umr_context umr_context;
 	struct ib_send_wr wr, *bad;
 	int err;
 
 	memset(&wr, 0, sizeof(wr));
-	wr.wr_id = (u64)(unsigned long)mr;
+	wr.wr_id = (u64)(unsigned long)&umr_context;
 	prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);
 
+	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
-	init_completion(&mr->done);
 	err = ib_post_send(umrc->qp, &wr, &bad);
 	if (err) {
 		up(&umrc->sem);
 		mlx5_ib_dbg(dev, "err %d\n", err);
 		goto error;
+	} else {
+		wait_for_completion(&umr_context.done);
+		up(&umrc->sem);
 	}
-	wait_for_completion(&mr->done);
-	up(&umrc->sem);
-	if (mr->status != IB_WC_SUCCESS) {
+	if (umr_context.status != IB_WC_SUCCESS) {
 		mlx5_ib_warn(dev, "unreg umr failed\n");
 		err = -EFAULT;
 		goto error;
@@ -921,6 +1006,122 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
 	return 0;
 }
 
+struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
+				struct ib_mr_init_attr *mr_init_attr)
+{
+	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	struct mlx5_create_mkey_mbox_in *in;
+	struct mlx5_ib_mr *mr;
+	int access_mode, err;
+	int ndescs = roundup(mr_init_attr->max_reg_descriptors, 4);
+
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
+
+	in = kzalloc(sizeof(*in), GFP_KERNEL);
+	if (!in) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	in->seg.status = 1 << 6; /* free */
+	in->seg.xlt_oct_size = cpu_to_be32(ndescs);
+	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
+	access_mode = MLX5_ACCESS_MODE_MTT;
+
+	if (mr_init_attr->flags & IB_MR_SIGNATURE_EN) {
+		u32 psv_index[2];
+
+		in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
+							   MLX5_MKEY_BSF_EN);
+		in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
+		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
+		if (!mr->sig) {
+			err = -ENOMEM;
+			goto err_free_in;
+		}
+
+		/* create mem & wire PSVs */
+		err = mlx5_core_create_psv(&dev->mdev, to_mpd(pd)->pdn,
+					   2, psv_index);
+		if (err)
+			goto err_free_sig;
+
+		access_mode = MLX5_ACCESS_MODE_KLM;
+		mr->sig->psv_memory.psv_idx = psv_index[0];
+		mr->sig->psv_wire.psv_idx = psv_index[1];
+
+		mr->sig->sig_status_checked = true;
+		mr->sig->sig_err_exists = false;
+		/* Next UMR, Arm SIGERR */
+		++mr->sig->sigerr_count;
+	}
+
+	in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
+	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in),
+				    NULL, NULL, NULL);
+	if (err)
+		goto err_destroy_psv;
+
+	mr->ibmr.lkey = mr->mmr.key;
+	mr->ibmr.rkey = mr->mmr.key;
+	mr->umem = NULL;
+	kfree(in);
+
+	return &mr->ibmr;
+
+err_destroy_psv:
+	if (mr->sig) {
+		if (mlx5_core_destroy_psv(&dev->mdev,
+					  mr->sig->psv_memory.psv_idx))
+			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
+				     mr->sig->psv_memory.psv_idx);
+		if (mlx5_core_destroy_psv(&dev->mdev,
+					  mr->sig->psv_wire.psv_idx))
+			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
+				     mr->sig->psv_wire.psv_idx);
+	}
+err_free_sig:
+	kfree(mr->sig);
+err_free_in:
+	kfree(in);
+err_free:
+	kfree(mr);
+	return ERR_PTR(err);
+}
+
+int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
+{
+	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+	struct mlx5_ib_mr *mr = to_mmr(ibmr);
+	int err;
+
+	if (mr->sig) {
+		if (mlx5_core_destroy_psv(&dev->mdev,
+					  mr->sig->psv_memory.psv_idx))
+			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
+				     mr->sig->psv_memory.psv_idx);
+		if (mlx5_core_destroy_psv(&dev->mdev,
+					  mr->sig->psv_wire.psv_idx))
+			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
+				     mr->sig->psv_wire.psv_idx);
+		kfree(mr->sig);
+	}
+
+	err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+	if (err) {
+		mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
+			     mr->mmr.key, err);
+		return err;
+	}
+
+	kfree(mr);
+
+	return err;
+}
+
 struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 					int max_page_list_len)
 {
@@ -948,7 +1149,8 @@ struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 	 * TBD not needed - issue 197292 */
 	in->seg.log2_page_size = PAGE_SHIFT;
 
-	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in));
+	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
+				    NULL, NULL);
 	kfree(in);
 	if (err)
 		goto err_free;
@@ -1005,3 +1207,44 @@ void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
 	kfree(mfrpl->ibfrpl.page_list);
 	kfree(mfrpl);
 }
+
+int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
+			    struct ib_mr_status *mr_status)
+{
+	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
+	int ret = 0;
+
+	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
+		pr_err("Invalid status check mask\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mr_status->fail_status = 0;
+	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
+		if (!mmr->sig) {
+			ret = -EINVAL;
+			pr_err("signature status check requested on a non-signature enabled MR\n");
+			goto done;
+		}
+
+		mmr->sig->sig_status_checked = true;
+		if (!mmr->sig->sig_err_exists)
+			goto done;
+
+		if (ibmr->lkey == mmr->sig->err_item.key)
+			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
+			       sizeof(mr_status->sig_err));
+		else {
+			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
+			mr_status->sig_err.sig_err_offset = 0;
+			mr_status->sig_err.key = mmr->sig->err_item.key;
+		}
+
+		mmr->sig->sig_err_exists = false;
+		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
+	}
+
+done:
+	return ret;
+}
