diff options
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/icm.c')
 drivers/net/ethernet/mellanox/mlx4/icm.c | 49 ++++++++++++++++++++++++++++++++++---------------
 1 file changed, 34 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 31d02649be4..97c9b1db1d2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -93,13 +93,17 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)  	kfree(icm);  } -static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) +static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, +				gfp_t gfp_mask, int node)  {  	struct page *page; -	page = alloc_pages(gfp_mask, order); -	if (!page) -		return -ENOMEM; +	page = alloc_pages_node(node, gfp_mask, order); +	if (!page) { +		page = alloc_pages(gfp_mask, order); +		if (!page) +			return -ENOMEM; +	}  	sg_set_page(mem, page, PAGE_SIZE << order, 0);  	return 0; @@ -130,9 +134,15 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,  	/* We use sg_set_buf for coherent allocs, which assumes low memory */  	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); -	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); -	if (!icm) -		return NULL; +	icm = kmalloc_node(sizeof(*icm), +			   gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN), +			   dev->numa_node); +	if (!icm) { +		icm = kmalloc(sizeof(*icm), +			      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); +		if (!icm) +			return NULL; +	}  	icm->refcount = 0;  	INIT_LIST_HEAD(&icm->chunk_list); @@ -141,10 +151,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,  	while (npages > 0) {  		if (!chunk) { -			chunk = kmalloc(sizeof *chunk, -					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); -			if (!chunk) -				goto fail; +			chunk = kmalloc_node(sizeof(*chunk), +					     gfp_mask & ~(__GFP_HIGHMEM | +							  __GFP_NOWARN), +					     dev->numa_node); +			if (!chunk) { +				chunk = kmalloc(sizeof(*chunk), +						gfp_mask & ~(__GFP_HIGHMEM | +							     __GFP_NOWARN)); +				if (!chunk) +					goto fail; +			}  			
sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);  			chunk->npages = 0; @@ -161,7 +178,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,  						      cur_order, gfp_mask);  		else  			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], -						   cur_order, gfp_mask); +						   cur_order, gfp_mask, +						   dev->numa_node);  		if (ret) {  			if (--cur_order < 0) @@ -227,7 +245,8 @@ int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)  			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);  } -int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj) +int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj, +		   gfp_t gfp)  {  	u32 i = (obj & (table->num_obj - 1)) /  			(MLX4_TABLE_CHUNK_SIZE / table->obj_size); @@ -241,7 +260,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)  	}  	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT, -				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | +				       (table->lowmem ? gfp : GFP_HIGHUSER) |  				       __GFP_NOWARN, table->coherent);  	if (!table->icm[i]) {  		ret = -ENOMEM; @@ -338,7 +357,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,  	u32 i;  	for (i = start; i <= end; i += inc) { -		err = mlx4_table_get(dev, table, i); +		err = mlx4_table_get(dev, table, i, GFP_KERNEL);  		if (err)  			goto fail;  	}  | 
