Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/resource_tracker.c')
 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 613
 1 file changed, 450 insertions(+), 163 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 2f3f2bc7f28..0efc1368e5a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -52,6 +52,8 @@ struct mac_res { struct list_head list; u64 mac; + int ref_count; + u8 smac_index; u8 port; }; @@ -219,6 +221,11 @@ struct res_fs_rule { int qpn; }; +static int mlx4_is_eth(struct mlx4_dev *dev, int port) +{ + return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1; +} + static void *res_tracker_lookup(struct rb_root *root, u64 res_id) { struct rb_node *node = root->rb_node; @@ -272,7 +279,7 @@ enum qp_transition { }; /* For Debug uses */ -static const char *ResourceType(enum mlx4_resource rt) +static const char *resource_str(enum mlx4_resource rt) { switch (rt) { case RES_QP: return "RES_QP"; @@ -300,6 +307,7 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave, &priv->mfunc.master.res_tracker.res_alloc[res_type]; int err = -EINVAL; int allocated, free, reserved, guaranteed, from_free; + int from_rsvd; if (slave > dev->num_vfs) return -EINVAL; @@ -314,11 +322,16 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave, res_alloc->res_reserved; guaranteed = res_alloc->guaranteed[slave]; - if (allocated + count > res_alloc->quota[slave]) + if (allocated + count > res_alloc->quota[slave]) { + mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n", + slave, port, resource_str(res_type), count, + allocated, res_alloc->quota[slave]); goto out; + } if (allocated + count <= guaranteed) { err = 0; + from_rsvd = count; } else { /* portion may need to be obtained from free area */ if (guaranteed - allocated > 0) @@ -326,8 +339,14 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave, else from_free = count; - if (free - from_free > reserved) + from_rsvd = count - from_free; + + if (free - from_free >= reserved) err = 0; + else + mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n", + slave, port, resource_str(res_type), free, + from_free, reserved); } if (!err) { @@ -335,9 +354,11 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave, if (port > 0) { res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count; res_alloc->res_port_free[port - 1] -= count; + res_alloc->res_port_rsvd[port - 1] -= from_rsvd; } else { res_alloc->allocated[slave] += count; res_alloc->res_free -= count; + res_alloc->res_reserved -= from_rsvd; } } @@ -353,17 +374,36 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave, struct mlx4_priv *priv = mlx4_priv(dev); struct resource_allocator *res_alloc = &priv->mfunc.master.res_tracker.res_alloc[res_type]; + int allocated, guaranteed, from_rsvd; if (slave > dev->num_vfs) return; spin_lock(&res_alloc->alloc_lock); + + allocated = (port > 0) ? 
+ res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] : + res_alloc->allocated[slave]; + guaranteed = res_alloc->guaranteed[slave]; + + if (allocated - count >= guaranteed) { + from_rsvd = 0; + } else { + /* portion may need to be returned to reserved area */ + if (allocated - guaranteed > 0) + from_rsvd = count - (allocated - guaranteed); + else + from_rsvd = count; + } + if (port > 0) { res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count; res_alloc->res_port_free[port - 1] += count; + res_alloc->res_port_rsvd[port - 1] += from_rsvd; } else { res_alloc->allocated[slave] -= count; res_alloc->res_free += count; + res_alloc->res_reserved += from_rsvd; } spin_unlock(&res_alloc->alloc_lock); @@ -461,6 +501,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) spin_lock_init(&res_alloc->alloc_lock); for (t = 0; t < dev->num_vfs + 1; t++) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, t); switch (i) { case RES_QP: initialize_res_quotas(dev, res_alloc, RES_QP, @@ -490,10 +532,27 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) break; case RES_MAC: if (t == mlx4_master_func_num(dev)) { - res_alloc->quota[t] = MLX4_MAX_MAC_NUM; + int max_vfs_pport = 0; + /* Calculate the max vfs per port for */ + /* both ports. */ + for (j = 0; j < dev->caps.num_ports; + j++) { + struct mlx4_slaves_pport slaves_pport = + mlx4_phys_to_slaves_pport(dev, j + 1); + unsigned current_slaves = + bitmap_weight(slaves_pport.slaves, + dev->caps.num_ports) - 1; + if (max_vfs_pport < current_slaves) + max_vfs_pport = + current_slaves; + } + res_alloc->quota[t] = + MLX4_MAX_MAC_NUM - + 2 * max_vfs_pport; res_alloc->guaranteed[t] = 2; for (j = 0; j < MLX4_MAX_PORTS; j++) - res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM; + res_alloc->res_port_free[j] = + MLX4_MAX_MAC_NUM; } else { res_alloc->quota[t] = MLX4_MAX_MAC_NUM; res_alloc->guaranteed[t] = 2; @@ -521,9 +580,10 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) break; } if (i == RES_MAC || i == RES_VLAN) { - for (j = 0; j < MLX4_MAX_PORTS; j++) - res_alloc->res_port_rsvd[j] += - res_alloc->guaranteed[t]; + for (j = 0; j < dev->caps.num_ports; j++) + if (test_bit(j, actv_ports.ports)) + res_alloc->res_port_rsvd[j] += + res_alloc->guaranteed[t]; } else { res_alloc->res_reserved += res_alloc->guaranteed[t]; } @@ -559,6 +619,7 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev, } /* free master's vlans */ i = dev->caps.function; + mlx4_reset_roce_gids(dev, i); mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); rem_slave_vlans(dev, i); mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); @@ -600,15 +661,37 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, struct mlx4_qp_context *qp_ctx = inbox->buf + 8; enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf); u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; + int port; - if (MLX4_QP_ST_UD == ts) - qp_ctx->pri_path.mgid_index = 0x80 | slave; - - if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) { - if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) - qp_ctx->pri_path.mgid_index = slave & 0x7F; - if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) - qp_ctx->alt_path.mgid_index = slave & 0x7F; + if (MLX4_QP_ST_UD == ts) { + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port)) + qp_ctx->pri_path.mgid_index = + mlx4_get_base_gid_ix(dev, slave, port) | 0x80; + else + qp_ctx->pri_path.mgid_index = slave | 0x80; + + } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || 
MLX4_QP_ST_UC == ts) { + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port)) { + qp_ctx->pri_path.mgid_index += + mlx4_get_base_gid_ix(dev, slave, port); + qp_ctx->pri_path.mgid_index &= 0x7f; + } else { + qp_ctx->pri_path.mgid_index = slave & 0x7F; + } + } + if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { + port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port)) { + qp_ctx->alt_path.mgid_index += + mlx4_get_base_gid_ix(dev, slave, port); + qp_ctx->alt_path.mgid_index &= 0x7f; + } else { + qp_ctx->alt_path.mgid_index = slave & 0x7F; + } + } } } @@ -619,7 +702,6 @@ static int update_vport_qp_param(struct mlx4_dev *dev, struct mlx4_qp_context *qpc = inbox->buf + 8; struct mlx4_vport_oper_state *vp_oper; struct mlx4_priv *priv; - u32 qp_type; int port; port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; @@ -627,12 +709,6 @@ static int update_vport_qp_param(struct mlx4_dev *dev, vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; if (MLX4_VGT != vp_oper->state.default_vlan) { - qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; - if (MLX4_QP_ST_RC == qp_type || - (MLX4_QP_ST_UD == qp_type && - !mlx4_is_qp_reserved(dev, qpn))) - return -EINVAL; - /* the reserved QPs (special, proxy, tunnel) * do not operate over vlans */ @@ -920,7 +996,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave, ret = alloc_srq_tr(id); break; case RES_MAC: - printk(KERN_ERR "implementation missing\n"); + pr_err("implementation missing\n"); return NULL; case RES_COUNTER: ret = alloc_counter_tr(id); @@ -1014,10 +1090,10 @@ static int remove_mtt_ok(struct res_mtt *res, int order) { if (res->com.state == RES_MTT_BUSY || atomic_read(&res->ref_count)) { - printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n", - __func__, __LINE__, - mtt_states_str(res->com.state), - atomic_read(&res->ref_count)); + pr_devel("%s-%d: state %s, ref_count %d\n", + __func__, __LINE__, + mtt_states_str(res->com.state), + atomic_read(&res->ref_count)); return -EBUSY; } else if (res->com.state != RES_MTT_ALLOCATED) return -EPERM; @@ -1340,43 +1416,29 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn, spin_lock_irq(mlx4_tlock(dev)); r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn); - if (!r) + if (!r) { err = -ENOENT; - else if (r->com.owner != slave) + } else if (r->com.owner != slave) { err = -EPERM; - else { - switch (state) { - case RES_CQ_BUSY: - err = -EBUSY; - break; - - case RES_CQ_ALLOCATED: - if (r->com.state != RES_CQ_HW) - err = -EINVAL; - else if (atomic_read(&r->ref_count)) - err = -EBUSY; - else - err = 0; - break; - - case RES_CQ_HW: - if (r->com.state != RES_CQ_ALLOCATED) - err = -EINVAL; - else - err = 0; - break; - - default: + } else if (state == RES_CQ_ALLOCATED) { + if (r->com.state != RES_CQ_HW) err = -EINVAL; - } + else if (atomic_read(&r->ref_count)) + err = -EBUSY; + else + err = 0; + } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) { + err = -EINVAL; + } else { + err = 0; + } - if (!err) { - r->com.from_state = r->com.state; - r->com.to_state = state; - r->com.state = RES_CQ_BUSY; - if (cq) - *cq = r; - } + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_CQ_BUSY; + if (cq) + *cq = r; } spin_unlock_irq(mlx4_tlock(dev)); @@ -1385,7 +1447,7 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn, } static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, - 
enum res_cq_states state, struct res_srq **srq) + enum res_srq_states state, struct res_srq **srq) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; @@ -1394,39 +1456,25 @@ static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, spin_lock_irq(mlx4_tlock(dev)); r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index); - if (!r) + if (!r) { err = -ENOENT; - else if (r->com.owner != slave) + } else if (r->com.owner != slave) { err = -EPERM; - else { - switch (state) { - case RES_SRQ_BUSY: - err = -EINVAL; - break; - - case RES_SRQ_ALLOCATED: - if (r->com.state != RES_SRQ_HW) - err = -EINVAL; - else if (atomic_read(&r->ref_count)) - err = -EBUSY; - break; - - case RES_SRQ_HW: - if (r->com.state != RES_SRQ_ALLOCATED) - err = -EINVAL; - break; - - default: + } else if (state == RES_SRQ_ALLOCATED) { + if (r->com.state != RES_SRQ_HW) err = -EINVAL; - } + else if (atomic_read(&r->ref_count)) + err = -EBUSY; + } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) { + err = -EINVAL; + } - if (!err) { - r->com.from_state = r->com.state; - r->com.to_state = state; - r->com.state = RES_SRQ_BUSY; - if (srq) - *srq = r; - } + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_SRQ_BUSY; + if (srq) + *srq = r; } spin_unlock_irq(mlx4_tlock(dev)); @@ -1518,7 +1566,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, return err; if (!fw_reserved(dev, qpn)) { - err = __mlx4_qp_alloc_icm(dev, qpn); + err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL); if (err) { res_abort_move(dev, slave, RES_QP, qpn); return err; @@ -1605,7 +1653,7 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, if (err) return err; - err = __mlx4_mpt_alloc_icm(dev, mpt->key); + err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL); if (err) { res_abort_move(dev, slave, RES_MPT, id); return err; @@ -1687,11 +1735,39 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, return err; } -static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port) +static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port, + u8 smac_index, u64 *mac) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct mac_res *res; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + if (res->smac_index == smac_index && res->port == (u8) port) { + *mac = res->mac; + return 0; + } + } + return -ENOENT; +} + +static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + if (res->mac == mac && res->port == (u8) port) { + /* mac found. 
update ref count */ + ++res->ref_count; + return 0; + } + } if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port)) return -EINVAL; @@ -1702,6 +1778,8 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port) } res->mac = mac; res->port = (u8) port; + res->smac_index = smac_index; + res->ref_count = 1; list_add_tail(&res->list, &tracker->slave_list[slave].res_list[RES_MAC]); return 0; @@ -1718,9 +1796,11 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac, list_for_each_entry_safe(res, tmp, mac_list, list) { if (res->mac == mac && res->port == (u8) port) { - list_del(&res->list); - mlx4_release_resource(dev, slave, RES_MAC, 1, port); - kfree(res); + if (!--res->ref_count) { + list_del(&res->list); + mlx4_release_resource(dev, slave, RES_MAC, 1, port); + kfree(res); + } break; } } @@ -1733,10 +1813,13 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave) struct list_head *mac_list = &tracker->slave_list[slave].res_list[RES_MAC]; struct mac_res *res, *tmp; + int i; list_for_each_entry_safe(res, tmp, mac_list, list) { list_del(&res->list); - __mlx4_unregister_mac(dev, res->port, res->mac); + /* dereference the mac the num times the slave referenced it */ + for (i = 0; i < res->ref_count; i++) + __mlx4_unregister_mac(dev, res->port, res->mac); mlx4_release_resource(dev, slave, RES_MAC, 1, res->port); kfree(res); } @@ -1748,21 +1831,28 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, int err = -EINVAL; int port; u64 mac; + u8 smac_index; if (op != RES_OP_RESERVE_AND_MAP) return err; port = !in_port ? get_param_l(out_param) : in_port; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; mac = in_param; err = __mlx4_register_mac(dev, port, mac); if (err >= 0) { + smac_index = err; set_param_l(out_param, err); err = 0; } if (!err) { - err = mac_add_to_slave(dev, slave, mac, port); + err = mac_add_to_slave(dev, slave, mac, port, smac_index); if (err) __mlx4_unregister_mac(dev, port, mac); } @@ -1859,6 +1949,11 @@ static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, if (!port || op != RES_OP_RESERVE_AND_MAP) return -EINVAL; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; /* upstream kernels had NOP for reg/unreg vlan. Continue this. */ if (!in_port && port > 0 && port <= dev->caps.num_ports) { slave_state[slave].old_vlan_api = true; @@ -2156,6 +2251,11 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, switch (op) { case RES_OP_RESERVE_AND_MAP: port = !in_port ? 
get_param_l(out_param) : in_port; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; mac_del_from_slave(dev, slave, in_param, port); __mlx4_unregister_mac(dev, port, in_param); break; @@ -2175,6 +2275,11 @@ static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; int err = 0; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; switch (op) { case RES_OP_RESERVE_AND_MAP: if (slave_state[slave].old_vlan_api) @@ -2756,12 +2861,16 @@ static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start, } static int verify_qp_parameters(struct mlx4_dev *dev, + struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, enum qp_transition transition, u8 slave) { u32 qp_type; + u32 qpn; struct mlx4_qp_context *qp_ctx; enum mlx4_qp_optpar optpar; + int port; + int num_gids; qp_ctx = inbox->buf + 8; qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; @@ -2769,6 +2878,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev, switch (qp_type) { case MLX4_QP_ST_RC: + case MLX4_QP_ST_XRC: case MLX4_QP_ST_UC: switch (transition) { case QP_TRANS_INIT2RTR: @@ -2777,19 +2887,44 @@ static int verify_qp_parameters(struct mlx4_dev *dev, case QP_TRANS_SQD2SQD: case QP_TRANS_SQD2RTS: if (slave != mlx4_master_func_num(dev)) - /* slaves have only gid index 0 */ - if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) - if (qp_ctx->pri_path.mgid_index) + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + else + num_gids = 1; + if (qp_ctx->pri_path.mgid_index >= num_gids) return -EINVAL; - if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) - if (qp_ctx->alt_path.mgid_index) + } + if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { + port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + else + num_gids = 1; + if (qp_ctx->alt_path.mgid_index >= num_gids) return -EINVAL; + } break; default: break; } + break; + case MLX4_QP_ST_MLX: + qpn = vhcr->in_modifier & 0x7fffff; + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (transition == QP_TRANS_INIT2RTR && + slave != mlx4_master_func_num(dev) && + mlx4_is_qp_reserved(dev, qpn) && + !mlx4_vf_smi_enabled(dev, slave, port)) { + /* only enabled VFs may create MLX proxy QPs */ + mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n", + __func__, slave, port); + return -EPERM; + } break; + default: break; } @@ -3296,6 +3431,58 @@ int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); } +static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave, + struct mlx4_qp_context *qpc, + struct mlx4_cmd_mailbox *inbox) +{ + enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf); + u8 pri_sched_queue; + int port = mlx4_slave_convert_port( + dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1; + + if (port < 0) + return -EINVAL; + + pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) | + ((port & 1) << 6); + + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH || + mlx4_is_eth(dev, port + 1)) { + qpc->pri_path.sched_queue = pri_sched_queue; + } + + if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { + port = mlx4_slave_convert_port( + 
dev, slave, (qpc->alt_path.sched_queue >> 6 & 1) + + 1) - 1; + if (port < 0) + return -EINVAL; + qpc->alt_path.sched_queue = + (qpc->alt_path.sched_queue & ~(1 << 6)) | + (port & 1) << 6; + } + return 0; +} + +static int roce_verify_mac(struct mlx4_dev *dev, int slave, + struct mlx4_qp_context *qpc, + struct mlx4_cmd_mailbox *inbox) +{ + u64 mac; + int port; + u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; + u8 sched = *(u8 *)(inbox->buf + 64); + u8 smac_ix; + + port = (sched >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) { + smac_ix = qpc->pri_path.grh_mylmc & 0x7f; + if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac)) + return -ENOENT; + } + return 0; +} + int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -3314,9 +3501,15 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, u8 orig_vlan_index = qpc->pri_path.vlan_index; u8 orig_feup = qpc->pri_path.feup; - err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave); + err = adjust_qp_sched_queue(dev, slave, qpc, inbox); if (err) return err; + err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave); + if (err) + return err; + + if (roce_verify_mac(dev, slave, qpc, inbox)) + return -EINVAL; update_pkey_index(dev, slave, inbox); update_gid(dev, inbox, (u8)slave); @@ -3362,7 +3555,10 @@ int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, int err; struct mlx4_qp_context *context = inbox->buf + 8; - err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave); + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; + err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave); if (err) return err; @@ -3381,7 +3577,10 @@ int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, int err; struct mlx4_qp_context *context = inbox->buf + 8; - err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave); + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; + err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave); if (err) return err; @@ -3399,6 +3598,9 @@ int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_info *cmd) { struct mlx4_qp_context *context = inbox->buf + 8; + int err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; adjust_proxy_tun_qkey(dev, vhcr, context); return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); } @@ -3412,7 +3614,10 @@ int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, int err; struct mlx4_qp_context *context = inbox->buf + 8; - err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave); + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; + err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave); if (err) return err; @@ -3431,7 +3636,10 @@ int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, int err; struct mlx4_qp_context *context = inbox->buf + 8; - err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave); + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; + err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave); if (err) return err; @@ -3534,16 +3742,26 @@ static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, return err; } -static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], - int block_loopback, enum mlx4_protocol prot, +static int qp_attach(struct 
mlx4_dev *dev, int slave, struct mlx4_qp *qp, + u8 gid[16], int block_loopback, enum mlx4_protocol prot, enum mlx4_steer_type type, u64 *reg_id) { switch (dev->caps.steering_mode) { - case MLX4_STEERING_MODE_DEVICE_MANAGED: - return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5], + case MLX4_STEERING_MODE_DEVICE_MANAGED: { + int port = mlx4_slave_convert_port(dev, slave, gid[5]); + if (port < 0) + return port; + return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, block_loopback, prot, reg_id); + } case MLX4_STEERING_MODE_B0: + if (prot == MLX4_PROT_ETH) { + int port = mlx4_slave_convert_port(dev, slave, gid[5]); + if (port < 0) + return port; + gid[5] = port; + } return mlx4_qp_attach_common(dev, qp, gid, block_loopback, prot, type); default: @@ -3551,9 +3769,9 @@ static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], } } -static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], - enum mlx4_protocol prot, enum mlx4_steer_type type, - u64 reg_id) +static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], enum mlx4_protocol prot, + enum mlx4_steer_type type, u64 reg_id) { switch (dev->caps.steering_mode) { case MLX4_STEERING_MODE_DEVICE_MANAGED: @@ -3565,6 +3783,25 @@ static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], } } +static int mlx4_adjust_port(struct mlx4_dev *dev, int slave, + u8 *gid, enum mlx4_protocol prot) +{ + int real_port; + + if (prot != MLX4_PROT_ETH) + return 0; + + if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 || + dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { + real_port = mlx4_slave_convert_port(dev, slave, gid[5]); + if (real_port < 0) + return -EINVAL; + gid[5] = real_port; + } + + return 0; +} + int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -3590,7 +3827,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, qp.qpn = qpn; if (attach) { - err = qp_attach(dev, &qp, gid, block_loopback, prot, + err = qp_attach(dev, slave, &qp, gid, block_loopback, prot, type, ®_id); if (err) { pr_err("Fail to attach rule to qp 0x%x\n", qpn); @@ -3600,6 +3837,10 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, if (err) goto ex_detach; } else { + err = mlx4_adjust_port(dev, slave, gid, prot); + if (err) + goto ex_put; + err = rem_mcg_res(dev, slave, rqp, gid, prot, type, ®_id); if (err) goto ex_put; @@ -3634,7 +3875,7 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header, !is_broadcast_ether_addr(eth_header->eth.dst_mac)) { list_for_each_entry_safe(res, tmp, rlist, list) { be_mac = cpu_to_be64(res->mac << 16); - if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN)) + if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac)) return 0; } pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n", @@ -3689,7 +3930,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave, } } if (!be_mac) { - pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n", + pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n", port); return -EINVAL; } @@ -3704,6 +3945,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave, } +#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX) +int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd_info) +{ + int 
err; + u32 qpn = vhcr->in_modifier & 0xffffff; + struct res_qp *rqp; + u64 mac; + unsigned port; + u64 pri_addr_path_mask; + struct mlx4_update_qp_context *cmd; + int smac_index; + + cmd = (struct mlx4_update_qp_context *)inbox->buf; + + pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask); + if (cmd->qp_mask || cmd->secondary_addr_path_mask || + (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED)) + return -EPERM; + + /* Just change the smac for the QP */ + err = get_res(dev, slave, qpn, RES_QP, &rqp); + if (err) { + mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave); + return err; + } + + port = (rqp->sched_queue >> 6 & 1) + 1; + smac_index = cmd->qp_context.pri_path.grh_mylmc; + err = mac_find_smac_ix_in_slave(dev, slave, port, + smac_index, &mac); + if (err) { + mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n", + qpn, smac_index); + goto err_mac; + } + + err = mlx4_cmd(dev, inbox->dma, + vhcr->in_modifier, 0, + MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn); + goto err_mac; + } + +err_mac: + put_res(dev, slave, qpn, RES_QP); + return err; +} + int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -3726,10 +4021,13 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, return -EOPNOTSUPP; ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; + ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port); + if (ctrl->port <= 0) + return -EINVAL; qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; err = get_res(dev, slave, qpn, RES_QP, &rqp); if (err) { - pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); + pr_err("Steering rule with qpn 0x%x rejected\n", qpn); return err; } rule_header = (struct _rule_hw *)(ctrl + 1); @@ -3747,7 +4045,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, case MLX4_NET_TRANS_RULE_ID_IPV4: case MLX4_NET_TRANS_RULE_ID_TCP: case MLX4_NET_TRANS_RULE_ID_UDP: - pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n"); + pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n"); if (add_eth_header(dev, slave, inbox, rlist, header_id)) { err = -EINVAL; goto err_put; @@ -3756,7 +4054,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; break; default: - pr_err("Corrupted mailbox.\n"); + pr_err("Corrupted mailbox\n"); err = -EINVAL; goto err_put; } @@ -3770,7 +4068,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); if (err) { - mlx4_err(dev, "Fail to add flow steering resources.\n "); + mlx4_err(dev, "Fail to add flow steering resources\n"); /* detach rule*/ mlx4_cmd(dev, vhcr->out_param, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, @@ -3808,7 +4106,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); if (err) { - mlx4_err(dev, "Fail to remove flow steering resources.\n "); + mlx4_err(dev, "Fail to remove flow steering resources\n"); goto out; } @@ -3886,7 +4184,7 @@ static int _move_all_busy(struct mlx4_dev *dev, int slave, if (print) mlx4_dbg(dev, "%s id 0x%llx is busy\n", - ResourceType(type), + resource_str(type), r->res_id); ++busy; } else { @@ -3937,8 +4235,8 @@ static void 
rem_slave_qps(struct mlx4_dev *dev, int slave) err = move_all_busy(dev, slave, RES_QP); if (err) - mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy" - "for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n", + slave); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(qp, tmp, qp_list, com.list) { @@ -3976,10 +4274,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave) MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_qps: failed" - " to move slave %d qpn %d to" - " reset\n", slave, - qp->local_qpn); + mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n", + slave, qp->local_qpn); atomic_dec(&qp->rcq->ref_count); atomic_dec(&qp->scq->ref_count); atomic_dec(&qp->mtt->ref_count); @@ -4013,8 +4309,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave) err = move_all_busy(dev, slave, RES_SRQ); if (err) - mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n", + slave); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(srq, tmp, srq_list, com.list) { @@ -4044,9 +4340,7 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave) MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_srqs: failed" - " to move slave %d srq %d to" - " SW ownership\n", + mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n", slave, srqn); atomic_dec(&srq->mtt->ref_count); @@ -4081,8 +4375,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave) err = move_all_busy(dev, slave, RES_CQ); if (err) - mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n", + slave); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(cq, tmp, cq_list, com.list) { @@ -4112,9 +4406,7 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave) MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_cqs: failed" - " to move slave %d cq %d to" - " SW ownership\n", + mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n", slave, cqn); atomic_dec(&cq->mtt->ref_count); state = RES_CQ_ALLOCATED; @@ -4146,8 +4438,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave) err = move_all_busy(dev, slave, RES_MPT); if (err) - mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n", + slave); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) { @@ -4182,9 +4474,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave) MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_mrs: failed" - " to move slave %d mpt %d to" - " SW ownership\n", + mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n", slave, mptn); if (mpt->mtt) atomic_dec(&mpt->mtt->ref_count); @@ -4216,8 +4506,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave) err = move_all_busy(dev, slave, RES_MTT); if (err) - mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n", + slave); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(mtt, tmp, mtt_list, 
com.list) { @@ -4319,8 +4609,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave) err = move_all_busy(dev, slave, RES_EQ); if (err) - mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n", + slave); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(eq, tmp, eq_list, com.list) { @@ -4352,9 +4642,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave) MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_eqs: failed" - " to move slave %d eqs %d to" - " SW ownership\n", slave, eqn); + mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", + slave, eqn); mlx4_free_cmd_mailbox(dev, mailbox); atomic_dec(&eq->mtt->ref_count); state = RES_EQ_RESERVED; @@ -4383,8 +4672,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave) err = move_all_busy(dev, slave, RES_COUNTER); if (err) - mlx4_warn(dev, "rem_slave_counters: Could not move all counters to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n", + slave); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(counter, tmp, counter_list, com.list) { @@ -4414,8 +4703,8 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave) err = move_all_busy(dev, slave, RES_XRCD); if (err) - mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n", + slave); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) { @@ -4433,7 +4722,7 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave) void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); - + mlx4_reset_roce_gids(dev, slave); mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); rem_slave_vlans(dev, slave); rem_slave_macs(dev, slave); @@ -4560,10 +4849,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) 0, MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); if (err) { - mlx4_info(dev, "UPDATE_QP failed for slave %d, " - "port %d, qpn %d (%d)\n", - work->slave, port, qp->local_qpn, - err); + mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n", + work->slave, port, qp->local_qpn, err); errors++; } } |
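
The largest behavioral change in this patch is the guaranteed/free/reserved arithmetic in mlx4_grant_resource() and mlx4_release_resource(). Below is a minimal user-space sketch of that accounting; the struct names (pool, slave_acct) are hypothetical stand-ins for the resource_allocator fields, but the arithmetic mirrors the diff: a grant is charged against the slave's guaranteed share first, and only the remainder may draw the shared free pool down toward the reserved watermark.

#include <stdio.h>

struct pool {
	int free;      /* shared pool (res_free / res_port_free) */
	int reserved;  /* sum of still-unused guarantees (res_reserved) */
};

struct slave_acct {
	int allocated;  /* currently held by this slave */
	int quota;      /* hard per-slave cap */
	int guaranteed; /* share that is always available to it */
};

int grant(struct pool *p, struct slave_acct *s, int count)
{
	int from_free, from_rsvd;

	if (s->allocated + count > s->quota)
		return -1;                      /* quota exceeded */

	if (s->allocated + count <= s->guaranteed) {
		from_rsvd = count;              /* fully covered by the guarantee */
	} else {
		from_free = (s->guaranteed > s->allocated) ?
			count - (s->guaranteed - s->allocated) : count;
		from_rsvd = count - from_free;
		/* the free pool must not eat into other slaves' guarantees */
		if (p->free - from_free < p->reserved)
			return -1;
	}

	s->allocated += count;
	p->free -= count;       /* guarantees are carved out of the same pool */
	p->reserved -= from_rsvd;
	return 0;
}

void release(struct pool *p, struct slave_acct *s, int count)
{
	int from_rsvd;

	if (s->allocated - count >= s->guaranteed)
		from_rsvd = 0;
	else if (s->allocated > s->guaranteed)
		from_rsvd = count - (s->allocated - s->guaranteed);
	else
		from_rsvd = count;              /* all of it returns to reserved */

	s->allocated -= count;
	p->free += count;
	p->reserved += from_rsvd;
}

int main(void)
{
	struct pool p = { .free = 128, .reserved = 2 };
	struct slave_acct vf = { .quota = 100, .guaranteed = 2 };

	printf("grant 2:   %d\n", grant(&p, &vf, 2));   /* served from guarantee */
	printf("grant 127: %d\n", grant(&p, &vf, 127)); /* fails: quota exceeded */
	release(&p, &vf, 2);
	return 0;
}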
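A second recurring theme is the per-slave MAC bookkeeping: mac_res gains a ref_count and the smac_index returned by __mlx4_register_mac(), so re-registering the same MAC bumps a counter instead of adding a duplicate entry, and mac_find_smac_ix_in_slave() can map an smac index back to a MAC for roce_verify_mac() and the UPDATE_QP wrapper. A sketch of that pattern, using a plain singly linked list as a stand-in for the kernel's list_head:

#include <stdint.h>
#include <stdlib.h>

struct mac_res {
	struct mac_res *next;
	uint64_t mac;
	int ref_count;
	uint8_t smac_index;
	uint8_t port;
};

int mac_add(struct mac_res **head, uint64_t mac, uint8_t port,
	    uint8_t smac_index)
{
	struct mac_res *res;

	for (res = *head; res; res = res->next) {
		if (res->mac == mac && res->port == port) {
			++res->ref_count;       /* already tracked: just count */
			return 0;
		}
	}
	res = malloc(sizeof(*res));
	if (!res)
		return -1;
	res->mac = mac;
	res->port = port;
	res->smac_index = smac_index;
	res->ref_count = 1;
	res->next = *head;
	*head = res;
	return 0;
}

int find_mac_by_smac_ix(struct mac_res *head, uint8_t port,
			uint8_t smac_index, uint64_t *mac)
{
	for (; head; head = head->next)
		if (head->smac_index == smac_index && head->port == port) {
			*mac = head->mac;
			return 0;
		}
	return -1;      /* the kernel code returns -ENOENT here */
}

The same counting explains rem_slave_macs() above: on slave shutdown each entry is unregistered ref_count times, once per registration the slave performed.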
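Most of the new port handling keys off one encoding: bit 6 of the QP context's sched_queue byte selects the physical port (0 for port 1, 1 for port 2). adjust_qp_sched_queue() remaps a single-port VF's virtual port by rewriting that bit. A sketch of the bit manipulation, where slave_to_phys_port() is a hypothetical stand-in for mlx4_slave_convert_port() (shown here as an identity mapping):

#include <stdint.h>

int slave_to_phys_port(int slave, int virt_port)
{
	/* assumption for the sketch: identity mapping; the real helper
	 * returns a negative value for a port the slave cannot see */
	(void)slave;
	return virt_port;
}

int adjust_sched_queue(uint8_t *sched_queue, int slave)
{
	int port = slave_to_phys_port(slave, ((*sched_queue >> 6) & 1) + 1);

	if (port < 0)
		return -1;      /* kernel code returns -EINVAL */

	/* clear bit 6 and re-encode the (possibly remapped) port */
	*sched_queue = (*sched_queue & ~(1 << 6)) | (((port - 1) & 1) << 6);
	return 0;
}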
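Finally, update_gid() now virtualizes GID indexes on RoCE ports: each slave owns a window of the 128-entry GID table, so the master offsets a slave's mgid_index by the slave's base index. The window size below (GIDS_PER_SLAVE) is an assumption for illustration; the real base comes from mlx4_get_base_gid_ix(), for which base_gid_ix() is a stand-in.

#include <stdint.h>

#define GIDS_PER_SLAVE 8        /* assumed window size for the sketch */

int base_gid_ix(int slave)
{
	return slave * GIDS_PER_SLAVE;
}

uint8_t remap_rc_gid(uint8_t mgid_index, int slave)
{
	/* RC/UC/XRC paths: offset into the slave's window, kept to 7 bits */
	return (uint8_t)((mgid_index + base_gid_ix(slave)) & 0x7f);
}

uint8_t remap_ud_gid(int slave)
{
	/* UD path: the base index with bit 7 set, as in the patch */
	return (uint8_t)(base_gid_ix(slave) | 0x80);
}

On IB ports the old behavior is kept: the slave number itself is used as the index, so verify_qp_parameters() correspondingly allows num_gids entries on Ethernet ports but only index 0 on IB.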
