mlx5-fixes-2022-12-28
-----BEGIN PGP SIGNATURE----- iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmOsm0sACgkQSD+KveBX +j4NAQgAjX16buWsLNdhxHIfCX5AtF96Y1QTBhM/LInv3bjcoraS9SUvjw6W3UtX 2obzgJTp99y/UomoOgIB9ykS51TcvA5htJ2ReUdwVLtaAmVAy5ZnklzMHutb/S1X 16Gp2N1rO1wlEwv71JlMss0jzR0nqQLraP1VkLMGKvV2XxSglx3zIOcHBTkx+KtC tjkMiRNYvN26WK66oubQl2AjjswD4ojfv7mmkX+8k6VZhhQsQZhLt/vT6OOF1qRw BaxpTJnr6mFkiwmfZg9kdW704d4bP3RzTY8xbYO73jf+xbl0XwGS5jwLaDDCc4Uo 0lz/3agl+d8lZmdfwRdtwqlcIRKzRQ== =5A+W -----END PGP SIGNATURE----- Merge tag 'mlx5-fixes-2022-12-28' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux mlx5-fixes-2022-12-28
commit a512807c24
@@ -468,7 +468,7 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
 	bool new_state = val.vbool;
 
 	if (new_state && !MLX5_CAP_GEN(dev, roce) &&
-	    !MLX5_CAP_GEN(dev, roce_rw_supported)) {
+	    !(MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce))) {
 		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
 		return -EOPNOTSUPP;
 	}
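The two RoCE hunks (this one, apparently from devlink.c, and the handle_hca_cap() one further down) make the same point: roce_rw_supported only says the capability field is writable at runtime, while MLX5_CAP_GEN_MAX reports whether it may be set at all. A toy userspace sketch of that current-vs-max capability gating (invented structs, not the real mlx5 API):

#include <stdbool.h>
#include <stdio.h>

/* Toy model: each capability has a current value and a maximum
 * (settable) value, mirroring MLX5_CAP_GEN vs MLX5_CAP_GEN_MAX. */
struct toy_caps {
	bool roce_cur;          /* capability currently enabled */
	bool roce_max;          /* capability can ever be enabled */
	bool roce_rw_supported; /* capability is writable at runtime */
};

static int enable_roce_validate(const struct toy_caps *caps, bool new_state)
{
	/* Reject only if RoCE is off now AND cannot be turned on. */
	if (new_state && !caps->roce_cur &&
	    !(caps->roce_rw_supported && caps->roce_max))
		return -1; /* stand-in for -EOPNOTSUPP */
	return 0;
}

int main(void)
{
	struct toy_caps caps = { .roce_cur = false, .roce_max = true,
				 .roce_rw_supported = true };
	printf("enable -> %d\n", enable_roce_validate(&caps, true)); /* 0: allowed */
	caps.roce_max = false;
	printf("enable -> %d\n", enable_roce_validate(&caps, true)); /* -1: rejected */
	return 0;
}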
@@ -563,7 +563,7 @@ static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
 					  union devlink_param_value val,
 					  struct netlink_ext_ack *extack)
 {
-	return (val.vu16 >= 64 && val.vu16 <= 4096) ? 0 : -EINVAL;
+	return (val.vu32 >= 64 && val.vu32 <= 4096) ? 0 : -EINVAL;
 }
 
 static const struct devlink_param mlx5_devlink_params[] = {
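The eq-depth devlink params are u32 values, but the old validator read them through the union's vu16 member, so any value whose low 16 bits happen to land in [64, 4096] would wrongly pass. A standalone sketch of the failure mode (toy union mirroring devlink_param_value; the punning result assumes a little-endian machine):

#include <stdint.h>
#include <stdio.h>

/* Toy mirror of devlink_param_value's integer members. */
union toy_param_value {
	uint16_t vu16;
	uint32_t vu32;
};

static int validate_u16(union toy_param_value val)
{
	return (val.vu16 >= 64 && val.vu16 <= 4096) ? 0 : -1; /* buggy */
}

static int validate_u32(union toy_param_value val)
{
	return (val.vu32 >= 64 && val.vu32 <= 4096) ? 0 : -1; /* fixed */
}

int main(void)
{
	/* 0x10100 = 65792: far out of range, but its low 16 bits
	 * are 0x0100 = 256, which is "in range" for the u16 check. */
	union toy_param_value val = { .vu32 = 0x10100 };

	printf("buggy: %d, fixed: %d\n", validate_u16(val), validate_u32(val));
	return 0;
}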
@@ -459,7 +459,11 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
 		goto unlock;
 
 	for (i = 0; i < priv->channels.num; i++) {
-		struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
+		struct mlx5e_channel *c = priv->channels.c[i];
+		struct mlx5e_rq *rq;
+
+		rq = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state) ?
+			&c->xskrq : &c->rq;
 
 		err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg);
 		if (err)
@@ -2103,14 +2103,9 @@ out_err:
 static void
 mlx5_ct_tc_create_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
 {
-	bool is_fdb = ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB;
 	struct mlx5_tc_ct_debugfs *ct_dbgfs = &ct_priv->debugfs;
-	char dirname[16] = {};
-
-	if (sscanf(dirname, "ct_%s", is_fdb ? "fdb" : "nic") < 0)
-		return;
 
-	ct_dbgfs->root = debugfs_create_dir(dirname, mlx5_debugfs_get_dev_root(ct_priv->dev));
+	ct_dbgfs->root = debugfs_create_dir("ct", mlx5_debugfs_get_dev_root(ct_priv->dev));
 	debugfs_create_atomic_t("offloaded", 0400, ct_dbgfs->root,
 				&ct_dbgfs->stats.offloaded);
 	debugfs_create_atomic_t("rx_dropped", 0400, ct_dbgfs->root,
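The deleted code called sscanf() where building a string was clearly intended: sscanf parses *from* its first argument, and dirname was zero-initialized, so the debugfs directory would have gotten an empty name. The fix sidesteps naming entirely and uses "ct". A tiny hypothetical demo of why the original shape could never work, contrasted with the snprintf shape:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool is_fdb = true;
	char dirname[16] = {};
	char scratch[8] = {};
	int n;

	/* Buggy shape: sscanf PARSES FROM dirname (empty), it does not
	 * build a string, so dirname stays "" and nothing is matched. */
	n = sscanf(dirname, "ct_%7s", scratch);
	printf("sscanf matched %d item(s), dirname=\"%s\"\n", n, dirname);

	/* Intended shape: snprintf FORMATS INTO dirname. */
	snprintf(dirname, sizeof(dirname), "ct_%s", is_fdb ? "fdb" : "nic");
	printf("snprintf builds dirname=\"%s\"\n", dirname);
	return 0;
}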
@@ -222,7 +222,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
 	int err;
 
 	list_for_each_entry(flow, flow_list, tmp_list) {
-		if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
+		if (!mlx5e_is_offloaded_flow(flow))
 			continue;
 
 		attr = mlx5e_tc_get_encap_attr(flow);
@@ -231,6 +231,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
 		esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
 		esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL;
 
+		/* Clear pkt_reformat before checking slow path flag. Because
+		 * in next iteration, the same flow is already set slow path
+		 * flag, but still need to clear the pkt_reformat.
+		 */
+		if (flow_flag_test(flow, SLOW))
+			continue;
+
 		/* update from encap rule to slow path rule */
 		spec = &flow->attr->parse_attr->spec;
 		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
@@ -273,6 +273,11 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
 		 geneve_tlv_option_0_data, be32_to_cpu(opt_data_key));
 	MLX5_SET(fte_match_set_misc3, misc_3_c,
 		 geneve_tlv_option_0_data, be32_to_cpu(opt_data_mask));
+	if (MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+				       ft_field_support.geneve_tlv_option_0_exist)) {
+		MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, geneve_tlv_option_0_exist);
+		MLX5_SET_TO_ONES(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist);
+	}
 
 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
 
@@ -1305,7 +1305,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
 	sq->channel   = c;
 	sq->uar_map   = mdev->mlx5e_res.hw_objs.bfreg.map;
 	sq->min_inline_mode = params->tx_min_inline_mode;
-	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN;
 	sq->xsk_pool  = xsk_pool;
 
 	sq->stats = sq->xsk_pool ?
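MLX5E_SW2HW_MTU converts the software MTU to the hardware MTU by adding the configured hard_mtu overhead; for the XDP SQ the 4-byte Ethernet FCS must not be counted, hence the subtraction. A back-of-envelope sketch (the overhead composition is assumed for illustration, mirroring typical mlx5e Ethernet defaults):

#include <stdio.h>

#define ETH_HLEN    14 /* dst MAC + src MAC + ethertype */
#define VLAN_HLEN    4
#define ETH_FCS_LEN  4

/* Mirrors the shape of the kernel's MLX5E_SW2HW_MTU:
 * hw MTU = sw MTU + L2 overhead. */
#define SW2HW_MTU(hard_mtu, swmtu) ((swmtu) + (hard_mtu))

int main(void)
{
	/* Assumption for illustration: hard_mtu covers the Ethernet
	 * header, one VLAN tag and the FCS. */
	unsigned int hard_mtu = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
	unsigned int sw_mtu = 1500;

	printf("hw_mtu incl. FCS:  %u\n", SW2HW_MTU(hard_mtu, sw_mtu));               /* 1522 */
	printf("hw_mtu for XDP SQ: %u\n", SW2HW_MTU(hard_mtu, sw_mtu) - ETH_FCS_LEN); /* 1518 */
	return 0;
}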
@@ -67,6 +67,7 @@ static void esw_acl_egress_lgcy_groups_destroy(struct mlx5_vport *vport)
 int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
 			      struct mlx5_vport *vport)
 {
+	bool vst_mode_steering = esw_vst_mode_is_steering(esw);
 	struct mlx5_flow_destination drop_ctr_dst = {};
 	struct mlx5_flow_destination *dst = NULL;
 	struct mlx5_fc *drop_counter = NULL;
@@ -77,6 +78,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
 	 */
 	int table_size = 2;
 	int dest_num = 0;
+	int actions_flag;
 	int err = 0;
 
 	if (vport->egress.legacy.drop_counter) {
@@ -119,8 +121,11 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
 			  vport->vport, vport->info.vlan, vport->info.qos);
 
 	/* Allowed vlan rule */
+	actions_flag = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+	if (vst_mode_steering)
+		actions_flag |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
 	err = esw_egress_acl_vlan_create(esw, vport, NULL, vport->info.vlan,
-					 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+					 actions_flag);
 	if (err)
 		goto out;
 
@@ -139,11 +139,14 @@ static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport)
 int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
 			       struct mlx5_vport *vport)
 {
+	bool vst_mode_steering = esw_vst_mode_is_steering(esw);
 	struct mlx5_flow_destination drop_ctr_dst = {};
 	struct mlx5_flow_destination *dst = NULL;
 	struct mlx5_flow_act flow_act = {};
 	struct mlx5_flow_spec *spec = NULL;
 	struct mlx5_fc *counter = NULL;
+	bool vst_check_cvlan = false;
+	bool vst_push_cvlan = false;
 	/* The ingress acl table contains 4 groups
 	 * (2 active rules at the same time -
 	 * 1 allow rule from one of the first 3 groups.
@@ -203,7 +206,26 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
 		goto out;
 	}
 
-	if (vport->info.vlan || vport->info.qos)
+	if ((vport->info.vlan || vport->info.qos)) {
+		if (vst_mode_steering)
+			vst_push_cvlan = true;
+		else if (!MLX5_CAP_ESW(esw->dev, vport_cvlan_insert_always))
+			vst_check_cvlan = true;
+	}
+
+	if (vst_check_cvlan || vport->info.spoofchk)
+		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+	/* Create ingress allow rule */
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+	if (vst_push_cvlan) {
+		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+		flow_act.vlan[0].prio = vport->info.qos;
+		flow_act.vlan[0].vid = vport->info.vlan;
+		flow_act.vlan[0].ethtype = ETH_P_8021Q;
+	}
+
+	if (vst_check_cvlan)
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.cvlan_tag);
 
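Taken together, the ingress hunks let VST be enforced either in steering (an ACL rule pushes the cvlan) or in firmware (vport-context insert), chosen by capability. A compact sketch of that decision, with toy inputs standing in for MLX5_CAP_ESW and the vport state:

#include <stdbool.h>
#include <stdio.h>

enum vst_ingress_mode {
	VST_NONE,        /* no VST, or firmware inserts unconditionally */
	VST_PUSH_CVLAN,  /* steering mode: ACL rule pushes the cvlan */
	VST_CHECK_CVLAN, /* firmware mode: ACL must match "no cvlan" */
};

static enum vst_ingress_mode
vst_ingress_mode(bool has_vlan_or_qos, bool vst_mode_steering,
		 bool cvlan_insert_always)
{
	if (!has_vlan_or_qos)
		return VST_NONE;
	if (vst_mode_steering)
		return VST_PUSH_CVLAN;
	/* Firmware VST: if the device can only insert when no cvlan is
	 * present, the ACL additionally restricts to untagged traffic. */
	return cvlan_insert_always ? VST_NONE : VST_CHECK_CVLAN;
}

int main(void)
{
	printf("%d\n", vst_ingress_mode(true, true, false));  /* VST_PUSH_CVLAN */
	printf("%d\n", vst_ingress_mode(true, false, false)); /* VST_CHECK_CVLAN */
	printf("%d\n", vst_ingress_mode(true, false, true));  /* VST_NONE */
	return 0;
}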
@@ -218,9 +240,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
 		ether_addr_copy(smac_v, vport->info.mac);
 	}
 
-	/* Create ingress allow rule */
-	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
 	vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
 						  &flow_act, NULL, 0);
 	if (IS_ERR(vport->ingress.allow_rule)) {
@@ -232,6 +251,9 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
 		goto out;
 	}
 
+	if (!vst_check_cvlan && !vport->info.spoofchk)
+		goto out;
+
 	memset(&flow_act, 0, sizeof(flow_act));
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
 	/* Attach drop flow counter */
@@ -257,7 +279,8 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
 	return 0;
 
 out:
-	esw_acl_ingress_lgcy_cleanup(esw, vport);
+	if (err)
+		esw_acl_ingress_lgcy_cleanup(esw, vport);
 	kvfree(spec);
 	return err;
 }
@@ -161,10 +161,17 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
 			 esw_vport_context.vport_cvlan_strip, 1);
 
 	if (set_flags & SET_VLAN_INSERT) {
-		/* insert only if no vlan in packet */
-		MLX5_SET(modify_esw_vport_context_in, in,
-			 esw_vport_context.vport_cvlan_insert, 1);
-
+		if (MLX5_CAP_ESW(dev, vport_cvlan_insert_always)) {
+			/* insert either if vlan exist in packet or not */
+			MLX5_SET(modify_esw_vport_context_in, in,
+				 esw_vport_context.vport_cvlan_insert,
+				 MLX5_VPORT_CVLAN_INSERT_ALWAYS);
+		} else {
+			/* insert only if no vlan in packet */
+			MLX5_SET(modify_esw_vport_context_in, in,
+				 esw_vport_context.vport_cvlan_insert,
+				 MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN);
+		}
 		MLX5_SET(modify_esw_vport_context_in, in,
 			 esw_vport_context.cvlan_pcp, qos);
 		MLX5_SET(modify_esw_vport_context_in, in,
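The vport context's cvlan_insert field stops being treated as a boolean: 0x1 means insert only when the packet carries no cvlan, while the new 0x3 mode (gated by the vport_cvlan_insert_always capability) overwrites unconditionally. A minimal sketch of the mode selection, with a toy flag standing in for the capability query:

#include <stdbool.h>
#include <stdio.h>

/* Values mirrored from the new enum added to the mlx5 headers below. */
enum {
	VPORT_CVLAN_INSERT_WHEN_NO_CVLAN = 0x1,
	VPORT_CVLAN_INSERT_ALWAYS        = 0x3,
};

static int cvlan_insert_mode(bool cap_cvlan_insert_always)
{
	/* Prefer unconditional insert when the device supports it, so
	 * VST also applies to packets already carrying a customer VLAN. */
	return cap_cvlan_insert_always ? VPORT_CVLAN_INSERT_ALWAYS
				       : VPORT_CVLAN_INSERT_WHEN_NO_CVLAN;
}

int main(void)
{
	printf("mode without cap: 0x%x\n", cvlan_insert_mode(false));
	printf("mode with cap:    0x%x\n", cvlan_insert_mode(true));
	return 0;
}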
@@ -809,6 +816,7 @@ out_free:
 
 static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 {
+	bool vst_mode_steering = esw_vst_mode_is_steering(esw);
 	u16 vport_num = vport->vport;
 	int flags;
 	int err;
@@ -839,8 +847,9 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 
 	flags = (vport->info.vlan || vport->info.qos) ?
 		SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
-	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
-			       vport->info.qos, flags);
+	if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering)
+		modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
+				       vport->info.qos, flags);
 
 	return 0;
 
@@ -1848,6 +1857,7 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
 			       u16 vport, u16 vlan, u8 qos, u8 set_flags)
 {
 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+	bool vst_mode_steering = esw_vst_mode_is_steering(esw);
 	int err = 0;
 
 	if (IS_ERR(evport))
@@ -1855,9 +1865,11 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
 	if (vlan > 4095 || qos > 7)
 		return -EINVAL;
 
-	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
-	if (err)
-		return err;
+	if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) {
+		err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
+		if (err)
+			return err;
+	}
 
 	evport->info.vlan = vlan;
 	evport->info.qos = qos;
@@ -527,6 +527,12 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
 				  u16 vport, u16 vlan, u8 qos, u8 set_flags);
 
+static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw)
+{
+	return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) &&
+		MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan));
+}
+
 static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
 						       u8 vlan_depth)
 {
@@ -674,6 +674,12 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
 	dev = container_of(priv, struct mlx5_core_dev, priv);
 	devlink = priv_to_devlink(dev);
 
+	mutex_lock(&dev->intf_state_mutex);
+	if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) {
+		mlx5_core_err(dev, "health works are not permitted at this stage\n");
+		return;
+	}
+	mutex_unlock(&dev->intf_state_mutex);
 	enter_error_state(dev, false);
 	if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
 		devl_lock(devlink);
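One caveat worth flagging: the early return in this hunk exits with intf_state_mutex still held (mainline later added an unlock before that return). A minimal sketch of the balanced check-under-lock pattern, with pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static bool drop_new_work; /* stand-in for MLX5_DROP_NEW_HEALTH_WORK */

static void fatal_err_work(void)
{
	pthread_mutex_lock(&state_lock);
	if (drop_new_work) {
		fprintf(stderr, "health works are not permitted at this stage\n");
		pthread_mutex_unlock(&state_lock); /* unlock on EVERY exit path */
		return;
	}
	pthread_mutex_unlock(&state_lock);

	/* ... enter error state and trigger recovery ... */
}

int main(void)
{
	drop_new_work = true;
	fatal_err_work(); /* returns cleanly, lock released */
	return 0;
}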
@@ -71,6 +71,10 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
 	params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
 	params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
 	params->tunneled_offload_en = false;
+
+	/* CQE compression is not supported for IPoIB */
+	params->rx_cqe_compress_def = false;
+	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
 }
 
 /* Called directly after IPoIB netdevice was created to initialize SW structs */
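MLX5E_SET_PFLAG stores a boolean into the driver's private-flags bitmask; the hunk pins the RX CQE-compression pflag off for IPoIB. A generic sketch of that set/get bit-flag idiom (names invented, not the mlx5e macros):

#include <stdbool.h>
#include <stdio.h>

/* Generic private-flags idiom: one bit per flag in a bitmask. */
enum { PFLAG_RX_CQE_COMPRESS, PFLAG_TX_CQE_COMPRESS, NUM_PFLAGS };

struct params { unsigned int pflags; };

#define SET_PFLAG(p, flag, enable)                      \
	do {                                            \
		if (enable)                             \
			(p)->pflags |= 1u << (flag);    \
		else                                    \
			(p)->pflags &= ~(1u << (flag)); \
	} while (0)

#define GET_PFLAG(p, flag) (!!((p)->pflags & (1u << (flag))))

int main(void)
{
	struct params params = { .pflags = 1u << PFLAG_RX_CQE_COMPRESS };
	bool rx_cqe_compress_def = false;

	/* Mirror of the hunk: force CQE compression off by default. */
	SET_PFLAG(&params, PFLAG_RX_CQE_COMPRESS, rx_cqe_compress_def);

	printf("RX_CQE_COMPRESS = %d\n", GET_PFLAG(&params, PFLAG_RX_CQE_COMPRESS));
	return 0;
}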
@@ -228,6 +228,7 @@ static void mlx5_ldev_free(struct kref *ref)
 	if (ldev->nb.notifier_call)
 		unregister_netdevice_notifier_net(&init_net, &ldev->nb);
 	mlx5_lag_mp_cleanup(ldev);
+	cancel_delayed_work_sync(&ldev->bond_work);
 	destroy_workqueue(ldev->wq);
 	mlx5_lag_mpesw_cleanup(ldev);
 	mutex_destroy(&ldev->lock);
@@ -613,7 +613,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
 		MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
 			 MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
 
-	if (MLX5_CAP_GEN(dev, roce_rw_supported))
+	if (MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce))
 		MLX5_SET(cmd_hca_cap, set_hca_cap, roce,
 			 mlx5_is_roce_on(dev));
 
@@ -1050,6 +1050,8 @@ err_rl_cleanup:
 err_tables_cleanup:
 	mlx5_geneve_destroy(dev->geneve);
 	mlx5_vxlan_destroy(dev->vxlan);
+	mlx5_cleanup_clock(dev);
+	mlx5_cleanup_reserved_gids(dev);
 	mlx5_cq_debugfs_cleanup(dev);
 	mlx5_fw_reset_cleanup(dev);
err_events_cleanup:
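This hunk restores two forgotten cleanup calls on the unwind path of mlx5_init_once(); the invariant is that each goto label must undo everything initialized after the previous label, in reverse order. A generic sketch of the idiom:

#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return 0; }
static int init_c(void) { puts("init c"); return -1; /* fails */ }
static void cleanup_a(void) { puts("cleanup a"); }
static void cleanup_b(void) { puts("cleanup b"); }

static int init_once(void)
{
	int err;

	err = init_a();
	if (err)
		return err;
	err = init_b();
	if (err)
		goto err_a;
	err = init_c();
	if (err)
		goto err_b; /* forgetting an entry under this label leaks b */
	return 0;

err_b:
	cleanup_b(); /* reverse order: last successful init undone first */
err_a:
	cleanup_a();
	return err;
}

int main(void) { return init_once() ? 1 : 0; }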
@@ -1090,6 +1090,11 @@ enum {
 	MLX5_VPORT_ADMIN_STATE_AUTO  = 0x2,
 };
 
+enum {
+	MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN  = 0x1,
+	MLX5_VPORT_CVLAN_INSERT_ALWAYS         = 0x3,
+};
+
 enum {
 	MLX5_L3_PROT_TYPE_IPV4 = 0,
 	MLX5_L3_PROT_TYPE_IPV6 = 1,
@@ -913,7 +913,8 @@ struct mlx5_ifc_e_switch_cap_bits {
 	u8         vport_svlan_insert[0x1];
 	u8         vport_cvlan_insert_if_not_exist[0x1];
 	u8         vport_cvlan_insert_overwrite[0x1];
-	u8         reserved_at_5[0x2];
+	u8         reserved_at_5[0x1];
+	u8         vport_cvlan_insert_always[0x1];
 	u8         esw_shared_ingress_acl[0x1];
 	u8         esw_uplink_ingress_acl[0x1];
 	u8         root_ft_on_other_esw[0x1];
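mlx5_ifc.h describes firmware layouts as pseudo-fields whose array sizes are bit counts, so carving a named bit out of reserved_at_5 must preserve the total width: 0x2 reserved bits become 0x1 reserved plus the new 1-bit vport_cvlan_insert_always at bit offset 6. A sketch that pins such an invariant at compile time (abbreviated names, not the real header):

#include <stdio.h>

/* Bit widths before and after the change, in declaration order;
 * offsets count from the start of e_switch_cap. */
enum {
	OLD_RESERVED_AT_5_BITS       = 0x2,
	NEW_RESERVED_AT_5_BITS       = 0x1,
	NEW_CVLAN_INSERT_ALWAYS_BITS = 0x1,
};

/* Splitting a reserved field must not shift any field that follows. */
_Static_assert(OLD_RESERVED_AT_5_BITS ==
	       NEW_RESERVED_AT_5_BITS + NEW_CVLAN_INSERT_ALWAYS_BITS,
	       "e_switch_cap bit layout changed size");

int main(void)
{
	/* vport_cvlan_insert_always lands at bit offset 6: bits 0..4 are
	 * the five 1-bit fields before reserved_at_5, bit 5 stays reserved. */
	printf("vport_cvlan_insert_always offset: %d\n",
	       5 + NEW_RESERVED_AT_5_BITS);
	return 0;
}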