Merge: Backport more upstream MANA patches to RHEL 10.2
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-10/-/merge_requests/1435
JIRA: https://issues.redhat.com/browse/RHEL-109580
Tested: passed basic tests on an Azure D2s_v6 instance.
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Approved-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Approved-by: John W. Linville <linville@redhat.com>
Approved-by: Michal Schmidt <mschmidt@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: CKI GitLab Kmaint Pipeline Bot <26919896-cki-kmaint-pipeline-bot@users.noreply.gitlab.com>
commit aa323ecf59
@@ -32,8 +32,32 @@ static const struct rdma_stat_desc mana_ib_port_stats_desc[] = {
[MANA_IB_RATE_INC_EVENTS].name = "rate_inc_events",
[MANA_IB_NUM_QPS_RECOVERED].name = "num_qps_recovered",
[MANA_IB_CURRENT_RATE].name = "current_rate",
[MANA_IB_DUP_RX_REQ].name = "dup_rx_requests",
[MANA_IB_TX_BYTES].name = "tx_bytes",
[MANA_IB_RX_BYTES].name = "rx_bytes",
[MANA_IB_RX_SEND_REQ].name = "rx_send_requests",
[MANA_IB_RX_WRITE_REQ].name = "rx_write_requests",
[MANA_IB_RX_READ_REQ].name = "rx_read_requests",
[MANA_IB_TX_PKT].name = "tx_packets",
[MANA_IB_RX_PKT].name = "rx_packets",
};

static const struct rdma_stat_desc mana_ib_device_stats_desc[] = {
[MANA_IB_SENT_CNPS].name = "sent_cnps",
[MANA_IB_RECEIVED_ECNS].name = "received_ecns",
[MANA_IB_RECEIVED_CNP_COUNT].name = "received_cnp_count",
[MANA_IB_QP_CONGESTED_EVENTS].name = "qp_congested_events",
[MANA_IB_QP_RECOVERED_EVENTS].name = "qp_recovered_events",
[MANA_IB_DEV_RATE_INC_EVENTS].name = "rate_inc_events",
};

struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev)
{
return rdma_alloc_hw_stats_struct(mana_ib_device_stats_desc,
ARRAY_SIZE(mana_ib_device_stats_desc),
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num)
{
@@ -42,8 +66,39 @@ struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port_num, int index)
static int mana_ib_get_hw_device_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats)
{
struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
ib_dev);
struct mana_rnic_query_device_cntrs_resp resp = {};
struct mana_rnic_query_device_cntrs_req req = {};
int err;

mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_DEVICE_COUNTERS,
sizeof(req), sizeof(resp));
req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;

err = mana_gd_send_request(mdev_to_gc(mdev), sizeof(req), &req,
sizeof(resp), &resp);
if (err) {
ibdev_err(&mdev->ib_dev, "Failed to query device counters err %d",
err);
return err;
}

stats->value[MANA_IB_SENT_CNPS] = resp.sent_cnps;
stats->value[MANA_IB_RECEIVED_ECNS] = resp.received_ecns;
stats->value[MANA_IB_RECEIVED_CNP_COUNT] = resp.received_cnp_count;
stats->value[MANA_IB_QP_CONGESTED_EVENTS] = resp.qp_congested_events;
stats->value[MANA_IB_QP_RECOVERED_EVENTS] = resp.qp_recovered_events;
stats->value[MANA_IB_DEV_RATE_INC_EVENTS] = resp.rate_inc_events;

return ARRAY_SIZE(mana_ib_device_stats_desc);
}

static int mana_ib_get_hw_port_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port_num)
{
struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
ib_dev);
@@ -53,6 +108,7 @@ int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,

mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_VF_COUNTERS,
sizeof(req), sizeof(resp));
req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;

@@ -101,5 +157,23 @@ int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
stats->value[MANA_IB_NUM_QPS_RECOVERED] = resp.num_qps_recovered;
stats->value[MANA_IB_CURRENT_RATE] = resp.current_rate;

stats->value[MANA_IB_DUP_RX_REQ] = resp.dup_rx_req;
stats->value[MANA_IB_TX_BYTES] = resp.tx_bytes;
stats->value[MANA_IB_RX_BYTES] = resp.rx_bytes;
stats->value[MANA_IB_RX_SEND_REQ] = resp.rx_send_req;
stats->value[MANA_IB_RX_WRITE_REQ] = resp.rx_write_req;
stats->value[MANA_IB_RX_READ_REQ] = resp.rx_read_req;
stats->value[MANA_IB_TX_PKT] = resp.tx_pkt;
stats->value[MANA_IB_RX_PKT] = resp.rx_pkt;

return ARRAY_SIZE(mana_ib_port_stats_desc);
}

int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port_num, int index)
{
if (!port_num)
return mana_ib_get_hw_device_stats(ibdev, stats);
else
return mana_ib_get_hw_port_stats(ibdev, stats, port_num);
}
@@ -35,10 +35,28 @@ enum mana_ib_port_counters {
MANA_IB_RATE_INC_EVENTS,
MANA_IB_NUM_QPS_RECOVERED,
MANA_IB_CURRENT_RATE,
MANA_IB_DUP_RX_REQ,
MANA_IB_TX_BYTES,
MANA_IB_RX_BYTES,
MANA_IB_RX_SEND_REQ,
MANA_IB_RX_WRITE_REQ,
MANA_IB_RX_READ_REQ,
MANA_IB_TX_PKT,
MANA_IB_RX_PKT,
};

enum mana_ib_device_counters {
MANA_IB_SENT_CNPS,
MANA_IB_RECEIVED_ECNS,
MANA_IB_RECEIVED_CNP_COUNT,
MANA_IB_QP_CONGESTED_EVENTS,
MANA_IB_QP_RECOVERED_EVENTS,
MANA_IB_DEV_RATE_INC_EVENTS,
};

struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num);
struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev);
int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port_num, int index);
#endif /* _COUNTERS_H_ */
@@ -65,6 +65,10 @@ static const struct ib_device_ops mana_ib_stats_ops = {
.get_hw_stats = mana_ib_get_hw_stats,
};

static const struct ib_device_ops mana_ib_device_stats_ops = {
.alloc_hw_device_stats = mana_ib_alloc_hw_device_stats,
};

static int mana_ib_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -73,28 +77,31 @@ static int mana_ib_netdev_event(struct notifier_block *this,
struct gdma_context *gc = dev->gdma_dev->gdma_context;
struct mana_context *mc = gc->mana.driver_data;
struct net_device *ndev;
int i;

/* Only process events from our parent device */
if (event_dev != mc->ports[0])
return NOTIFY_DONE;
for (i = 0; i < dev->ib_dev.phys_port_cnt; i++)
if (event_dev == mc->ports[i]) {
switch (event) {
case NETDEV_CHANGEUPPER:
ndev = mana_get_primary_netdev(mc, i, &dev->dev_tracker);
/*
* RDMA core will setup GID based on updated netdev.
* It's not possible to race with the core as rtnl lock is being
* held.
*/
ib_device_set_netdev(&dev->ib_dev, ndev, i + 1);

switch (event) {
case NETDEV_CHANGEUPPER:
ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
/*
* RDMA core will setup GID based on updated netdev.
* It's not possible to race with the core as rtnl lock is being
* held.
*/
ib_device_set_netdev(&dev->ib_dev, ndev, 1);
/* mana_get_primary_netdev() returns ndev with refcount held */
if (ndev)
netdev_put(ndev, &dev->dev_tracker);

/* mana_get_primary_netdev() returns ndev with refcount held */
netdev_put(ndev, &dev->dev_tracker);

return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
return NOTIFY_DONE;
}

static int mana_ib_probe(struct auxiliary_device *adev,
@@ -107,7 +114,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
struct net_device *ndev;
struct mana_ib_dev *dev;
u8 mac_addr[ETH_ALEN];
int ret;
int ret, i;

dev = ib_alloc_device(mana_ib_dev, ib_dev);
if (!dev)
@@ -122,51 +129,56 @@ static int mana_ib_probe(struct auxiliary_device *adev,

if (mana_ib_is_rnic(dev)) {
dev->ib_dev.phys_port_cnt = 1;
ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
if (!ndev) {
ret = -ENODEV;
ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
goto free_ib_device;
}
ether_addr_copy(mac_addr, ndev->dev_addr);
addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
/* mana_get_primary_netdev() returns ndev with refcount held */
netdev_put(ndev, &dev->dev_tracker);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
goto free_ib_device;
}

dev->nb.notifier_call = mana_ib_netdev_event;
ret = register_netdevice_notifier(&dev->nb);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
ret);
goto free_ib_device;
}

addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, mc->ports[0]->dev_addr);
ret = mana_ib_gd_query_adapter_caps(dev);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d", ret);
goto deregister_net_notifier;
goto free_ib_device;
}

ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT)
ib_set_device_ops(&dev->ib_dev, &mana_ib_device_stats_ops);

ret = mana_ib_create_eqs(dev);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
goto deregister_net_notifier;
goto free_ib_device;
}

ret = mana_ib_gd_create_rnic_adapter(dev);
if (ret)
goto destroy_eqs;

ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_MULTI_PORTS_SUPPORT)
dev->ib_dev.phys_port_cnt = mc->num_ports;

for (i = 0; i < dev->ib_dev.phys_port_cnt; i++) {
ndev = mana_get_primary_netdev(mc, i, &dev->dev_tracker);
if (!ndev) {
ret = -ENODEV;
ibdev_err(&dev->ib_dev,
"Failed to get netdev for IB port %d", i + 1);
goto destroy_rnic;
}
ether_addr_copy(mac_addr, ndev->dev_addr);
ret = ib_device_set_netdev(&dev->ib_dev, ndev, i + 1);
/* mana_get_primary_netdev() returns ndev with refcount held */
netdev_put(ndev, &dev->dev_tracker);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
goto destroy_rnic;
}
ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", ret);
goto destroy_rnic;
}
}
dev->nb.notifier_call = mana_ib_netdev_event;
ret = register_netdevice_notifier(&dev->nb);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", ret);
ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d", ret);
goto destroy_rnic;
}
} else {
@@ -182,7 +194,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
MANA_AV_BUFFER_SIZE, 0);
if (!dev->av_pool) {
ret = -ENOMEM;
goto destroy_rnic;
goto deregister_net_notifier;
}

ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
@@ -199,15 +211,15 @@ static int mana_ib_probe(struct auxiliary_device *adev,

deallocate_pool:
dma_pool_destroy(dev->av_pool);
deregister_net_notifier:
if (mana_ib_is_rnic(dev))
unregister_netdevice_notifier(&dev->nb);
destroy_rnic:
if (mana_ib_is_rnic(dev))
mana_ib_gd_destroy_rnic_adapter(dev);
destroy_eqs:
if (mana_ib_is_rnic(dev))
mana_ib_destroy_eqs(dev);
deregister_net_notifier:
if (mana_ib_is_rnic(dev))
unregister_netdevice_notifier(&dev->nb);
free_ib_device:
xa_destroy(&dev->qp_table_wq);
ib_dealloc_device(&dev->ib_dev);
@@ -221,9 +233,9 @@ static void mana_ib_remove(struct auxiliary_device *adev)
ib_unregister_device(&dev->ib_dev);
dma_pool_destroy(dev->av_pool);
if (mana_ib_is_rnic(dev)) {
unregister_netdevice_notifier(&dev->nb);
mana_ib_gd_destroy_rnic_adapter(dev);
mana_ib_destroy_eqs(dev);
unregister_netdevice_notifier(&dev->nb);
}
xa_destroy(&dev->qp_table_wq);
ib_dealloc_device(&dev->ib_dev);
@@ -563,8 +563,14 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
immutable->gid_tbl_len = attr.gid_tbl_len;

if (mana_ib_is_rnic(dev)) {
immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
if (port_num == 1) {
immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
} else {
immutable->core_cap_flags = RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP
| RDMA_CORE_CAP_ETH_AH;
immutable->max_mad_size = 0;
}
} else {
immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
}
@@ -633,8 +639,9 @@ int mana_ib_query_port(struct ib_device *ibdev, u32 port,
props->pkey_tbl_len = 1;
if (mana_ib_is_rnic(dev)) {
props->gid_tbl_len = 16;
props->port_cap_flags = IB_PORT_CM_SUP;
props->ip_gids = true;
if (port == 1)
props->port_cap_flags = IB_PORT_CM_SUP;
}

return 0;
@@ -210,6 +210,7 @@ enum mana_ib_command_code {
MANA_IB_DESTROY_RC_QP = 0x3000b,
MANA_IB_SET_QP_STATE = 0x3000d,
MANA_IB_QUERY_VF_COUNTERS = 0x30022,
MANA_IB_QUERY_DEVICE_COUNTERS = 0x30023,
};

struct mana_ib_query_adapter_caps_req {
@@ -218,6 +219,8 @@ struct mana_ib_query_adapter_caps_req {

enum mana_ib_adapter_features {
MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT = BIT(4),
MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT = BIT(5),
MANA_IB_FEATURE_MULTI_PORTS_SUPPORT = BIT(6),
};

struct mana_ib_query_adapter_caps_resp {
@@ -514,6 +517,31 @@ struct mana_rnic_query_vf_cntrs_resp {
u64 rate_inc_events;
u64 num_qps_recovered;
u64 current_rate;
u64 dup_rx_req;
u64 tx_bytes;
u64 rx_bytes;
u64 rx_send_req;
u64 rx_write_req;
u64 rx_read_req;
u64 tx_pkt;
u64 rx_pkt;
}; /* HW Data */

struct mana_rnic_query_device_cntrs_req {
struct gdma_req_hdr hdr;
mana_handle_t adapter;
}; /* HW Data */

struct mana_rnic_query_device_cntrs_resp {
struct gdma_resp_hdr hdr;
u32 sent_cnps;
u32 received_ecns;
u32 reserved1;
u32 received_cnp_count;
u32 qp_congested_events;
u32 qp_recovered_events;
u32 rate_inc_events;
u32 reserved2;
}; /* HW Data */

static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
@@ -772,7 +772,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
req.ah_attr.dest_port = ROCE_V2_UDP_DPORT;
req.ah_attr.src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
ibqp->qp_num, attr->dest_qp_num);
req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class;
req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;
req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
}
@ -6,8 +6,12 @@
|
|||
#include <linux/pci.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <linux/version.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/msi.h>
|
||||
#include <linux/irqdomain.h>
|
||||
|
||||
#include <net/mana/mana.h>
|
||||
#include <net/mana/hw_channel.h>
|
||||
|
||||
struct dentry *mana_debugfs_root;
|
||||
|
||||
|
@ -66,6 +70,24 @@ static void mana_gd_init_registers(struct pci_dev *pdev)
|
|||
mana_gd_init_vf_regs(pdev);
|
||||
}
|
||||
|
||||
/* Suppress logging when we set timeout to zero */
|
||||
bool mana_need_log(struct gdma_context *gc, int err)
|
||||
{
|
||||
struct hw_channel_context *hwc;
|
||||
|
||||
if (err != -ETIMEDOUT)
|
||||
return true;
|
||||
|
||||
if (!gc)
|
||||
return true;
|
||||
|
||||
hwc = gc->hwc.driver_data;
|
||||
if (hwc && hwc->hwc_timeout == 0)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int mana_gd_query_max_resources(struct pci_dev *pdev)
|
||||
{
|
||||
struct gdma_context *gc = pci_get_drvdata(pdev);
|
||||
|
@ -83,8 +105,15 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev)
|
|||
return err ? err : -EPROTO;
|
||||
}
|
||||
|
||||
if (gc->num_msix_usable > resp.max_msix)
|
||||
gc->num_msix_usable = resp.max_msix;
|
||||
if (!pci_msix_can_alloc_dyn(pdev)) {
|
||||
if (gc->num_msix_usable > resp.max_msix)
|
||||
gc->num_msix_usable = resp.max_msix;
|
||||
} else {
|
||||
/* If dynamic allocation is enabled we have already allocated
|
||||
* hwc msi
|
||||
*/
|
||||
gc->num_msix_usable = min(resp.max_msix, num_online_cpus() + 1);
|
||||
}
|
||||
|
||||
if (gc->num_msix_usable <= 1)
|
||||
return -ENOSPC;
|
||||
|
@ -269,8 +298,9 @@ static int mana_gd_disable_queue(struct gdma_queue *queue)
|
|||
|
||||
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
|
||||
if (err || resp.hdr.status) {
|
||||
dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
|
||||
resp.hdr.status);
|
||||
if (mana_need_log(gc, err))
|
||||
dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
|
||||
resp.hdr.status);
|
||||
return err ? err : -EPROTO;
|
||||
}
|
||||
|
||||
|
@ -355,11 +385,113 @@ void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
|
|||
}
|
||||
EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA");
|
||||
|
||||
#define MANA_SERVICE_PERIOD 10
|
||||
|
||||
static void mana_serv_fpga(struct pci_dev *pdev)
|
||||
{
|
||||
struct pci_bus *bus, *parent;
|
||||
|
||||
pci_lock_rescan_remove();
|
||||
|
||||
bus = pdev->bus;
|
||||
if (!bus) {
|
||||
dev_err(&pdev->dev, "MANA service: no bus\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
parent = bus->parent;
|
||||
if (!parent) {
|
||||
dev_err(&pdev->dev, "MANA service: no parent bus\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
pci_stop_and_remove_bus_device(bus->self);
|
||||
|
||||
msleep(MANA_SERVICE_PERIOD * 1000);
|
||||
|
||||
pci_rescan_bus(parent);
|
||||
|
||||
out:
|
||||
pci_unlock_rescan_remove();
|
||||
}
|
||||
|
||||
static void mana_serv_reset(struct pci_dev *pdev)
|
||||
{
|
||||
struct gdma_context *gc = pci_get_drvdata(pdev);
|
||||
struct hw_channel_context *hwc;
|
||||
|
||||
if (!gc) {
|
||||
dev_err(&pdev->dev, "MANA service: no GC\n");
|
||||
return;
|
||||
}
|
||||
|
||||
hwc = gc->hwc.driver_data;
|
||||
if (!hwc) {
|
||||
dev_err(&pdev->dev, "MANA service: no HWC\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* HWC is not responding in this case, so don't wait */
|
||||
hwc->hwc_timeout = 0;
|
||||
|
||||
dev_info(&pdev->dev, "MANA reset cycle start\n");
|
||||
|
||||
mana_gd_suspend(pdev, PMSG_SUSPEND);
|
||||
|
||||
msleep(MANA_SERVICE_PERIOD * 1000);
|
||||
|
||||
mana_gd_resume(pdev);
|
||||
|
||||
dev_info(&pdev->dev, "MANA reset cycle completed\n");
|
||||
|
||||
out:
|
||||
gc->in_service = false;
|
||||
}
|
||||
|
||||
struct mana_serv_work {
|
||||
struct work_struct serv_work;
|
||||
struct pci_dev *pdev;
|
||||
enum gdma_eqe_type type;
|
||||
};
|
||||
|
||||
static void mana_serv_func(struct work_struct *w)
|
||||
{
|
||||
struct mana_serv_work *mns_wk;
|
||||
struct pci_dev *pdev;
|
||||
|
||||
mns_wk = container_of(w, struct mana_serv_work, serv_work);
|
||||
pdev = mns_wk->pdev;
|
||||
|
||||
if (!pdev)
|
||||
goto out;
|
||||
|
||||
switch (mns_wk->type) {
|
||||
case GDMA_EQE_HWC_FPGA_RECONFIG:
|
||||
mana_serv_fpga(pdev);
|
||||
break;
|
||||
|
||||
case GDMA_EQE_HWC_RESET_REQUEST:
|
||||
mana_serv_reset(pdev);
|
||||
break;
|
||||
|
||||
default:
|
||||
dev_err(&pdev->dev, "MANA service: unknown type %d\n",
|
||||
mns_wk->type);
|
||||
break;
|
||||
}
|
||||
|
||||
out:
|
||||
pci_dev_put(pdev);
|
||||
kfree(mns_wk);
|
||||
module_put(THIS_MODULE);
|
||||
}
|
||||
|
||||
static void mana_gd_process_eqe(struct gdma_queue *eq)
|
||||
{
|
||||
u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
|
||||
struct gdma_context *gc = eq->gdma_dev->gdma_context;
|
||||
struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
|
||||
struct mana_serv_work *mns_wk;
|
||||
union gdma_eqe_info eqe_info;
|
||||
enum gdma_eqe_type type;
|
||||
struct gdma_event event;
|
||||
|
@ -404,6 +536,35 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
|
|||
eq->eq.callback(eq->eq.context, eq, &event);
|
||||
break;
|
||||
|
||||
case GDMA_EQE_HWC_FPGA_RECONFIG:
|
||||
case GDMA_EQE_HWC_RESET_REQUEST:
|
||||
dev_info(gc->dev, "Recv MANA service type:%d\n", type);
|
||||
|
||||
if (gc->in_service) {
|
||||
dev_info(gc->dev, "Already in service\n");
|
||||
break;
|
||||
}
|
||||
|
||||
if (!try_module_get(THIS_MODULE)) {
|
||||
dev_info(gc->dev, "Module is unloading\n");
|
||||
break;
|
||||
}
|
||||
|
||||
mns_wk = kzalloc(sizeof(*mns_wk), GFP_ATOMIC);
|
||||
if (!mns_wk) {
|
||||
module_put(THIS_MODULE);
|
||||
break;
|
||||
}
|
||||
|
||||
dev_info(gc->dev, "Start MANA service type:%d\n", type);
|
||||
gc->in_service = true;
|
||||
mns_wk->pdev = to_pci_dev(gc->dev);
|
||||
mns_wk->type = type;
|
||||
pci_dev_get(mns_wk->pdev);
|
||||
INIT_WORK(&mns_wk->serv_work, mana_serv_func);
|
||||
schedule_work(&mns_wk->serv_work);
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -486,7 +647,9 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
|
|||
}
|
||||
|
||||
queue->eq.msix_index = msi_index;
|
||||
gic = &gc->irq_contexts[msi_index];
|
||||
gic = xa_load(&gc->irq_contexts, msi_index);
|
||||
if (WARN_ON(!gic))
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock_irqsave(&gic->lock, flags);
|
||||
list_add_rcu(&queue->entry, &gic->eq_list);
|
||||
|
@ -495,7 +658,7 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void mana_gd_deregiser_irq(struct gdma_queue *queue)
|
||||
static void mana_gd_deregister_irq(struct gdma_queue *queue)
|
||||
{
|
||||
struct gdma_dev *gd = queue->gdma_dev;
|
||||
struct gdma_irq_context *gic;
|
||||
|
@ -511,7 +674,10 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
|
|||
if (WARN_ON(msix_index >= gc->num_msix_usable))
|
||||
return;
|
||||
|
||||
gic = &gc->irq_contexts[msix_index];
|
||||
gic = xa_load(&gc->irq_contexts, msix_index);
|
||||
if (WARN_ON(!gic))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&gic->lock, flags);
|
||||
list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
|
||||
if (queue == eq) {
|
||||
|
@ -545,7 +711,8 @@ int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
|
|||
|
||||
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
|
||||
if (err) {
|
||||
dev_err(dev, "test_eq failed: %d\n", err);
|
||||
if (mana_need_log(gc, err))
|
||||
dev_err(dev, "test_eq failed: %d\n", err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -580,11 +747,11 @@ static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets,
|
|||
|
||||
if (flush_evenets) {
|
||||
err = mana_gd_test_eq(gc, queue);
|
||||
if (err)
|
||||
if (err && mana_need_log(gc, err))
|
||||
dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
|
||||
}
|
||||
|
||||
mana_gd_deregiser_irq(queue);
|
||||
mana_gd_deregister_irq(queue);
|
||||
|
||||
if (queue->eq.disable_needed)
|
||||
mana_gd_disable_queue(queue);
|
||||
|
@ -726,8 +893,9 @@ int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle)
|
|||
|
||||
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
|
||||
if (err || resp.hdr.status) {
|
||||
dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
|
||||
err, resp.hdr.status);
|
||||
if (mana_need_log(gc, err))
|
||||
dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
|
||||
err, resp.hdr.status);
|
||||
return -EPROTO;
|
||||
}
|
||||
|
||||
|
@ -1027,8 +1195,9 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
|
|||
|
||||
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
|
||||
if (err || resp.hdr.status) {
|
||||
dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
|
||||
err, resp.hdr.status);
|
||||
if (mana_need_log(gc, err))
|
||||
dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
|
||||
err, resp.hdr.status);
|
||||
if (!err)
|
||||
err = -EPROTO;
|
||||
}
|
||||
|
@ -1291,7 +1460,49 @@ void mana_gd_free_res_map(struct gdma_resource *r)
|
|||
r->size = 0;
|
||||
}
|
||||
|
||||
static int irq_setup(unsigned int *irqs, unsigned int len, int node)
|
||||
/*
|
||||
* Spread on CPUs with the following heuristics:
|
||||
*
|
||||
* 1. No more than one IRQ per CPU, if possible;
|
||||
* 2. NUMA locality is the second priority;
|
||||
* 3. Sibling dislocality is the last priority.
|
||||
*
|
||||
* Let's consider this topology:
|
||||
*
|
||||
* Node 0 1
|
||||
* Core 0 1 2 3
|
||||
* CPU 0 1 2 3 4 5 6 7
|
||||
*
|
||||
* The most performant IRQ distribution based on the above topology
|
||||
* and heuristics may look like this:
|
||||
*
|
||||
* IRQ Nodes Cores CPUs
|
||||
* 0 1 0 0-1
|
||||
* 1 1 1 2-3
|
||||
* 2 1 0 0-1
|
||||
* 3 1 1 2-3
|
||||
* 4 2 2 4-5
|
||||
* 5 2 3 6-7
|
||||
* 6 2 2 4-5
|
||||
* 7 2 3 6-7
|
||||
*
|
||||
* The heuristics is implemented as follows.
|
||||
*
|
||||
* The outer for_each() loop resets the 'weight' to the actual number
|
||||
* of CPUs in the hop. Then inner for_each() loop decrements it by the
|
||||
* number of sibling groups (cores) while assigning first set of IRQs
|
||||
* to each group. IRQs 0 and 1 above are distributed this way.
|
||||
*
|
||||
* Now, because NUMA locality is more important, we should walk the
|
||||
* same set of siblings and assign 2nd set of IRQs (2 and 3), and it's
|
||||
* implemented by the medium while() loop. We do like this unless the
|
||||
* number of IRQs assigned on this hop will not become equal to number
|
||||
* of CPUs in the hop (weight == 0). Then we switch to the next hop and
|
||||
* do the same thing.
|
||||
*/
|
||||
|
||||
static int irq_setup(unsigned int *irqs, unsigned int len, int node,
|
||||
bool skip_first_cpu)
|
||||
{
|
||||
const struct cpumask *next, *prev = cpu_none_mask;
|
||||
cpumask_var_t cpus __free(free_cpumask_var);
|
||||
|
@ -1306,11 +1517,18 @@ static int irq_setup(unsigned int *irqs, unsigned int len, int node)
|
|||
while (weight > 0) {
|
||||
cpumask_andnot(cpus, next, prev);
|
||||
for_each_cpu(cpu, cpus) {
|
||||
if (len-- == 0)
|
||||
goto done;
|
||||
irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
|
||||
cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
|
||||
--weight;
|
||||
|
||||
if (unlikely(skip_first_cpu)) {
|
||||
skip_first_cpu = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (len-- == 0)
|
||||
goto done;
|
||||
|
||||
irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
|
||||
}
|
||||
}
|
||||
prev = next;
|
||||
|
@ -1320,47 +1538,108 @@ done:
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int mana_gd_setup_irqs(struct pci_dev *pdev)
|
||||
static int mana_gd_setup_dyn_irqs(struct pci_dev *pdev, int nvec)
|
||||
{
|
||||
struct gdma_context *gc = pci_get_drvdata(pdev);
|
||||
unsigned int max_queues_per_port;
|
||||
struct gdma_irq_context *gic;
|
||||
unsigned int max_irqs, cpu;
|
||||
int start_irq_index = 1;
|
||||
int nvec, *irqs, irq;
|
||||
int err, i = 0, j;
|
||||
bool skip_first_cpu = false;
|
||||
int *irqs, irq, err, i;
|
||||
|
||||
irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL);
|
||||
if (!irqs)
|
||||
return -ENOMEM;
|
||||
|
||||
/*
|
||||
* While processing the next pci irq vector, we start with index 1,
|
||||
* as IRQ vector at index 0 is already processed for HWC.
|
||||
* However, the population of irqs array starts with index 0, to be
|
||||
* further used in irq_setup()
|
||||
*/
|
||||
for (i = 1; i <= nvec; i++) {
|
||||
gic = kzalloc(sizeof(*gic), GFP_KERNEL);
|
||||
if (!gic) {
|
||||
err = -ENOMEM;
|
||||
goto free_irq;
|
||||
}
|
||||
gic->handler = mana_gd_process_eq_events;
|
||||
INIT_LIST_HEAD(&gic->eq_list);
|
||||
spin_lock_init(&gic->lock);
|
||||
|
||||
snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
|
||||
i - 1, pci_name(pdev));
|
||||
|
||||
/* one pci vector is already allocated for HWC */
|
||||
irqs[i - 1] = pci_irq_vector(pdev, i);
|
||||
if (irqs[i - 1] < 0) {
|
||||
err = irqs[i - 1];
|
||||
goto free_current_gic;
|
||||
}
|
||||
|
||||
err = request_irq(irqs[i - 1], mana_gd_intr, 0, gic->name, gic);
|
||||
if (err)
|
||||
goto free_current_gic;
|
||||
|
||||
xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
|
||||
}
|
||||
|
||||
/*
|
||||
* When calling irq_setup() for dynamically added IRQs, if number of
|
||||
* CPUs is more than or equal to allocated MSI-X, we need to skip the
|
||||
* first CPU sibling group since they are already affinitized to HWC IRQ
|
||||
*/
|
||||
cpus_read_lock();
|
||||
max_queues_per_port = num_online_cpus();
|
||||
if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
|
||||
max_queues_per_port = MANA_MAX_NUM_QUEUES;
|
||||
if (gc->num_msix_usable <= num_online_cpus())
|
||||
skip_first_cpu = true;
|
||||
|
||||
/* Need 1 interrupt for the Hardware communication Channel (HWC) */
|
||||
max_irqs = max_queues_per_port + 1;
|
||||
|
||||
nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
|
||||
if (nvec < 0) {
|
||||
err = irq_setup(irqs, nvec, gc->numa_node, skip_first_cpu);
|
||||
if (err) {
|
||||
cpus_read_unlock();
|
||||
return nvec;
|
||||
}
|
||||
if (nvec <= num_online_cpus())
|
||||
start_irq_index = 0;
|
||||
|
||||
irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL);
|
||||
if (!irqs) {
|
||||
err = -ENOMEM;
|
||||
goto free_irq_vector;
|
||||
goto free_irq;
|
||||
}
|
||||
|
||||
gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
|
||||
GFP_KERNEL);
|
||||
if (!gc->irq_contexts) {
|
||||
err = -ENOMEM;
|
||||
goto free_irq_array;
|
||||
cpus_read_unlock();
|
||||
kfree(irqs);
|
||||
return 0;
|
||||
|
||||
free_current_gic:
|
||||
kfree(gic);
|
||||
free_irq:
|
||||
for (i -= 1; i > 0; i--) {
|
||||
irq = pci_irq_vector(pdev, i);
|
||||
gic = xa_load(&gc->irq_contexts, i);
|
||||
if (WARN_ON(!gic))
|
||||
continue;
|
||||
|
||||
irq_update_affinity_hint(irq, NULL);
|
||||
free_irq(irq, gic);
|
||||
xa_erase(&gc->irq_contexts, i);
|
||||
kfree(gic);
|
||||
}
|
||||
kfree(irqs);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mana_gd_setup_irqs(struct pci_dev *pdev, int nvec)
|
||||
{
|
||||
struct gdma_context *gc = pci_get_drvdata(pdev);
|
||||
struct gdma_irq_context *gic;
|
||||
int *irqs, *start_irqs, irq;
|
||||
unsigned int cpu;
|
||||
int err, i;
|
||||
|
||||
irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL);
|
||||
if (!irqs)
|
||||
return -ENOMEM;
|
||||
|
||||
start_irqs = irqs;
|
||||
|
||||
for (i = 0; i < nvec; i++) {
|
||||
gic = &gc->irq_contexts[i];
|
||||
gic = kzalloc(sizeof(*gic), GFP_KERNEL);
|
||||
if (!gic) {
|
||||
err = -ENOMEM;
|
||||
goto free_irq;
|
||||
}
|
||||
|
||||
gic->handler = mana_gd_process_eq_events;
|
||||
INIT_LIST_HEAD(&gic->eq_list);
|
||||
spin_lock_init(&gic->lock);
|
||||
|
@ -1372,69 +1651,128 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
|
|||
snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
|
||||
i - 1, pci_name(pdev));
|
||||
|
||||
irq = pci_irq_vector(pdev, i);
|
||||
if (irq < 0) {
|
||||
err = irq;
|
||||
goto free_irq;
|
||||
irqs[i] = pci_irq_vector(pdev, i);
|
||||
if (irqs[i] < 0) {
|
||||
err = irqs[i];
|
||||
goto free_current_gic;
|
||||
}
|
||||
|
||||
if (!i) {
|
||||
err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
|
||||
if (err)
|
||||
goto free_irq;
|
||||
err = request_irq(irqs[i], mana_gd_intr, 0, gic->name, gic);
|
||||
if (err)
|
||||
goto free_current_gic;
|
||||
|
||||
/* If number of IRQ is one extra than number of online CPUs,
|
||||
* then we need to assign IRQ0 (hwc irq) and IRQ1 to
|
||||
* same CPU.
|
||||
* Else we will use different CPUs for IRQ0 and IRQ1.
|
||||
* Also we are using cpumask_local_spread instead of
|
||||
* cpumask_first for the node, because the node can be
|
||||
* mem only.
|
||||
*/
|
||||
if (start_irq_index) {
|
||||
cpu = cpumask_local_spread(i, gc->numa_node);
|
||||
irq_set_affinity_and_hint(irq, cpumask_of(cpu));
|
||||
} else {
|
||||
irqs[start_irq_index] = irq;
|
||||
}
|
||||
} else {
|
||||
irqs[i - start_irq_index] = irq;
|
||||
err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
|
||||
gic->name, gic);
|
||||
if (err)
|
||||
goto free_irq;
|
||||
}
|
||||
xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
|
||||
}
|
||||
|
||||
err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
|
||||
if (err)
|
||||
goto free_irq;
|
||||
/* If number of IRQ is one extra than number of online CPUs,
|
||||
* then we need to assign IRQ0 (hwc irq) and IRQ1 to
|
||||
* same CPU.
|
||||
* Else we will use different CPUs for IRQ0 and IRQ1.
|
||||
* Also we are using cpumask_local_spread instead of
|
||||
* cpumask_first for the node, because the node can be
|
||||
* mem only.
|
||||
*/
|
||||
cpus_read_lock();
|
||||
if (nvec > num_online_cpus()) {
|
||||
cpu = cpumask_local_spread(0, gc->numa_node);
|
||||
irq_set_affinity_and_hint(irqs[0], cpumask_of(cpu));
|
||||
irqs++;
|
||||
nvec -= 1;
|
||||
}
|
||||
|
||||
err = irq_setup(irqs, nvec, gc->numa_node, false);
|
||||
if (err) {
|
||||
cpus_read_unlock();
|
||||
goto free_irq;
|
||||
}
|
||||
|
||||
gc->max_num_msix = nvec;
|
||||
gc->num_msix_usable = nvec;
|
||||
cpus_read_unlock();
|
||||
kfree(irqs);
|
||||
kfree(start_irqs);
|
||||
return 0;
|
||||
|
||||
free_current_gic:
|
||||
kfree(gic);
|
||||
free_irq:
|
||||
for (j = i - 1; j >= 0; j--) {
|
||||
irq = pci_irq_vector(pdev, j);
|
||||
gic = &gc->irq_contexts[j];
|
||||
for (i -= 1; i >= 0; i--) {
|
||||
irq = pci_irq_vector(pdev, i);
|
||||
gic = xa_load(&gc->irq_contexts, i);
|
||||
if (WARN_ON(!gic))
|
||||
continue;
|
||||
|
||||
irq_update_affinity_hint(irq, NULL);
|
||||
free_irq(irq, gic);
|
||||
xa_erase(&gc->irq_contexts, i);
|
||||
kfree(gic);
|
||||
}
|
||||
|
||||
kfree(gc->irq_contexts);
|
||||
gc->irq_contexts = NULL;
|
||||
free_irq_array:
|
||||
kfree(irqs);
|
||||
free_irq_vector:
|
||||
cpus_read_unlock();
|
||||
pci_free_irq_vectors(pdev);
|
||||
kfree(start_irqs);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mana_gd_setup_hwc_irqs(struct pci_dev *pdev)
|
||||
{
|
||||
struct gdma_context *gc = pci_get_drvdata(pdev);
|
||||
unsigned int max_irqs, min_irqs;
|
||||
int nvec, err;
|
||||
|
||||
if (pci_msix_can_alloc_dyn(pdev)) {
|
||||
max_irqs = 1;
|
||||
min_irqs = 1;
|
||||
} else {
|
||||
/* Need 1 interrupt for HWC */
|
||||
max_irqs = min(num_online_cpus(), MANA_MAX_NUM_QUEUES) + 1;
|
||||
min_irqs = 2;
|
||||
}
|
||||
|
||||
nvec = pci_alloc_irq_vectors(pdev, min_irqs, max_irqs, PCI_IRQ_MSIX);
|
||||
if (nvec < 0)
|
||||
return nvec;
|
||||
|
||||
err = mana_gd_setup_irqs(pdev, nvec);
|
||||
if (err) {
|
||||
pci_free_irq_vectors(pdev);
|
||||
return err;
|
||||
}
|
||||
|
||||
gc->num_msix_usable = nvec;
|
||||
gc->max_num_msix = nvec;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mana_gd_setup_remaining_irqs(struct pci_dev *pdev)
|
||||
{
|
||||
struct gdma_context *gc = pci_get_drvdata(pdev);
|
||||
struct msi_map irq_map;
|
||||
int max_irqs, i, err;
|
||||
|
||||
if (!pci_msix_can_alloc_dyn(pdev))
|
||||
/* remain irqs are already allocated with HWC IRQ */
|
||||
return 0;
|
||||
|
||||
/* allocate only remaining IRQs*/
|
||||
max_irqs = gc->num_msix_usable - 1;
|
||||
|
||||
for (i = 1; i <= max_irqs; i++) {
|
||||
irq_map = pci_msix_alloc_irq_at(pdev, i, NULL);
|
||||
if (!irq_map.virq) {
|
||||
err = irq_map.index;
|
||||
/* caller will handle cleaning up all allocated
|
||||
* irqs, after HWC is destroyed
|
||||
*/
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
err = mana_gd_setup_dyn_irqs(pdev, max_irqs);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
gc->max_num_msix = gc->max_num_msix + max_irqs;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mana_gd_remove_irqs(struct pci_dev *pdev)
|
||||
{
|
||||
struct gdma_context *gc = pci_get_drvdata(pdev);
|
||||
|
@ -1449,19 +1787,21 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
|
|||
if (irq < 0)
|
||||
continue;
|
||||
|
||||
gic = &gc->irq_contexts[i];
|
||||
gic = xa_load(&gc->irq_contexts, i);
|
||||
if (WARN_ON(!gic))
|
||||
continue;
|
||||
|
||||
/* Need to clear the hint before free_irq */
|
||||
irq_update_affinity_hint(irq, NULL);
|
||||
free_irq(irq, gic);
|
||||
xa_erase(&gc->irq_contexts, i);
|
||||
kfree(gic);
|
||||
}
|
||||
|
||||
pci_free_irq_vectors(pdev);
|
||||
|
||||
gc->max_num_msix = 0;
|
||||
gc->num_msix_usable = 0;
|
||||
kfree(gc->irq_contexts);
|
||||
gc->irq_contexts = NULL;
|
||||
}
|
||||
|
||||
static int mana_gd_setup(struct pci_dev *pdev)
|
||||
|
@ -1476,9 +1816,10 @@ static int mana_gd_setup(struct pci_dev *pdev)
|
|||
if (!gc->service_wq)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mana_gd_setup_irqs(pdev);
|
||||
err = mana_gd_setup_hwc_irqs(pdev);
|
||||
if (err) {
|
||||
dev_err(gc->dev, "Failed to setup IRQs: %d\n", err);
|
||||
dev_err(gc->dev, "Failed to setup IRQs for HWC creation: %d\n",
|
||||
err);
|
||||
goto free_workqueue;
|
||||
}
|
||||
|
||||
|
@ -1494,6 +1835,12 @@ static int mana_gd_setup(struct pci_dev *pdev)
|
|||
if (err)
|
||||
goto destroy_hwc;
|
||||
|
||||
err = mana_gd_setup_remaining_irqs(pdev);
|
||||
if (err) {
|
||||
dev_err(gc->dev, "Failed to setup remaining IRQs: %d", err);
|
||||
goto destroy_hwc;
|
||||
}
|
||||
|
||||
err = mana_gd_detect_devices(pdev);
|
||||
if (err)
|
||||
goto destroy_hwc;
|
||||
|
@ -1574,6 +1921,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
gc->is_pf = mana_is_pf(pdev->device);
|
||||
gc->bar0_va = bar0_va;
|
||||
gc->dev = &pdev->dev;
|
||||
xa_init(&gc->irq_contexts);
|
||||
|
||||
if (gc->is_pf)
|
||||
gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root);
|
||||
|
@ -1608,6 +1956,7 @@ unmap_bar:
|
|||
*/
|
||||
debugfs_remove_recursive(gc->mana_pci_debugfs);
|
||||
gc->mana_pci_debugfs = NULL;
|
||||
xa_destroy(&gc->irq_contexts);
|
||||
pci_iounmap(pdev, bar0_va);
|
||||
free_gc:
|
||||
pci_set_drvdata(pdev, NULL);
|
||||
|
@ -1633,6 +1982,8 @@ static void mana_gd_remove(struct pci_dev *pdev)
|
|||
|
||||
gc->mana_pci_debugfs = NULL;
|
||||
|
||||
xa_destroy(&gc->irq_contexts);
|
||||
|
||||
pci_iounmap(pdev, gc->bar0_va);
|
||||
|
||||
vfree(gc);
|
||||
|
@ -1644,7 +1995,7 @@ static void mana_gd_remove(struct pci_dev *pdev)
|
|||
}
|
||||
|
||||
/* The 'state' parameter is not used. */
|
||||
static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
|
||||
int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
|
||||
{
|
||||
struct gdma_context *gc = pci_get_drvdata(pdev);
|
||||
|
||||
|
@ -1660,7 +2011,7 @@ static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
|
|||
* fail -- if this happens, it's safer to just report an error than try to undo
|
||||
* what has been done.
|
||||
*/
|
||||
static int mana_gd_resume(struct pci_dev *pdev)
|
||||
int mana_gd_resume(struct pci_dev *pdev)
|
||||
{
|
||||
struct gdma_context *gc = pci_get_drvdata(pdev);
|
||||
int err;
|
||||
|
|
|
@@ -2,6 +2,7 @@
/* Copyright (c) 2021, Microsoft Corporation. */

#include <net/mana/gdma.h>
#include <net/mana/mana.h>
#include <net/mana/hw_channel.h>
#include <linux/vmalloc.h>

@@ -879,7 +880,9 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,

if (!wait_for_completion_timeout(&ctx->comp_event,
(msecs_to_jiffies(hwc->hwc_timeout)))) {
dev_err(hwc->dev, "HWC: Request timed out!\n");
if (hwc->hwc_timeout != 0)
dev_err(hwc->dev, "HWC: Request timed out!\n");

err = -ETIMEDOUT;
goto out;
}
@@ -890,8 +893,13 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
}

if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
ctx->status_code);
if (ctx->status_code == GDMA_STATUS_CMD_UNSUPPORTED) {
err = -EOPNOTSUPP;
goto out;
}
if (req_msg->req.msg_type != MANA_QUERY_PHY_STAT)
dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
ctx->status_code);
err = -EPROTO;
goto out;
}
@ -10,6 +10,7 @@
|
|||
#include <linux/filter.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
#include <net/checksum.h>
|
||||
#include <net/ip6_checksum.h>
|
||||
|
@ -46,6 +47,15 @@ static const struct file_operations mana_dbg_q_fops = {
|
|||
.read = mana_dbg_q_read,
|
||||
};
|
||||
|
||||
static bool mana_en_need_log(struct mana_port_context *apc, int err)
|
||||
{
|
||||
if (apc && apc->ac && apc->ac->gdma_dev &&
|
||||
apc->ac->gdma_dev->gdma_context)
|
||||
return mana_need_log(apc->ac->gdma_dev->gdma_context, err);
|
||||
else
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Microsoft Azure Network Adapter (MANA) functions */
|
||||
|
||||
static int mana_open(struct net_device *ndev)
|
||||
|
@ -250,10 +260,10 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
|||
struct netdev_queue *net_txq;
|
||||
struct mana_stats_tx *tx_stats;
|
||||
struct gdma_queue *gdma_sq;
|
||||
int err, len, num_gso_seg;
|
||||
unsigned int csum_type;
|
||||
struct mana_txq *txq;
|
||||
struct mana_cq *cq;
|
||||
int err, len;
|
||||
|
||||
if (unlikely(!apc->port_is_up))
|
||||
goto tx_drop;
|
||||
|
@ -406,6 +416,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
|||
skb_queue_tail(&txq->pending_skbs, skb);
|
||||
|
||||
len = skb->len;
|
||||
num_gso_seg = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
|
||||
net_txq = netdev_get_tx_queue(ndev, txq_idx);
|
||||
|
||||
err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
|
||||
|
@ -430,10 +441,13 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
|||
/* skb may be freed after mana_gd_post_work_request. Do not use it. */
|
||||
skb = NULL;
|
||||
|
||||
/* Populated the packet and bytes counters based on post GSO packet
|
||||
* calculations
|
||||
*/
|
||||
tx_stats = &txq->stats;
|
||||
u64_stats_update_begin(&tx_stats->syncp);
|
||||
tx_stats->packets++;
|
||||
tx_stats->bytes += len;
|
||||
tx_stats->packets += num_gso_seg;
|
||||
tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
|
||||
u64_stats_update_end(&tx_stats->syncp);
|
||||
|
||||
tx_busy:
|
||||
|
@ -773,8 +787,13 @@ static int mana_send_request(struct mana_context *ac, void *in_buf,
|
|||
err = mana_gd_send_request(gc, in_len, in_buf, out_len,
|
||||
out_buf);
|
||||
if (err || resp->status) {
|
||||
dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
|
||||
err, resp->status);
|
||||
if (err == -EOPNOTSUPP)
|
||||
return err;
|
||||
|
||||
if (req->req.msg_type != MANA_QUERY_PHY_STAT &&
|
||||
mana_need_log(gc, err))
|
||||
dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
|
||||
err, resp->status);
|
||||
return err ? err : -EPROTO;
|
||||
}
|
||||
|
||||
|
@ -849,8 +868,10 @@ static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
|
|||
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
|
||||
sizeof(resp));
|
||||
if (err) {
|
||||
netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
|
||||
err);
|
||||
if (mana_en_need_log(apc, err))
|
||||
netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
|
||||
err);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -905,8 +926,10 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc)
|
|||
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
|
||||
sizeof(resp));
|
||||
if (err) {
|
||||
netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
|
||||
err);
|
||||
if (mana_en_need_log(apc, err))
|
||||
netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
|
||||
err);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1136,7 +1159,9 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
|
|||
err = mana_send_request(apc->ac, req, req_buf_size, &resp,
|
||||
sizeof(resp));
|
||||
if (err) {
|
||||
netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
|
||||
if (mana_en_need_log(apc, err))
|
||||
netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
|
||||
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -1231,7 +1256,9 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
|
|||
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
|
||||
sizeof(resp));
|
||||
if (err) {
|
||||
netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
|
||||
if (mana_en_need_log(apc, err))
|
||||
netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -2610,6 +2637,88 @@ void mana_query_gf_stats(struct mana_port_context *apc)
|
|||
apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
|
||||
}
|
||||
|
||||
void mana_query_phy_stats(struct mana_port_context *apc)
|
||||
{
|
||||
struct mana_query_phy_stat_resp resp = {};
|
||||
struct mana_query_phy_stat_req req = {};
|
||||
struct net_device *ndev = apc->ndev;
|
||||
int err;
|
||||
|
||||
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_PHY_STAT,
|
||||
sizeof(req), sizeof(resp));
|
||||
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
|
||||
sizeof(resp));
|
||||
if (err)
|
||||
return;
|
||||
|
||||
err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_PHY_STAT,
|
||||
sizeof(resp));
|
||||
if (err || resp.hdr.status) {
|
||||
netdev_err(ndev,
|
||||
"Failed to query PHY stats: %d, resp:0x%x\n",
|
||||
err, resp.hdr.status);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Aggregate drop counters */
|
||||
apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy;
|
||||
apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy;
|
||||
|
||||
/* Per TC traffic Counters */
|
||||
apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy;
|
||||
apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy;
|
||||
apc->phy_stats.rx_pkt_tc1_phy = resp.rx_pkt_tc1_phy;
|
||||
apc->phy_stats.tx_pkt_tc1_phy = resp.tx_pkt_tc1_phy;
|
||||
apc->phy_stats.rx_pkt_tc2_phy = resp.rx_pkt_tc2_phy;
|
||||
apc->phy_stats.tx_pkt_tc2_phy = resp.tx_pkt_tc2_phy;
|
||||
apc->phy_stats.rx_pkt_tc3_phy = resp.rx_pkt_tc3_phy;
|
||||
apc->phy_stats.tx_pkt_tc3_phy = resp.tx_pkt_tc3_phy;
|
||||
apc->phy_stats.rx_pkt_tc4_phy = resp.rx_pkt_tc4_phy;
|
||||
apc->phy_stats.tx_pkt_tc4_phy = resp.tx_pkt_tc4_phy;
|
||||
apc->phy_stats.rx_pkt_tc5_phy = resp.rx_pkt_tc5_phy;
|
||||
apc->phy_stats.tx_pkt_tc5_phy = resp.tx_pkt_tc5_phy;
|
||||
apc->phy_stats.rx_pkt_tc6_phy = resp.rx_pkt_tc6_phy;
|
||||
apc->phy_stats.tx_pkt_tc6_phy = resp.tx_pkt_tc6_phy;
|
||||
apc->phy_stats.rx_pkt_tc7_phy = resp.rx_pkt_tc7_phy;
|
||||
apc->phy_stats.tx_pkt_tc7_phy = resp.tx_pkt_tc7_phy;
|
||||
|
||||
/* Per TC byte Counters */
|
||||
apc->phy_stats.rx_byte_tc0_phy = resp.rx_byte_tc0_phy;
|
||||
apc->phy_stats.tx_byte_tc0_phy = resp.tx_byte_tc0_phy;
|
||||
apc->phy_stats.rx_byte_tc1_phy = resp.rx_byte_tc1_phy;
|
||||
apc->phy_stats.tx_byte_tc1_phy = resp.tx_byte_tc1_phy;
|
||||
apc->phy_stats.rx_byte_tc2_phy = resp.rx_byte_tc2_phy;
|
||||
apc->phy_stats.tx_byte_tc2_phy = resp.tx_byte_tc2_phy;
|
||||
apc->phy_stats.rx_byte_tc3_phy = resp.rx_byte_tc3_phy;
|
||||
apc->phy_stats.tx_byte_tc3_phy = resp.tx_byte_tc3_phy;
|
||||
apc->phy_stats.rx_byte_tc4_phy = resp.rx_byte_tc4_phy;
|
||||
apc->phy_stats.tx_byte_tc4_phy = resp.tx_byte_tc4_phy;
|
||||
apc->phy_stats.rx_byte_tc5_phy = resp.rx_byte_tc5_phy;
|
||||
apc->phy_stats.tx_byte_tc5_phy = resp.tx_byte_tc5_phy;
|
||||
apc->phy_stats.rx_byte_tc6_phy = resp.rx_byte_tc6_phy;
|
||||
apc->phy_stats.tx_byte_tc6_phy = resp.tx_byte_tc6_phy;
|
||||
apc->phy_stats.rx_byte_tc7_phy = resp.rx_byte_tc7_phy;
|
||||
apc->phy_stats.tx_byte_tc7_phy = resp.tx_byte_tc7_phy;
|
||||
|
||||
/* Per TC pause Counters */
|
||||
apc->phy_stats.rx_pause_tc0_phy = resp.rx_pause_tc0_phy;
|
||||
apc->phy_stats.tx_pause_tc0_phy = resp.tx_pause_tc0_phy;
|
||||
apc->phy_stats.rx_pause_tc1_phy = resp.rx_pause_tc1_phy;
|
||||
apc->phy_stats.tx_pause_tc1_phy = resp.tx_pause_tc1_phy;
|
||||
apc->phy_stats.rx_pause_tc2_phy = resp.rx_pause_tc2_phy;
|
||||
apc->phy_stats.tx_pause_tc2_phy = resp.tx_pause_tc2_phy;
|
||||
apc->phy_stats.rx_pause_tc3_phy = resp.rx_pause_tc3_phy;
|
||||
apc->phy_stats.tx_pause_tc3_phy = resp.tx_pause_tc3_phy;
|
||||
apc->phy_stats.rx_pause_tc4_phy = resp.rx_pause_tc4_phy;
|
||||
apc->phy_stats.tx_pause_tc4_phy = resp.tx_pause_tc4_phy;
|
||||
apc->phy_stats.rx_pause_tc5_phy = resp.rx_pause_tc5_phy;
|
||||
apc->phy_stats.tx_pause_tc5_phy = resp.tx_pause_tc5_phy;
|
||||
apc->phy_stats.rx_pause_tc6_phy = resp.rx_pause_tc6_phy;
|
||||
apc->phy_stats.tx_pause_tc6_phy = resp.tx_pause_tc6_phy;
|
||||
apc->phy_stats.rx_pause_tc7_phy = resp.rx_pause_tc7_phy;
|
||||
apc->phy_stats.tx_pause_tc7_phy = resp.tx_pause_tc7_phy;
|
||||
}
|
||||
|
||||
static int mana_init_port(struct net_device *ndev)
|
||||
{
|
||||
struct mana_port_context *apc = netdev_priv(ndev);
|
||||
|
@ -2804,11 +2913,10 @@ static int mana_dealloc_queues(struct net_device *ndev)
|
|||
|
||||
apc->rss_state = TRI_STATE_FALSE;
|
||||
err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
|
||||
if (err) {
|
||||
if (err && mana_en_need_log(apc, err))
|
||||
netdev_err(ndev, "Failed to disable vPort: %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Even in err case, still need to cleanup the vPort */
|
||||
mana_destroy_vport(apc);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -7,10 +7,12 @@
|
|||
|
||||
#include <net/mana/mana.h>
|
||||
|
||||
static const struct {
|
||||
struct mana_stats_desc {
|
||||
char name[ETH_GSTRING_LEN];
|
||||
u16 offset;
|
||||
} mana_eth_stats[] = {
|
||||
};
|
||||
|
||||
static const struct mana_stats_desc mana_eth_stats[] = {
|
||||
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
|
||||
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
|
||||
{"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
|
||||
|
@ -75,6 +77,59 @@ static const struct {
|
|||
rx_cqe_unknown_type)},
|
||||
};
|
||||
|
||||
static const struct mana_stats_desc mana_phy_stats[] = {
|
||||
{ "hc_rx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_drop_phy) },
|
||||
{ "hc_tx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_drop_phy) },
|
||||
{ "hc_tc0_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc0_phy) },
|
||||
{ "hc_tc0_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc0_phy) },
|
||||
{ "hc_tc0_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc0_phy) },
|
||||
{ "hc_tc0_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc0_phy) },
|
||||
{ "hc_tc1_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc1_phy) },
|
||||
{ "hc_tc1_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc1_phy) },
|
||||
{ "hc_tc1_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc1_phy) },
|
||||
{ "hc_tc1_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc1_phy) },
|
||||
{ "hc_tc2_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc2_phy) },
|
||||
{ "hc_tc2_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc2_phy) },
|
||||
{ "hc_tc2_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc2_phy) },
|
||||
{ "hc_tc2_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc2_phy) },
|
||||
{ "hc_tc3_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc3_phy) },
|
||||
{ "hc_tc3_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc3_phy) },
|
||||
{ "hc_tc3_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc3_phy) },
|
||||
{ "hc_tc3_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc3_phy) },
|
||||
{ "hc_tc4_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc4_phy) },
|
||||
{ "hc_tc4_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc4_phy) },
|
||||
{ "hc_tc4_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc4_phy) },
|
||||
{ "hc_tc4_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc4_phy) },
|
||||
{ "hc_tc5_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc5_phy) },
|
||||
{ "hc_tc5_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc5_phy) },
|
||||
{ "hc_tc5_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc5_phy) },
|
||||
{ "hc_tc5_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc5_phy) },
|
||||
{ "hc_tc6_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc6_phy) },
|
||||
{ "hc_tc6_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc6_phy) },
|
||||
{ "hc_tc6_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc6_phy) },
|
||||
{ "hc_tc6_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc6_phy) },
|
||||
{ "hc_tc7_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc7_phy) },
|
||||
{ "hc_tc7_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc7_phy) },
|
||||
{ "hc_tc7_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc7_phy) },
|
||||
{ "hc_tc7_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc7_phy) },
|
||||
{ "hc_tc0_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc0_phy) },
|
||||
{ "hc_tc0_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc0_phy) },
|
||||
{ "hc_tc1_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc1_phy) },
|
||||
{ "hc_tc1_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc1_phy) },
|
||||
{ "hc_tc2_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc2_phy) },
|
||||
{ "hc_tc2_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc2_phy) },
|
||||
{ "hc_tc3_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc3_phy) },
|
||||
{ "hc_tc3_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc3_phy) },
|
||||
{ "hc_tc4_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc4_phy) },
|
||||
{ "hc_tc4_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc4_phy) },
|
||||
{ "hc_tc5_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc5_phy) },
|
||||
{ "hc_tc5_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc5_phy) },
|
||||
{ "hc_tc6_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc6_phy) },
|
||||
{ "hc_tc6_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc6_phy) },
|
||||
{ "hc_tc7_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc7_phy) },
|
||||
{ "hc_tc7_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc7_phy) },
|
||||
};
|
||||
|
||||
static int mana_get_sset_count(struct net_device *ndev, int stringset)
{
        struct mana_port_context *apc = netdev_priv(ndev);

@ -83,8 +138,8 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
        if (stringset != ETH_SS_STATS)
                return -EINVAL;

        return ARRAY_SIZE(mana_eth_stats) + num_queues *
               (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
        return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) +
               num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}

static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)

@ -99,6 +154,9 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
        for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
                ethtool_puts(&data, mana_eth_stats[i].name);

        for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++)
                ethtool_puts(&data, mana_phy_stats[i].name);

        for (i = 0; i < num_queues; i++) {
                ethtool_sprintf(&data, "rx_%d_packets", i);
                ethtool_sprintf(&data, "rx_%d_bytes", i);

@ -128,6 +186,7 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
        struct mana_port_context *apc = netdev_priv(ndev);
        unsigned int num_queues = apc->num_queues;
        void *eth_stats = &apc->eth_stats;
        void *phy_stats = &apc->phy_stats;
        struct mana_stats_rx *rx_stats;
        struct mana_stats_tx *tx_stats;
        unsigned int start;

@ -151,9 +210,18 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
        /* we call mana function to update stats from GDMA */
        mana_query_gf_stats(apc);

        /* We call this mana function to get the phy stats from GDMA and includes
         * aggregate tx/rx drop counters, Per-TC(Traffic Channel) tx/rx and pause
         * counters.
         */
        mana_query_phy_stats(apc);

        for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
                data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);

        for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++)
                data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset);

        for (q = 0; q < num_queues; q++) {
                rx_stats = &apc->rxqs[q]->stats;
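
Note on the hunk above: mana_query_phy_stats() is the new helper that fetches the PHY counters which the copy loop then walks via the mana_phy_stats name/offset table. Below is a minimal sketch of what such a helper looks like, assuming the same mana_gd_init_req_hdr()/mana_send_request() pattern the driver uses for its other vPort queries; error handling and the full per-TC field copy are trimmed, so treat it as illustrative rather than the exact backported function. The command code and request/response layouts it relies on (MANA_QUERY_PHY_STAT, struct mana_query_phy_stat_req/resp) are the ones added to mana.h later in this series.

void mana_query_phy_stats(struct mana_port_context *apc)
{
        struct mana_query_phy_stat_resp resp = {};
        struct mana_query_phy_stat_req req = {};
        struct net_device *ndev = apc->ndev;
        int err;

        mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_PHY_STAT,
                             sizeof(req), sizeof(resp));
        err = mana_send_request(apc->ac, &req, sizeof(req), &resp, sizeof(resp));
        if (err) {
                netdev_err(ndev, "Failed to query PHY stats: %d\n", err);
                return;
        }

        /* Copy the aggregate drop counters; the per-TC packet, byte and
         * pause counters follow the same one-to-one field mapping into
         * struct mana_ethtool_phy_stats.
         */
        apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy;
        apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy;
        apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy;
        apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy;
        /* ... tc1 through tc7 packet, byte and pause counters elided ... */
}
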
@ -2061,6 +2061,7 @@ static struct irq_chip hv_msi_irq_chip = {
static struct msi_domain_ops hv_msi_ops = {
        .msi_prepare = hv_msi_prepare,
        .msi_free = hv_msi_free,
        .prepare_desc = pci_msix_prepare_desc,
};

/**

@ -2082,7 +2083,7 @@ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
        hbus->msi_info.ops = &hv_msi_ops;
        hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
                MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
                MSI_FLAG_PCI_MSIX);
                MSI_FLAG_PCI_MSIX | MSI_FLAG_PCI_MSIX_ALLOC_DYN);
        hbus->msi_info.handler = FLOW_HANDLER;
        hbus->msi_info.handler_name = FLOW_NAME;
        hbus->msi_info.data = hbus;
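
For context, the MSI_FLAG_PCI_MSIX_ALLOC_DYN flag added above is what lets PCI drivers behind this Hyper-V bridge allocate additional MSI-X vectors after MSI-X has already been enabled. A small consumer-side sketch using the generic kernel API follows; the device, handler and name strings are placeholders, not MANA or pci-hyperv code. pci_msix_free_irq() is the matching teardown for a vector obtained this way.

#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/pci.h>

static irqreturn_t demo_dyn_handler(int irq, void *data)
{
        /* Placeholder handler for the dynamically added vector. */
        return IRQ_HANDLED;
}

/* Enable one MSI-X vector up front, then grow by one more at runtime.
 * The second step only succeeds when the parent MSI domain advertises
 * MSI_FLAG_PCI_MSIX_ALLOC_DYN, as pci-hyperv now does.
 */
static int demo_add_dynamic_vector(struct pci_dev *pdev)
{
        struct msi_map map;
        int nvec;

        nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
        if (nvec < 0)
                return nvec;

        /* Ask for an extra vector at any free MSI-X table index. */
        map = pci_msix_alloc_irq_at(pdev, MSI_ANY_INDEX, NULL);
        if (map.index < 0)
                return map.index;

        return request_irq(map.virq, demo_dyn_handler, 0, "demo-dyn-msix", pdev);
}
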
@ -222,13 +222,14 @@ static void pci_irq_unmask_msix(struct irq_data *data)
        pci_msix_unmask(irq_data_get_msi_desc(data));
}

static void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
                                  struct msi_desc *desc)
void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
                           struct msi_desc *desc)
{
        /* Don't fiddle with preallocated MSI descriptors */
        if (!desc->pci.mask_base)
                msix_prepare_msi_desc(to_pci_dev(desc->dev), desc);
}
EXPORT_SYMBOL_GPL(pci_msix_prepare_desc);

static const struct msi_domain_template pci_msix_template = {
        .chip = {
@ -679,6 +679,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
                                             struct irq_domain *parent);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
                           struct msi_desc *desc);
#else /* CONFIG_PCI_MSI */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
@ -10,6 +10,7 @@
#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES 0x00000105
#define GDMA_STATUS_CMD_UNSUPPORTED 0xffffffff

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.

@ -58,9 +59,10 @@ enum gdma_eqe_type {
        GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
        GDMA_EQE_HWC_INIT_DATA = 130,
        GDMA_EQE_HWC_INIT_DONE = 131,
        GDMA_EQE_HWC_SOC_RECONFIG = 132,
        GDMA_EQE_HWC_FPGA_RECONFIG = 132,
        GDMA_EQE_HWC_SOC_RECONFIG_DATA = 133,
        GDMA_EQE_HWC_SOC_SERVICE = 134,
        GDMA_EQE_HWC_RESET_REQUEST = 135,
        GDMA_EQE_RNIC_QP_FATAL = 176,
};

@ -388,7 +390,7 @@ struct gdma_context {
        unsigned int max_num_queues;
        unsigned int max_num_msix;
        unsigned int num_msix_usable;
        struct gdma_irq_context *irq_contexts;
        struct xarray irq_contexts;

        /* L2 MTU */
        u16 adapter_mtu;

@ -403,6 +405,8 @@ struct gdma_context {
        u32 test_event_eq_id;

        bool is_pf;
        bool in_service;

        phys_addr_t bar0_pa;
        void __iomem *bar0_va;
        void __iomem *shm_base;

@ -578,12 +582,24 @@ enum {
/* Driver can handle holes (zeros) in the device list */
#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)

/* Driver supports dynamic MSI-X vector allocation */
#define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13)

/* Driver can self reset on EQE notification */
#define GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE BIT(14)

/* Driver can self reset on FPGA Reconfig EQE notification */
#define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)

#define GDMA_DRV_CAP_FLAGS1 \
        (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
         GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
         GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
         GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
         GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP)
         GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \
         GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
         GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
         GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE)

#define GDMA_DRV_CAP_FLAGS2 0

@ -910,4 +926,9 @@ void mana_unregister_debugfs(void);

int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event);

int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state);
int mana_gd_resume(struct pci_dev *pdev);

bool mana_need_log(struct gdma_context *gc, int err);

#endif /* _GDMA_H */
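
The GDMA_EQE_HWC_RESET_REQUEST event type, the new in_service flag and the mana_gd_suspend()/mana_gd_resume() prototypes above are the building blocks of the EQE-triggered recovery path. The sketch below is a rough illustration of how they can fit together; the work-item wrapper, its fields and the scheduling site are invented for this example, and only the flag and the two helpers come from the header changes in this series.

#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/workqueue.h>

/* Hypothetical wrapper: the real driver keeps its service state elsewhere. */
struct demo_reset_ctx {
        struct work_struct work;
        struct gdma_context *gc;
        struct pci_dev *pdev;
};

/* Scheduled (for example from the EQ callback) when a
 * GDMA_EQE_HWC_RESET_REQUEST event arrives: quiesce the device through the
 * suspend helper, then bring it back up, using in_service to avoid
 * re-entering a reset that is already running.
 */
static void demo_reset_work_fn(struct work_struct *work)
{
        struct demo_reset_ctx *ctx = container_of(work, struct demo_reset_ctx, work);
        struct gdma_context *gc = ctx->gc;

        if (gc->in_service)
                return;

        gc->in_service = true;
        if (!mana_gd_suspend(ctx->pdev, PMSG_SUSPEND))
                mana_gd_resume(ctx->pdev);
        gc->in_service = false;
}
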
@ -404,6 +404,65 @@ struct mana_ethtool_stats {
        u64 rx_cqe_unknown_type;
};

struct mana_ethtool_phy_stats {
        /* Drop Counters */
        u64 rx_pkt_drop_phy;
        u64 tx_pkt_drop_phy;

        /* Per TC traffic Counters */
        u64 rx_pkt_tc0_phy;
        u64 tx_pkt_tc0_phy;
        u64 rx_pkt_tc1_phy;
        u64 tx_pkt_tc1_phy;
        u64 rx_pkt_tc2_phy;
        u64 tx_pkt_tc2_phy;
        u64 rx_pkt_tc3_phy;
        u64 tx_pkt_tc3_phy;
        u64 rx_pkt_tc4_phy;
        u64 tx_pkt_tc4_phy;
        u64 rx_pkt_tc5_phy;
        u64 tx_pkt_tc5_phy;
        u64 rx_pkt_tc6_phy;
        u64 tx_pkt_tc6_phy;
        u64 rx_pkt_tc7_phy;
        u64 tx_pkt_tc7_phy;

        u64 rx_byte_tc0_phy;
        u64 tx_byte_tc0_phy;
        u64 rx_byte_tc1_phy;
        u64 tx_byte_tc1_phy;
        u64 rx_byte_tc2_phy;
        u64 tx_byte_tc2_phy;
        u64 rx_byte_tc3_phy;
        u64 tx_byte_tc3_phy;
        u64 rx_byte_tc4_phy;
        u64 tx_byte_tc4_phy;
        u64 rx_byte_tc5_phy;
        u64 tx_byte_tc5_phy;
        u64 rx_byte_tc6_phy;
        u64 tx_byte_tc6_phy;
        u64 rx_byte_tc7_phy;
        u64 tx_byte_tc7_phy;

        /* Per TC pause Counters */
        u64 rx_pause_tc0_phy;
        u64 tx_pause_tc0_phy;
        u64 rx_pause_tc1_phy;
        u64 tx_pause_tc1_phy;
        u64 rx_pause_tc2_phy;
        u64 tx_pause_tc2_phy;
        u64 rx_pause_tc3_phy;
        u64 tx_pause_tc3_phy;
        u64 rx_pause_tc4_phy;
        u64 tx_pause_tc4_phy;
        u64 rx_pause_tc5_phy;
        u64 tx_pause_tc5_phy;
        u64 rx_pause_tc6_phy;
        u64 tx_pause_tc6_phy;
        u64 rx_pause_tc7_phy;
        u64 tx_pause_tc7_phy;
};

struct mana_context {
        struct gdma_dev *gdma_dev;

@ -474,6 +533,8 @@ struct mana_port_context {

        struct mana_ethtool_stats eth_stats;

        struct mana_ethtool_phy_stats phy_stats;

        /* Debugfs */
        struct dentry *mana_port_debugfs;
};

@ -501,6 +562,7 @@ struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);
void mana_query_phy_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);

@ -527,6 +589,7 @@ enum mana_command_code {
        MANA_FENCE_RQ = 0x20006,
        MANA_CONFIG_VPORT_RX = 0x20007,
        MANA_QUERY_VPORT_CONFIG = 0x20008,
        MANA_QUERY_PHY_STAT = 0x2000c,

        /* Privileged commands for the PF mode */
        MANA_REGISTER_FILTER = 0x28000,

@ -689,6 +752,74 @@ struct mana_query_gf_stat_resp {
        u64 tx_err_gdma;
}; /* HW DATA */

/* Query phy stats */
struct mana_query_phy_stat_req {
        struct gdma_req_hdr hdr;
        u64 req_stats;
}; /* HW DATA */

struct mana_query_phy_stat_resp {
        struct gdma_resp_hdr hdr;
        u64 reported_stats;

        /* Aggregate Drop Counters */
        u64 rx_pkt_drop_phy;
        u64 tx_pkt_drop_phy;

        /* Per TC(Traffic class) traffic Counters */
        u64 rx_pkt_tc0_phy;
        u64 tx_pkt_tc0_phy;
        u64 rx_pkt_tc1_phy;
        u64 tx_pkt_tc1_phy;
        u64 rx_pkt_tc2_phy;
        u64 tx_pkt_tc2_phy;
        u64 rx_pkt_tc3_phy;
        u64 tx_pkt_tc3_phy;
        u64 rx_pkt_tc4_phy;
        u64 tx_pkt_tc4_phy;
        u64 rx_pkt_tc5_phy;
        u64 tx_pkt_tc5_phy;
        u64 rx_pkt_tc6_phy;
        u64 tx_pkt_tc6_phy;
        u64 rx_pkt_tc7_phy;
        u64 tx_pkt_tc7_phy;

        u64 rx_byte_tc0_phy;
        u64 tx_byte_tc0_phy;
        u64 rx_byte_tc1_phy;
        u64 tx_byte_tc1_phy;
        u64 rx_byte_tc2_phy;
        u64 tx_byte_tc2_phy;
        u64 rx_byte_tc3_phy;
        u64 tx_byte_tc3_phy;
        u64 rx_byte_tc4_phy;
        u64 tx_byte_tc4_phy;
        u64 rx_byte_tc5_phy;
        u64 tx_byte_tc5_phy;
        u64 rx_byte_tc6_phy;
        u64 tx_byte_tc6_phy;
        u64 rx_byte_tc7_phy;
        u64 tx_byte_tc7_phy;

        /* Per TC(Traffic Class) pause Counters */
        u64 rx_pause_tc0_phy;
        u64 tx_pause_tc0_phy;
        u64 rx_pause_tc1_phy;
        u64 tx_pause_tc1_phy;
        u64 rx_pause_tc2_phy;
        u64 tx_pause_tc2_phy;
        u64 rx_pause_tc3_phy;
        u64 tx_pause_tc3_phy;
        u64 rx_pause_tc4_phy;
        u64 tx_pause_tc4_phy;
        u64 rx_pause_tc5_phy;
        u64 tx_pause_tc5_phy;
        u64 rx_pause_tc6_phy;
        u64 tx_pause_tc6_phy;
        u64 rx_pause_tc7_phy;
        u64 tx_pause_tc7_phy;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
        struct gdma_req_hdr hdr;