Changes from all commits (27 commits)
b0d0309  jump_label: export static_key_slow_{inc,dec}_cpuslocked()  (alobakin, Dec 15, 2023)
f1236f6  skbuff: allow 2-4-argument skb_frag_dma_map()  (alobakin, Apr 5, 2024)
1c16c6d  unroll: add generic loop unroll helpers  (alobakin, Feb 12, 2024)
59cf42e  bpf, xdp: constify some bpf_prog * function arguments  (alobakin, Dec 11, 2023)
b9f85b3  xdp, xsk: constify read-only arguments of some static inline helpers  (alobakin, Dec 6, 2023)
5886b62  xdp: allow attaching already registered memory model to xdp_rxq_info  (alobakin, Dec 19, 2023)
c72dc47  xdp: register system page pool as an XDP memory model  (tohojo, Feb 20, 2024)
662500d  page_pool: make page_pool_put_page_bulk() actually handle array of pages  (alobakin, Feb 5, 2024)
df199a9  page_pool: allow mixing PPs within one bulk  (alobakin, Dec 7, 2023)
7472e4c  xdp: get rid of xdp_frame::mem.id  (alobakin, Dec 7, 2023)
a99ed0c  xdp: add generic xdp_buff_add_frag()  (alobakin, Dec 6, 2023)
52dc005  xdp: add generic xdp_build_skb_from_buff()  (alobakin, Dec 6, 2023)
f87b002  xsk: align &xdp_buff_xsk harder  (alobakin, Nov 4, 2024)
260bac0  xsk: allow attaching XSk pool via xdp_rxq_info_reg_mem_model()  (alobakin, Jan 31, 2024)
5a6d544  xsk: make xsk_buff_add_frag really add a frag via __xdp_buff_add_frag()  (alobakin, Jan 30, 2024)
feab761  xsk: add generic XSk &xdp_buff -> skb conversion  (alobakin, Dec 22, 2023)
2e79b05  xsk: add helper to get &xdp_desc's DMA and meta pointer in one go  (alobakin, Mar 25, 2024)
73f36aa  libeth: support native XDP and register memory model  (alobakin, Dec 8, 2023)
45a2217  libeth: add a couple of XDP helpers (libeth_xdp)  (alobakin, Dec 11, 2023)
cd5ff9f  ice: remove legacy Rx and construct SKB  (michalQb, Oct 16, 2024)
af46571  ice: drop page splitting and recycling  (michalQb, Oct 17, 2024)
7b20304  ice: convert to page_pool (compilation only)  (michalQb, Oct 24, 2024)
9f11113  pp conversion - TX run  (michalQb, Nov 13, 2024)
08825a9  fix unmapping at the end  (michalQb, Nov 13, 2024)
4c3fd8c  latest fix for xdp_act  (michalQb, Nov 14, 2024)
532c841  fix for unmapping dma len  (michalQb, Nov 15, 2024)
6cd6ebd  add other libeth helpers for xdp buffer  (michalQb, Nov 18, 2024)
2 changes: 1 addition & 1 deletion drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2281,7 +2281,7 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
 	new_xdpf->len = xdpf->len;
 	new_xdpf->headroom = priv->tx_headroom;
 	new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
-	new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
+	new_xdpf->mem_type = MEM_TYPE_PAGE_ORDER0;

 	/* Release the initial buffer */
 	xdp_return_frame_rx_napi(xdpf);
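This dpaa one-liner is fallout from the "xdp: get rid of xdp_frame::mem.id" commit: frames no longer embed a full struct xdp_mem_info, only the memory type, and the allocator id is resolved from the Rx queue instead. A minimal before/after sketch; the abridged layouts are inferred from this diff, not checked against the final headers:

	#include <linux/types.h>

	/* Abridged sketch only; the real xdp_frame has many more fields and
	 * upstream of course keeps the same struct name for both versions.
	 */
	struct old_xdp_frame {
		void *data;
		struct xdp_mem_info {
			u32 type;		/* enum xdp_mem_type */
			u32 id;			/* id of the registered allocator */
		} mem;
	};

	struct new_xdp_frame {
		void *data;
		u32 mem_type;			/* enum xdp_mem_type alone */
	};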
1 change: 1 addition & 0 deletions drivers/net/ethernet/intel/Kconfig
@@ -291,6 +291,7 @@ config ICE
 	select AUXILIARY_BUS
 	select DIMLIB
 	select LIBIE
+	select LIBETH_XDP
 	select NET_DEVLINK
 	select PLDMFW
 	select DPLL
30 changes: 3 additions & 27 deletions drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -395,32 +395,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
 		WARN_ON_ONCE(1);
 }

-static int
-i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
-		  struct xdp_buff *xdp, const unsigned int size)
-{
-	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
-	if (!xdp_buff_has_frags(first)) {
-		sinfo->nr_frags = 0;
-		sinfo->xdp_frags_size = 0;
-		xdp_buff_set_frags_flag(first);
-	}
-
-	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
-		xsk_buff_free(first);
-		return -ENOMEM;
-	}
-
-	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
-				   virt_to_page(xdp->data_hard_start),
-				   XDP_PACKET_HEADROOM, size);
-	sinfo->xdp_frags_size += size;
-	xsk_buff_add_frag(xdp);
-
-	return 0;
-}
-
 /**
  * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
  * @rx_ring: Rx ring
@@ -486,8 +460,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)

 		if (!first)
 			first = bi;
-		else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
+		else if (!xsk_buff_add_frag(first, bi)) {
+			xsk_buff_free(first);
 			break;
+		}

 		if (++next_to_process == count)
 			next_to_process = 0;
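The deleted i40e_add_xsk_frag() doubles as documentation for what the core now provides: per the "xsk: make xsk_buff_add_frag really add a frag via __xdp_buff_add_frag()" commit, xsk_buff_add_frag() takes the head buffer plus the new fragment and returns a bool. A hedged reconstruction from the removed driver code above; the upstream implementation goes through __xdp_buff_add_frag() and may differ in detail:

	static bool xsk_buff_add_frag_sketch(struct xdp_buff *first,
					     struct xdp_buff *xdp)
	{
		struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
		u32 size = xdp->data_end - xdp->data;

		if (!xdp_buff_has_frags(first)) {
			sinfo->nr_frags = 0;
			sinfo->xdp_frags_size = 0;
			xdp_buff_set_frags_flag(first);
		}

		/* Unlike the old i40e helper, running out of frag slots does
		 * not free @first here; the caller owns the cleanup (see the
		 * i40e hunk above).
		 */
		if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS))
			return false;

		__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
					   virt_to_page(xdp->data_hard_start),
					   XDP_PACKET_HEADROOM, size);
		sinfo->xdp_frags_size += size;

		return true;
	}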
3 changes: 2 additions & 1 deletion drivers/net/ethernet/intel/ice/ice.h
@@ -372,6 +372,8 @@ struct ice_vsi {
 	spinlock_t arfs_lock;	/* protects aRFS hash table and filter state */
 	atomic_t *arfs_last_fltr_id;

+	u16 max_frame;
+
 	struct ice_aqc_vsi_props info;		/* VSI properties */
 	struct ice_vsi_vlan_info vlan_info;	/* vlan config to be restored */

@@ -511,7 +513,6 @@ enum ice_pf_flags {
 	ICE_FLAG_MOD_POWER_UNSUPPORTED,
 	ICE_FLAG_PHY_FW_LOAD_FAILED,
 	ICE_FLAG_ETHTOOL_CTXT,		/* set when ethtool holds RTNL lock */
-	ICE_FLAG_LEGACY_RX,
 	ICE_FLAG_VF_TRUE_PROMISC_ENA,
 	ICE_FLAG_MDD_AUTO_RESET_VF,
 	ICE_FLAG_VF_VLAN_PRUNING,
60 changes: 20 additions & 40 deletions drivers/net/ethernet/intel/ice/ice_base.c
@@ -359,19 +359,6 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
 	tlan_ctx->legacy_int = ICE_TX_LEGACY;
 }

-/**
- * ice_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
-{
-	if (ice_ring_uses_build_skb(rx_ring))
-		return ICE_SKB_PAD;
-	return 0;
-}
-
 /**
  * ice_setup_rx_ctx - Configure a receive ring context
  * @ring: The Rx ring to configure
@@ -406,7 +393,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
 	/* Receive Packet Data Buffer Size.
 	 * The Packet Data Buffer Size is defined in 128 byte units.
 	 */
-	rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
+	rlan_ctx.dbuf = DIV_ROUND_UP(ICE_RXBUF_3072,
				     BIT_ULL(ICE_RLAN_CTX_DBUF_S));

 	/* use 32 byte descriptors */
@@ -447,8 +434,8 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
 	/* Max packet size for this queue - must not be set to a larger value
 	 * than 5 x DBUF
 	 */
-	rlan_ctx.rxmax = min_t(u32, ring->max_frame,
-			       ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
+	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
+			       ICE_MAX_CHAINED_RX_BUFS * ICE_RXBUF_3072);

 	/* Rx queue threshold in units of 64 */
 	rlan_ctx.lrxqthresh = 1;
@@ -484,13 +471,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
 	if (vsi->type == ICE_VSI_VF)
 		return 0;

-	/* configure Rx buffer alignment */
-	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
-		ice_clear_ring_build_skb_ena(ring);
-	else
-		ice_set_ring_build_skb_ena(ring);
-
-	ring->rx_offset = ice_rx_offset(ring);
+	ring->rx_offset = ICE_SKB_PAD;

 	/* init queue specific tail register */
 	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
@@ -525,7 +506,7 @@ static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring)
 #if (PAGE_SIZE >= 8192)
 	frame_sz = rx_ring->rx_buf_len;
 #else
-	frame_sz = ice_rx_pg_size(rx_ring) / 2;
+	frame_sz = PAGE_SIZE / 2;
 #endif

 	return frame_sz;
@@ -541,6 +522,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 {
 	struct device *dev = ice_pf_to_dev(ring->vsi->back);
 	u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
+	u32 rx_buf_len;
 	int err;

 	if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
@@ -557,12 +539,12 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 	if (ring->xsk_pool) {
 		xdp_rxq_info_unreg(&ring->xdp_rxq);

-		ring->rx_buf_len =
+		rx_buf_len =
 			xsk_pool_get_rx_frame_size(ring->xsk_pool);
 		err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
 					 ring->q_index,
 					 ring->q_vector->napi.napi_id,
-					 ring->rx_buf_len);
+					 rx_buf_len);
 		if (err)
 			return err;
 		err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
@@ -585,15 +567,16 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 			return err;
 		}

-		err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
-						 MEM_TYPE_PAGE_SHARED,
-						 NULL);
-		if (err)
-			return err;
+		//err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+		//			MEM_TYPE_PAGE_SHARED,
+		//			NULL);
+		//			MEM_TYPE_PAGE_POOL,
+		//			ring->pp);
+		xdp_rxq_info_attach_page_pool(&ring->xdp_rxq,
+					      ring->pp);
 		}
 	}

-	xdp_init_buff(&ring->xdp, ice_get_frame_sz(ring), &ring->xdp_rxq);
 	ring->xdp.data = NULL;
 	ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
 	err = ice_setup_rx_ctx(ring);
@@ -625,7 +608,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 		return 0;
 	}

-	ice_alloc_rx_bufs(ring, num_bufs);
+	err = ice_alloc_rx_bufs(ring, num_bufs);

 	return 0;
 }
@@ -648,18 +631,15 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
  */
 static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring)
 {
-	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
-		ring->max_frame = ICE_MAX_FRAME_LEGACY_RX;
-		ring->rx_buf_len = ICE_RXBUF_1664;
+	if (!vsi->netdev) {
+		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
 #if (PAGE_SIZE < 8192)
 	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
 		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
-		ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
-		ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
 #endif
 	} else {
-		ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
-		ring->rx_buf_len = ICE_RXBUF_3072;
+		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
 	}
 }
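The attach call in the hunk above is the point of the "xdp: allow attaching already registered memory model to xdp_rxq_info" and "xdp: register system page pool as an XDP memory model" commits: a page_pool now carries its own XDP memory id, so the ring only binds it to the rxq instead of calling xdp_rxq_info_reg_mem_model(MEM_TYPE_PAGE_SHARED, ...). A minimal sketch of the non-XSk setup path; the page_pool_params values are illustrative assumptions, not ice's actual configuration:

	static int sketch_cfg_rxq_pp(struct ice_rx_ring *ring)
	{
		struct page_pool_params pp_params = {
			.order		= 0,
			.pool_size	= ring->count,
			.nid		= NUMA_NO_NODE,
			.dev		= ice_pf_to_dev(ring->vsi->back),
			.dma_dir	= DMA_FROM_DEVICE,
		};
		int err;

		/* The pool registers its own XDP memory model at creation. */
		ring->pp = page_pool_create(&pp_params);
		if (IS_ERR(ring->pp))
			return PTR_ERR(ring->pp);

		err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
					 ring->q_index,
					 ring->q_vector->napi.napi_id,
					 PAGE_SIZE / 2);
		if (err)
			return err;

		/* No separate reg_mem_model() step: attach the pre-registered pool. */
		xdp_rxq_info_attach_page_pool(&ring->xdp_rxq, ring->pp);

		return 0;
	}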
13 changes: 4 additions & 9 deletions drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -10,6 +10,7 @@
 #include "ice_lib.h"
 #include "ice_dcb_lib.h"
 #include <net/dcbnl.h>
+#include <net/libeth/rx.h>

 struct ice_stats {
 	char stat_string[ETH_GSTRING_LEN];
@@ -340,7 +341,6 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
 			ICE_FLAG_VF_TRUE_PROMISC_ENA),
 	ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
 	ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING),
-	ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
 };

 #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -1252,7 +1252,7 @@ static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size)
  */
 static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
 {
-	struct ice_rx_buf *rx_buf;
+	struct libeth_fqe *rx_buf;
 	int valid_frames, i;
 	u8 *received_buf;
@@ -1268,8 +1268,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
 			   cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
 			continue;

-		rx_buf = &rx_ring->rx_buf[i];
-		received_buf = page_address(rx_buf->page) + rx_buf->page_offset;
+		rx_buf = &rx_ring->rx_fqes[i];
+		received_buf = page_address(rx_buf->page) + rx_buf->offset;

 		if (ice_lbtest_check_frame(received_buf))
 			valid_frames++;
@@ -1882,10 +1882,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 			ice_nway_reset(netdev);
 		}
 	}
-	if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
-		/* down and up VSI so that changes of Rx cfg are reflected. */
-		ice_down_up(vsi);
-	}
 	/* don't allow modification of this flag when a single VF is in
 	 * promiscuous mode because it's not supported
 	 */
@@ -3355,7 +3351,6 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
 		rx_rings[i].count = new_rx_cnt;
 		rx_rings[i].cached_phctime = pf->ptp.cached_phc_time;
 		rx_rings[i].desc = NULL;
-		rx_rings[i].rx_buf = NULL;
 		/* this is to allow wr32 to have something to write to
 		 * during early allocation of Rx buffers
 		 */
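The loopback test now walks libeth fill-queue elements instead of ice_rx_buf entries. A hedged sketch of the structure as consumed here, inferred from the page/offset accesses above and <net/libeth/rx.h> at the time of this series (the real definition may carry more fields or have since moved to netmem):

	/* Rx fill-queue element: one page_pool buffer posted to the NIC. */
	struct libeth_fqe {
		struct page	*page;		/* page_pool-backed Rx page */
		u32		offset;		/* where the HW wrote the data */
		u32		truesize;	/* accounted size of the buffer */
	} __aligned_largest;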
19 changes: 6 additions & 13 deletions drivers/net/ethernet/intel/ice/ice_main.c
@@ -37,6 +37,8 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
 #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

 MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_IMPORT_NS(LIBETH_XDP);
+MODULE_IMPORT_NS(LIBETH);
 MODULE_IMPORT_NS(LIBIE);
 MODULE_LICENSE("GPL v2");
 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
@@ -2983,10 +2985,7 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
  */
 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
 {
-	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
-		return ICE_RXBUF_1664;
-	else
-		return ICE_RXBUF_3072;
+	return ICE_RXBUF_3072;
 }

 /**
@@ -4859,8 +4858,8 @@ static void ice_init_features(struct ice_pf *pf)
 		ice_dpll_init(pf);

 	/* Note: Flow director init failure is non-fatal to load */
-	if (ice_init_fdir(pf))
-		dev_err(dev, "could not initialize flow director\n");
+	//if (ice_init_fdir(pf))
+	//	dev_err(dev, "could not initialize flow director\n");

 	/* Note: DCB init failure is non-fatal to load */
 	if (ice_init_pf_dcb(pf, false)) {
@@ -4884,7 +4883,7 @@ static void ice_deinit_features(struct ice_pf *pf)
 		ice_deinit_lag(pf);
 	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
 		ice_cfg_lldp_mib_change(&pf->hw, false);
-	ice_deinit_fdir(pf);
+	//ice_deinit_fdir(pf);
 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
 		ice_gnss_exit(pf);
 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
@@ -7806,12 +7805,6 @@ int ice_change_mtu(struct net_device *netdev, int new_mtu)
 				   frame_size - ICE_ETH_PKT_HDR_PAD);
 			return -EINVAL;
 		}
-	} else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
-		if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
-			netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
-				   ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
-			return -EINVAL;
-		}
 	}

 	/* if a reset is in progress, wait for some time for it to complete */