From bd30e8d7bfa6e528f9e746c940e6f7246c7899d6 Mon Sep 17 00:00:00 2001 From: Hsin-chen Chuang Date: Fri, 14 Feb 2025 19:17:09 +0800 Subject: [PATCH 01/24] Bluetooth: Always allow SCO packets for user channel SCO packets from a Bluetooth raw socket are currently rejected because hci_conn_num is left at 0. This patch allows such a use case to enable userspace SCO support. Fixes: b16b327edb4d ("Bluetooth: btusb: add sysfs attribute to control USB alt setting") Signed-off-by: Hsin-chen Chuang Signed-off-by: Luiz Augusto von Dentz --- drivers/bluetooth/btusb.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 90966dfbd278..8149e53fd0a7 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2102,7 +2102,8 @@ static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) return submit_or_queue_tx_urb(hdev, urb); case HCI_SCODATA_PKT: - if (hci_conn_num(hdev, SCO_LINK) < 1) + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hci_conn_num(hdev, SCO_LINK) < 1) return -ENODEV; urb = alloc_isoc_urb(hdev, skb); @@ -2576,7 +2577,8 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb) return submit_or_queue_tx_urb(hdev, urb); case HCI_SCODATA_PKT: - if (hci_conn_num(hdev, SCO_LINK) < 1) + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hci_conn_num(hdev, SCO_LINK) < 1) return -ENODEV; urb = alloc_isoc_urb(hdev, skb); From b25120e1d5f2ebb3db00af557709041f47f7f3d0 Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Fri, 14 Feb 2025 10:30:25 -0500 Subject: [PATCH 02/24] Bluetooth: L2CAP: Fix L2CAP_ECRED_CONN_RSP response L2CAP_ECRED_CONN_RSP needs to respond with the DCIDs in the same order as the SCIDs were received, but the order is reversed due to the use of list_add, which actually prepends channels to the list, so the response is reversed: > ACL Data RX: Handle 16 flags 0x02 dlen 26 LE L2CAP: Enhanced Credit Connection Request (0x17) ident 2 len 18 PSM: 39 (0x0027) MTU: 256 MPS: 251 Credits: 65535 Source CID: 116 Source CID: 117 Source CID: 118 Source CID: 119 Source CID: 120 < ACL Data TX: Handle 16 flags 0x00 dlen 26 LE L2CAP: Enhanced Credit Connection Response (0x18) ident 2 len 18 MTU: 517 MPS: 247 Credits: 3 Result: Connection successful (0x0000) Destination CID: 68 Destination CID: 67 Destination CID: 66 Destination CID: 65 Destination CID: 64 Also make sure the response doesn't include channels that are not in BT_CONNECT2, since the chan->ident can be set to the same value, as in the following trace: < ACL Data TX: Handle 16 flags 0x00 dlen 12 LE L2CAP: LE Flow Control Credit (0x16) ident 6 len 4 Source CID: 64 Credits: 1 ...
> ACL Data RX: Handle 16 flags 0x02 dlen 18 LE L2CAP: Enhanced Credit Connection Request (0x17) ident 6 len 10 PSM: 39 (0x0027) MTU: 517 MPS: 251 Credits: 255 Source CID: 70 < ACL Data TX: Handle 16 flags 0x00 dlen 20 LE L2CAP: Enhanced Credit Connection Response (0x18) ident 6 len 12 MTU: 517 MPS: 247 Credits: 3 Result: Connection successful (0x0000) Destination CID: 64 Destination CID: 68 Closes: https://github.com/bluez/bluez/issues/1094 Fixes: 9aa9d9473f15 ("Bluetooth: L2CAP: Fix responding with wrong PDU type") Signed-off-by: Luiz Augusto von Dentz --- net/bluetooth/l2cap_core.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index fec11e576f31..b22078b67972 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -632,7 +632,8 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) hci_conn_hold(conn->hcon); - list_add(&chan->list, &conn->chan_l); + /* Append to the list since the order matters for ECRED */ + list_add_tail(&chan->list, &conn->chan_l); } void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) @@ -3771,7 +3772,11 @@ static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data) struct l2cap_ecred_conn_rsp *rsp_flex = container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr); - if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) + /* Check if channel for outgoing connection or if it wasn't deferred + * since in those cases it must be skipped. + */ + if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) || + !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags)) return; /* Reset ident so only one response is sent */ From 992ee3ed6e9fdd0be83a7daa5ff738e3cf86047f Mon Sep 17 00:00:00 2001 From: George Moussalem Date: Wed, 19 Feb 2025 14:09:21 +0100 Subject: [PATCH 03/24] net: phy: qcom: qca807x fix condition for DAC_DSP_BIAS_CURRENT While setting the DAC value, the wrong boolean value is evaluated to set the DSP bias current. So let's correct the conditional statement and use the right boolean value read from the DTS set in the priv. Cc: stable@vger.kernel.org Fixes: d1cb613efbd3 ("net: phy: qcom: add support for QCA807x PHY Family") Signed-off-by: George Moussalem Signed-off-by: Christian Marangi Reviewed-by: Andrew Lunn Link: https://patch.msgid.link/20250219130923.7216-1-ansuelsmth@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/phy/qcom/qca807x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c index 3279de857b47..2ad8c2586d64 100644 --- a/drivers/net/phy/qcom/qca807x.c +++ b/drivers/net/phy/qcom/qca807x.c @@ -774,7 +774,7 @@ static int qca807x_config_init(struct phy_device *phydev) control_dac &= ~QCA807X_CONTROL_DAC_MASK; if (!priv->dac_full_amplitude) control_dac |= QCA807X_CONTROL_DAC_DSP_AMPLITUDE; - if (!priv->dac_full_amplitude) + if (!priv->dac_full_bias_current) control_dac |= QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT; if (!priv->dac_disable_bias_current_tweak) control_dac |= QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK; From f06e4bfd010faefa637689d2df2c727dbf6e1d27 Mon Sep 17 00:00:00 2001 From: Qunqin Zhao Date: Wed, 19 Feb 2025 10:07:01 +0800 Subject: [PATCH 04/24] net: stmmac: dwmac-loongson: Add fix_soc_reset() callback Loongson's DWMAC device may take nearly two seconds to complete DMA reset, however, the default waiting time for reset is 200 milliseconds. 
Therefore, the following error message may appear: [14.427169] dwmac-loongson-pci 0000:00:03.2: Failed to reset the dma Fixes: 803fc61df261 ("net: stmmac: dwmac-loongson: Add Loongson Multi-channels GMAC support") Cc: stable@vger.kernel.org Signed-off-by: Qunqin Zhao Reviewed-by: Huacai Chen Reviewed-by: Jacob Keller Acked-by: Yanteng Si Link: https://patch.msgid.link/20250219020701.15139-1-zhaoqunqin@loongson.cn Signed-off-by: Jakub Kicinski --- .../net/ethernet/stmicro/stmmac/dwmac-loongson.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index bfe6e2d631bd..f5acfb7d4ff6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -516,6 +516,19 @@ static int loongson_dwmac_acpi_config(struct pci_dev *pdev, return 0; } +/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */ +static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + + return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, + !(value & DMA_BUS_MODE_SFT_RESET), + 10000, 2000000); +} + static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct plat_stmmacenet_data *plat; @@ -566,6 +579,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id plat->bsp_priv = ld; plat->setup = loongson_dwmac_setup; + plat->fix_soc_reset = loongson_dwmac_fix_reset; ld->dev = &pdev->dev; ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff; From c34d999ca3145d9fe858258cc3342ec493f47d2e Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 18 Feb 2025 19:22:44 +0000 Subject: [PATCH 05/24] rxrpc: rxperf: Fix missing decoding of terminal magic cookie The rxperf RPCs seem to have a magic cookie at the end of the request that was failing to be taken account of by the unmarshalling of the request. Fix the rxperf code to expect this. Fixes: 75bfdbf2fca3 ("rxrpc: Implement an in-kernel rxperf server for testing purposes") Signed-off-by: David Howells cc: Marc Dionne cc: Simon Horman cc: linux-afs@lists.infradead.org Link: https://patch.msgid.link/20250218192250.296870-2-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- net/rxrpc/rxperf.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c index 7ef93407be83..e848a4777b8c 100644 --- a/net/rxrpc/rxperf.c +++ b/net/rxrpc/rxperf.c @@ -478,6 +478,18 @@ static int rxperf_deliver_request(struct rxperf_call *call) call->unmarshal++; fallthrough; case 2: + ret = rxperf_extract_data(call, true); + if (ret < 0) + return ret; + + /* Deal with the terminal magic cookie. */ + call->iov_len = 4; + call->kvec[0].iov_len = call->iov_len; + call->kvec[0].iov_base = call->tmp; + iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len); + call->unmarshal++; + fallthrough; + case 3: ret = rxperf_extract_data(call, false); if (ret < 0) return ret; From 833fefa074444b1e7f7e834cbdce59ce02562ed0 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 18 Feb 2025 19:22:45 +0000 Subject: [PATCH 06/24] rxrpc: peer->mtu_lock is redundant The peer->mtu_lock is only used to lock around writes to peer->max_data - and nothing else; further, all such writes take place in the I/O thread and the lock is only ever write-locked and never read-locked. 
In a couple of places, the write_seqcount_begin() is wrapped in preempt_disable/enable(), but not in all places. This can cause lockdep to complain: WARNING: CPU: 0 PID: 1549 at include/linux/seqlock.h:221 rxrpc_input_ack_trailer+0x305/0x430 ... RIP: 0010:rxrpc_input_ack_trailer+0x305/0x430 Fix this by just getting rid of the lock. Fixes: eeaedc5449d9 ("rxrpc: Implement path-MTU probing using padded PING ACKs (RFC8899)") Signed-off-by: David Howells cc: Marc Dionne cc: Simon Horman cc: linux-afs@lists.infradead.org Link: https://patch.msgid.link/20250218192250.296870-3-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- net/rxrpc/ar-internal.h | 1 - net/rxrpc/input.c | 2 -- net/rxrpc/peer_event.c | 9 +-------- net/rxrpc/peer_object.c | 1 - 4 files changed, 1 insertion(+), 12 deletions(-) diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 5e740c486203..a64a0cab1bf7 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -360,7 +360,6 @@ struct rxrpc_peer { u8 pmtud_jumbo; /* Max jumbo packets for the MTU */ bool ackr_adv_pmtud; /* T if the peer advertises path-MTU */ unsigned int ackr_max_data; /* Maximum data advertised by peer */ - seqcount_t mtu_lock; /* Lockless MTU access management */ unsigned int if_mtu; /* Local interface MTU (- hdrsize) for this peer */ unsigned int max_data; /* Maximum packet data capacity for this peer */ unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */ diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 9047ba13bd31..24aceb183c2c 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -810,9 +810,7 @@ static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb if (max_mtu < peer->max_data) { trace_rxrpc_pmtud_reduce(peer, sp->hdr.serial, max_mtu, rxrpc_pmtud_reduce_ack); - write_seqcount_begin(&peer->mtu_lock); peer->max_data = max_mtu; - write_seqcount_end(&peer->mtu_lock); } max_data = umin(max_mtu, peer->max_data); diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index bc283da9ee40..7f4729234957 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c @@ -130,9 +130,7 @@ static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu) peer->pmtud_bad = max_data + 1; trace_rxrpc_pmtud_reduce(peer, 0, max_data, rxrpc_pmtud_reduce_icmp); - write_seqcount_begin(&peer->mtu_lock); peer->max_data = max_data; - write_seqcount_end(&peer->mtu_lock); } } @@ -408,13 +406,8 @@ void rxrpc_input_probe_for_pmtud(struct rxrpc_connection *conn, rxrpc_serial_t a } max_data = umin(max_data, peer->ackr_max_data); - if (max_data != peer->max_data) { - preempt_disable(); - write_seqcount_begin(&peer->mtu_lock); + if (max_data != peer->max_data) peer->max_data = max_data; - write_seqcount_end(&peer->mtu_lock); - preempt_enable(); - } jumbo = max_data + sizeof(struct rxrpc_jumbo_header); jumbo /= RXRPC_JUMBO_SUBPKTLEN; diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index 0fcc87f0409f..2ddc8ed68742 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c @@ -235,7 +235,6 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp, peer->service_conns = RB_ROOT; seqlock_init(&peer->service_conn_lock); spin_lock_init(&peer->lock); - seqcount_init(&peer->mtu_lock); peer->debug_id = atomic_inc_return(&rxrpc_debug_id); peer->recent_srtt_us = UINT_MAX; peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW; From 71f5409176f4ffd460689eb5423a20332d00e342 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 18 Feb 2025 19:22:46 +0000 Subject: [PATCH 07/24] rxrpc: 
Fix locking issues with the peer record hash rxrpc_new_incoming_peer() can't use spin_lock_bh() whilst its caller has interrupts disabled. WARNING: CPU: 0 PID: 1550 at kernel/softirq.c:369 __local_bh_enable_ip+0x46/0xd0 ... Call Trace: rxrpc_alloc_incoming_call+0x1b0/0x400 rxrpc_new_incoming_call+0x1dd/0x5e0 rxrpc_input_packet+0x84a/0x920 rxrpc_io_thread+0x40d/0xb40 kthread+0x2ec/0x300 ret_from_fork+0x24/0x40 ret_from_fork_asm+0x1a/0x30 irq event stamp: 1811 hardirqs last enabled at (1809): _raw_spin_unlock_irq+0x24/0x50 hardirqs last disabled at (1810): _raw_read_lock_irq+0x17/0x70 softirqs last enabled at (1182): handle_softirqs+0x3ee/0x430 softirqs last disabled at (1811): rxrpc_new_incoming_peer+0x56/0x120 Fix this by using a plain spin_lock() instead. IRQs are disabled, so softirqs can't happen. Fixes: a2ea9a907260 ("rxrpc: Use irq-disabling spinlocks between app and I/O thread") Signed-off-by: David Howells cc: Marc Dionne cc: Simon Horman cc: linux-afs@lists.infradead.org Link: https://patch.msgid.link/20250218192250.296870-4-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- net/rxrpc/peer_object.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index 2ddc8ed68742..56e09d161a97 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c @@ -324,10 +324,10 @@ void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer) hash_key = rxrpc_peer_hash_key(local, &peer->srx); rxrpc_init_peer(local, peer, hash_key); - spin_lock_bh(&rxnet->peer_hash_lock); + spin_lock(&rxnet->peer_hash_lock); hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key); list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new); - spin_unlock_bh(&rxnet->peer_hash_lock); + spin_unlock(&rxnet->peer_hash_lock); } /* From add117e48df4788a86a21bd0515833c0a6db1ad1 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 18 Feb 2025 19:22:47 +0000 Subject: [PATCH 08/24] afs: Fix the server_list to unuse a displaced server rather than putting it When allocating and building an afs_server_list struct object from a VLDB record, we look up each server address to get the server record for it - but a server may have more than one entry in the record and we discard the duplicate pointers. Currently, however, when we discard, we only put a server record, not unuse it - but the lookup took an active-user count on it. The active-user count on an afs_server_list object determines its lifetime whereas the refcount keeps the memory backing it around. Failing to reduce the active-user counter prevents the record from being cleaned up and can lead to multiple copies being seen - and pointing to deleted afs_cell objects and other such things. Fix this by switching the incorrect 'put' to an 'unuse' instead. Without this, occasionally, a dead server record can be seen in /proc/net/afs/servers and list corruption may be observed: list_del corruption. prev->next should be ffff888102423e40, but was 0000000000000000.
(prev=ffff88810140cd38) Fixes: 977e5f8ed0ab ("afs: Split the usage count on struct afs_server") Signed-off-by: David Howells cc: Marc Dionne cc: Simon Horman cc: linux-afs@lists.infradead.org Link: https://patch.msgid.link/20250218192250.296870-5-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- fs/afs/server_list.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c index 7e7e567a7f8a..d20cd902ef94 100644 --- a/fs/afs/server_list.c +++ b/fs/afs/server_list.c @@ -97,8 +97,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume, break; if (j < slist->nr_servers) { if (slist->servers[j].server == server) { - afs_put_server(volume->cell->net, server, - afs_server_trace_put_slist_isort); + afs_unuse_server(volume->cell->net, server, + afs_server_trace_put_slist_isort); continue; } From 1f0fc3374f3345ff1d150c5c56ac5016e5d3826a Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 18 Feb 2025 19:22:48 +0000 Subject: [PATCH 09/24] afs: Give an afs_server object a ref on the afs_cell object it points to Give an afs_server object a ref on the afs_cell object it points to so that the cell doesn't get deleted before the server record. Whilst this is circular (cell -> vol -> server_list -> server -> cell), the ref only pins the memory, not the lifetime as that's controlled by the activity counter. When the volume's activity counter reaches 0, it detaches from the cell and discards its server list; when a cell's activity counter reaches 0, it discards its root volume. At that point, the circularity is cut. Fixes: d2ddc776a458 ("afs: Overhaul volume and server record caching and fileserver rotation") Signed-off-by: David Howells cc: Marc Dionne cc: Simon Horman cc: linux-afs@lists.infradead.org Link: https://patch.msgid.link/20250218192250.296870-6-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- fs/afs/server.c | 3 +++ include/trace/events/afs.h | 2 ++ 2 files changed, 5 insertions(+) diff --git a/fs/afs/server.c b/fs/afs/server.c index 038f9d0ae3af..4504e16b458c 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -163,6 +163,8 @@ static struct afs_server *afs_install_server(struct afs_cell *cell, rb_insert_color(&server->uuid_rb, &net->fs_servers); hlist_add_head_rcu(&server->proc_link, &net->fs_proc); + afs_get_cell(cell, afs_cell_trace_get_server); + added_dup: write_seqlock(&net->fs_addr_lock); estate = rcu_dereference_protected(server->endpoint_state, @@ -442,6 +444,7 @@ static void afs_server_rcu(struct rcu_head *rcu) atomic_read(&server->active), afs_server_trace_free); afs_put_endpoint_state(rcu_access_pointer(server->endpoint_state), afs_estate_trace_put_server); + afs_put_cell(server->cell, afs_cell_trace_put_server); kfree(server); } diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index b0db89058c91..958a2460330c 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -174,6 +174,7 @@ enum yfs_cm_operation { EM(afs_cell_trace_get_queue_dns, "GET q-dns ") \ EM(afs_cell_trace_get_queue_manage, "GET q-mng ") \ EM(afs_cell_trace_get_queue_new, "GET q-new ") \ + EM(afs_cell_trace_get_server, "GET server") \ EM(afs_cell_trace_get_vol, "GET vol ") \ EM(afs_cell_trace_insert, "INSERT ") \ EM(afs_cell_trace_manage, "MANAGE ") \ @@ -182,6 +183,7 @@ enum yfs_cm_operation { EM(afs_cell_trace_put_destroy, "PUT destry") \ EM(afs_cell_trace_put_queue_work, "PUT q-work") \ EM(afs_cell_trace_put_queue_fail, "PUT q-fail") \ + EM(afs_cell_trace_put_server, "PUT server") \ 
EM(afs_cell_trace_put_vol, "PUT vol ") \ EM(afs_cell_trace_see_source, "SEE source") \ EM(afs_cell_trace_see_ws, "SEE ws ") \ From 5c70eb5c593d64d93b178905da215a9fd288a4b5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 20 Feb 2025 13:18:54 +0000 Subject: [PATCH 10/24] net: better track kernel sockets lifetime While kernel sockets are dismantled during pernet_operations->exit(), their freeing can be delayed by any tx packets still held in qdisc or device queues, due to skb_set_owner_w() prior calls. This then trigger the following warning from ref_tracker_dir_exit() [1] To fix this, make sure that kernel sockets own a reference on net->passive. Add sk_net_refcnt_upgrade() helper, used whenever a kernel socket is converted to a refcounted one. [1] [ 136.263918][ T35] ref_tracker: net notrefcnt@ffff8880638f01e0 has 1/2 users at [ 136.263918][ T35] sk_alloc+0x2b3/0x370 [ 136.263918][ T35] inet6_create+0x6ce/0x10f0 [ 136.263918][ T35] __sock_create+0x4c0/0xa30 [ 136.263918][ T35] inet_ctl_sock_create+0xc2/0x250 [ 136.263918][ T35] igmp6_net_init+0x39/0x390 [ 136.263918][ T35] ops_init+0x31e/0x590 [ 136.263918][ T35] setup_net+0x287/0x9e0 [ 136.263918][ T35] copy_net_ns+0x33f/0x570 [ 136.263918][ T35] create_new_namespaces+0x425/0x7b0 [ 136.263918][ T35] unshare_nsproxy_namespaces+0x124/0x180 [ 136.263918][ T35] ksys_unshare+0x57d/0xa70 [ 136.263918][ T35] __x64_sys_unshare+0x38/0x40 [ 136.263918][ T35] do_syscall_64+0xf3/0x230 [ 136.263918][ T35] entry_SYSCALL_64_after_hwframe+0x77/0x7f [ 136.263918][ T35] [ 136.343488][ T35] ref_tracker: net notrefcnt@ffff8880638f01e0 has 1/2 users at [ 136.343488][ T35] sk_alloc+0x2b3/0x370 [ 136.343488][ T35] inet6_create+0x6ce/0x10f0 [ 136.343488][ T35] __sock_create+0x4c0/0xa30 [ 136.343488][ T35] inet_ctl_sock_create+0xc2/0x250 [ 136.343488][ T35] ndisc_net_init+0xa7/0x2b0 [ 136.343488][ T35] ops_init+0x31e/0x590 [ 136.343488][ T35] setup_net+0x287/0x9e0 [ 136.343488][ T35] copy_net_ns+0x33f/0x570 [ 136.343488][ T35] create_new_namespaces+0x425/0x7b0 [ 136.343488][ T35] unshare_nsproxy_namespaces+0x124/0x180 [ 136.343488][ T35] ksys_unshare+0x57d/0xa70 [ 136.343488][ T35] __x64_sys_unshare+0x38/0x40 [ 136.343488][ T35] do_syscall_64+0xf3/0x230 [ 136.343488][ T35] entry_SYSCALL_64_after_hwframe+0x77/0x7f Fixes: 0cafd77dcd03 ("net: add a refcount tracker for kernel sockets") Reported-by: syzbot+30a19e01a97420719891@syzkaller.appspotmail.com Closes: https://lore.kernel.org/netdev/67b72aeb.050a0220.14d86d.0283.GAE@google.com/T/#u Signed-off-by: Eric Dumazet Reviewed-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20250220131854.4048077-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- include/net/sock.h | 1 + net/core/sock.c | 27 ++++++++++++++++++++++----- net/mptcp/subflow.c | 5 +---- net/netlink/af_netlink.c | 10 ---------- net/rds/tcp.c | 8 ++------ net/smc/af_smc.c | 5 +---- net/sunrpc/svcsock.c | 5 +---- net/sunrpc/xprtsock.c | 8 ++------ 8 files changed, 30 insertions(+), 39 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 8036b3b79cd8..7ef728324e4e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1751,6 +1751,7 @@ static inline bool sock_allow_reclassification(const struct sock *csk) struct sock *sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot, int kern); void sk_free(struct sock *sk); +void sk_net_refcnt_upgrade(struct sock *sk); void sk_destruct(struct sock *sk); struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); void sk_free_unlock_clone(struct sock 
*sk); diff --git a/net/core/sock.c b/net/core/sock.c index eae2ae70a2e0..6c0e87f97fa4 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2246,6 +2246,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, get_net_track(net, &sk->ns_tracker, priority); sock_inuse_add(net, 1); } else { + net_passive_inc(net); __netns_tracker_alloc(net, &sk->ns_tracker, false, priority); } @@ -2270,6 +2271,7 @@ EXPORT_SYMBOL(sk_alloc); static void __sk_destruct(struct rcu_head *head) { struct sock *sk = container_of(head, struct sock, sk_rcu); + struct net *net = sock_net(sk); struct sk_filter *filter; if (sk->sk_destruct) @@ -2301,14 +2303,28 @@ static void __sk_destruct(struct rcu_head *head) put_cred(sk->sk_peer_cred); put_pid(sk->sk_peer_pid); - if (likely(sk->sk_net_refcnt)) - put_net_track(sock_net(sk), &sk->ns_tracker); - else - __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false); - + if (likely(sk->sk_net_refcnt)) { + put_net_track(net, &sk->ns_tracker); + } else { + __netns_tracker_free(net, &sk->ns_tracker, false); + net_passive_dec(net); + } sk_prot_free(sk->sk_prot_creator, sk); } +void sk_net_refcnt_upgrade(struct sock *sk) +{ + struct net *net = sock_net(sk); + + WARN_ON_ONCE(sk->sk_net_refcnt); + __netns_tracker_free(net, &sk->ns_tracker, false); + net_passive_dec(net); + sk->sk_net_refcnt = 1; + get_net_track(net, &sk->ns_tracker, GFP_KERNEL); + sock_inuse_add(net, 1); +} +EXPORT_SYMBOL_GPL(sk_net_refcnt_upgrade); + void sk_destruct(struct sock *sk) { bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE); @@ -2405,6 +2421,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) * is not properly dismantling its kernel sockets at netns * destroy time. */ + net_passive_inc(sock_net(newsk)); __netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker, false, priority); } diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index fd021cf8286e..dfcbef9c4624 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1772,10 +1772,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family, * needs it. * Update ns_tracker to current stack trace and refcounted tracker. */ - __netns_tracker_free(net, &sf->sk->ns_tracker, false); - sf->sk->sk_net_refcnt = 1; - get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(net, 1); + sk_net_refcnt_upgrade(sf->sk); err = tcp_set_ulp(sf->sk, "mptcp"); if (err) goto err_free; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 85311226183a..a53ea60d0a78 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -795,16 +795,6 @@ static int netlink_release(struct socket *sock) sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1); - /* Because struct net might disappear soon, do not keep a pointer. */ - if (!sk->sk_net_refcnt && sock_net(sk) != &init_net) { - __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false); - /* Because of deferred_put_nlk_sk and use of work queue, - * it is possible netns will be freed before this socket. 
- */ - sock_net_set(sk, &init_net); - __netns_tracker_alloc(&init_net, &sk->ns_tracker, - false, GFP_KERNEL); - } call_rcu(&nlk->rcu, deferred_put_nlk_sk); return 0; } diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 0581c53e6517..3cc2f303bf78 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -504,12 +504,8 @@ bool rds_tcp_tune(struct socket *sock) release_sock(sk); return false; } - /* Update ns_tracker to current stack trace and refcounted tracker */ - __netns_tracker_free(net, &sk->ns_tracker, false); - - sk->sk_net_refcnt = 1; - netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(net, 1); + sk_net_refcnt_upgrade(sk); + put_net(net); } rtn = net_generic(net, rds_tcp_netid); if (rtn->sndbuf_size > 0) { diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index ca6984541edb..3e6cb35baf25 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -3337,10 +3337,7 @@ int smc_create_clcsk(struct net *net, struct sock *sk, int family) * which need net ref. */ sk = smc->clcsock->sk; - __netns_tracker_free(net, &sk->ns_tracker, false); - sk->sk_net_refcnt = 1; - get_net_track(net, &sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(net, 1); + sk_net_refcnt_upgrade(sk); return 0; } diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index cb3bd12f5818..72e5a01df3d3 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1541,10 +1541,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv, newlen = error; if (protocol == IPPROTO_TCP) { - __netns_tracker_free(net, &sock->sk->ns_tracker, false); - sock->sk->sk_net_refcnt = 1; - get_net_track(net, &sock->sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(net, 1); + sk_net_refcnt_upgrade(sock->sk); if ((error = kernel_listen(sock, 64)) < 0) goto bummer; } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c60936d8cef7..940fe65b2a35 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1941,12 +1941,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt, goto out; } - if (protocol == IPPROTO_TCP) { - __netns_tracker_free(xprt->xprt_net, &sock->sk->ns_tracker, false); - sock->sk->sk_net_refcnt = 1; - get_net_track(xprt->xprt_net, &sock->sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(xprt->xprt_net, 1); - } + if (protocol == IPPROTO_TCP) + sk_net_refcnt_upgrade(sock->sk); filp = sock_alloc_file(sock, O_NONBLOCK, NULL); if (IS_ERR(filp)) From 0e4427f8f587c4b603475468bb3aee9418574893 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 20 Feb 2025 09:25:59 +0200 Subject: [PATCH 11/24] net: loopback: Avoid sending IP packets without an Ethernet header After commit 22600596b675 ("ipv4: give an IPv4 dev to blackhole_netdev") IPv4 neighbors can be constructed on the blackhole net device, but they are constructed with an output function (neigh_direct_output()) that simply calls dev_queue_xmit(). The latter will transmit packets via 'skb->dev' which might not be the blackhole net device if dst_dev_put() switched 'dst->dev' to the blackhole net device while another CPU was using the dst entry in ip_output(), but after it already initialized 'skb->dev' from 'dst->dev'. Specifically, the following can happen: CPU1 CPU2 udp_sendmsg(sk1) udp_sendmsg(sk2) udp_send_skb() [...] ip_output() skb->dev = skb_dst(skb)->dev dst_dev_put() dst->dev = blackhole_netdev ip_finish_output2() resolves neigh on dst->dev neigh_output() neigh_direct_output() dev_queue_xmit() This will result in IPv4 packets being sent without an Ethernet header via a valid net device: tcpdump: verbose output suppressed, use -v[v]... 
for full protocol decode listening on enp9s0, link-type EN10MB (Ethernet), snapshot length 262144 bytes 22:07:02.329668 20:00:40:11:18:fb > 45:00:00:44:f4:94, ethertype Unknown (0x58c6), length 68: 0x0000: 8dda 74ca f1ae ca6c ca6c 0098 969c 0400 ..t....l.l...... 0x0010: 0000 4730 3f18 6800 0000 0000 0000 9971 ..G0?.h........q 0x0020: c4c9 9055 a157 0a70 9ead bf83 38ca ab38 ...U.W.p....8..8 0x0030: 8add ab96 e052 .....R Fix by making sure that neighbors are constructed on top of the blackhole net device with an output function that simply consumes the packets, in a similar fashion to dst_discard_out() and blackhole_netdev_xmit(). Fixes: 8d7017fd621d ("blackhole_netdev: use blackhole_netdev to invalidate dst entries") Fixes: 22600596b675 ("ipv4: give an IPv4 dev to blackhole_netdev") Reported-by: Florian Meister Closes: https://lore.kernel.org/netdev/20250210084931.23a5c2e4@hermes.local/ Signed-off-by: Ido Schimmel Reviewed-by: Eric Dumazet Link: https://patch.msgid.link/20250220072559.782296-1-idosch@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/loopback.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index c8840c3b9a1b..f1d68153987e 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -244,8 +244,22 @@ static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } +static int blackhole_neigh_output(struct neighbour *n, struct sk_buff *skb) +{ + kfree_skb(skb); + return 0; +} + +static int blackhole_neigh_construct(struct net_device *dev, + struct neighbour *n) +{ + n->output = blackhole_neigh_output; + return 0; +} + static const struct net_device_ops blackhole_netdev_ops = { .ndo_start_xmit = blackhole_netdev_xmit, + .ndo_neigh_construct = blackhole_neigh_construct, }; /* This is a dst-dummy device used specifically for invalidated From c180188ec02281126045414e90d08422a80f75b4 Mon Sep 17 00:00:00 2001 From: "Jiri Slaby (SUSE)" Date: Thu, 20 Feb 2025 12:07:52 +0100 Subject: [PATCH 12/24] net: set the minimum for net_hotdata.netdev_budget_usecs Commit 7acf8a1e8a28 ("Replace 2 jiffies with sysctl netdev_budget_usecs to enable softirq tuning") added a possibility to set net_hotdata.netdev_budget_usecs, but added no lower bound checking. Commit a4837980fd9f ("net: revert default NAPI poll timeout to 2 jiffies") made the *initial* value HZ-dependent, so the initial value is at least 2 jiffies even for lower HZ values (2 ms for 1000 Hz, 8ms for 250 Hz, 20 ms for 100 Hz). But a user still can set improper values by a sysctl. Set .extra1 (the lower bound) for net_hotdata.netdev_budget_usecs to the same value as in the latter commit. That is to 2 jiffies. 
Fixes: a4837980fd9f ("net: revert default NAPI poll timeout to 2 jiffies") Fixes: 7acf8a1e8a28 ("Replace 2 jiffies with sysctl netdev_budget_usecs to enable softirq tuning") Signed-off-by: Jiri Slaby (SUSE) Cc: Dmitry Yakunin Cc: Konstantin Khlebnikov Link: https://patch.msgid.link/20250220110752.137639-1-jirislaby@kernel.org Signed-off-by: Jakub Kicinski --- net/core/sysctl_net_core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index ad2741f1346a..c7769ee0d9c5 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -34,6 +34,7 @@ static int min_sndbuf = SOCK_MIN_SNDBUF; static int min_rcvbuf = SOCK_MIN_RCVBUF; static int max_skb_frags = MAX_SKB_FRAGS; static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE; +static int netdev_budget_usecs_min = 2 * USEC_PER_SEC / HZ; static int net_msg_warn; /* Unused, but still a sysctl */ @@ -587,7 +588,7 @@ static struct ctl_table net_core_table[] = { .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, + .extra1 = &netdev_budget_usecs_min, }, { .procname = "fb_tunnels_only_for_init_net", From 27843ce6ba3d3122b65066550fe33fb8839f8aef Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 20 Feb 2025 15:53:36 +0000 Subject: [PATCH 13/24] ipvlan: ensure network headers are in skb linear part syzbot found that ipvlan_process_v6_outbound() was assuming the IPv6 network header is present in skb->head [1] Add the needed pskb_network_may_pull() calls for both IPv4 and IPv6 handlers. [1] BUG: KMSAN: uninit-value in __ipv6_addr_type+0xa2/0x490 net/ipv6/addrconf_core.c:47 __ipv6_addr_type+0xa2/0x490 net/ipv6/addrconf_core.c:47 ipv6_addr_type include/net/ipv6.h:555 [inline] ip6_route_output_flags_noref net/ipv6/route.c:2616 [inline] ip6_route_output_flags+0x51/0x720 net/ipv6/route.c:2651 ip6_route_output include/net/ip6_route.h:93 [inline] ipvlan_route_v6_outbound+0x24e/0x520 drivers/net/ipvlan/ipvlan_core.c:476 ipvlan_process_v6_outbound drivers/net/ipvlan/ipvlan_core.c:491 [inline] ipvlan_process_outbound drivers/net/ipvlan/ipvlan_core.c:541 [inline] ipvlan_xmit_mode_l3 drivers/net/ipvlan/ipvlan_core.c:605 [inline] ipvlan_queue_xmit+0xd72/0x1780 drivers/net/ipvlan/ipvlan_core.c:671 ipvlan_start_xmit+0x5b/0x210 drivers/net/ipvlan/ipvlan_main.c:223 __netdev_start_xmit include/linux/netdevice.h:5150 [inline] netdev_start_xmit include/linux/netdevice.h:5159 [inline] xmit_one net/core/dev.c:3735 [inline] dev_hard_start_xmit+0x247/0xa20 net/core/dev.c:3751 sch_direct_xmit+0x399/0xd40 net/sched/sch_generic.c:343 qdisc_restart net/sched/sch_generic.c:408 [inline] __qdisc_run+0x14da/0x35d0 net/sched/sch_generic.c:416 qdisc_run+0x141/0x4d0 include/net/pkt_sched.h:127 net_tx_action+0x78b/0x940 net/core/dev.c:5484 handle_softirqs+0x1a0/0x7c0 kernel/softirq.c:561 __do_softirq+0x14/0x1a kernel/softirq.c:595 do_softirq+0x9a/0x100 kernel/softirq.c:462 __local_bh_enable_ip+0x9f/0xb0 kernel/softirq.c:389 local_bh_enable include/linux/bottom_half.h:33 [inline] rcu_read_unlock_bh include/linux/rcupdate.h:919 [inline] __dev_queue_xmit+0x2758/0x57d0 net/core/dev.c:4611 dev_queue_xmit include/linux/netdevice.h:3311 [inline] packet_xmit+0x9c/0x6c0 net/packet/af_packet.c:276 packet_snd net/packet/af_packet.c:3132 [inline] packet_sendmsg+0x93e0/0xa7e0 net/packet/af_packet.c:3164 sock_sendmsg_nosec net/socket.c:718 [inline] Fixes: 2ad7bf363841 ("ipvlan: Initial check-in of the IPVLAN driver.") Reported-by:
syzbot+93ab4a777bafb9d9f960@syzkaller.appspotmail.com Closes: https://lore.kernel.org/netdev/67b74f01.050a0220.14d86d.02d8.GAE@google.com/T/#u Signed-off-by: Eric Dumazet Cc: Mahesh Bandewar Link: https://patch.msgid.link/20250220155336.61884-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- drivers/net/ipvlan/ipvlan_core.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index fd591ddb3884..ca62188a317a 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -416,20 +416,25 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h, static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb) { - const struct iphdr *ip4h = ip_hdr(skb); struct net_device *dev = skb->dev; struct net *net = dev_net(dev); - struct rtable *rt; int err, ret = NET_XMIT_DROP; + const struct iphdr *ip4h; + struct rtable *rt; struct flowi4 fl4 = { .flowi4_oif = dev->ifindex, - .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)), .flowi4_flags = FLOWI_FLAG_ANYSRC, .flowi4_mark = skb->mark, - .daddr = ip4h->daddr, - .saddr = ip4h->saddr, }; + if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) + goto err; + + ip4h = ip_hdr(skb); + fl4.daddr = ip4h->daddr; + fl4.saddr = ip4h->saddr; + fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)); + rt = ip_route_output_flow(net, &fl4, NULL); if (IS_ERR(rt)) goto err; @@ -488,6 +493,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) struct net_device *dev = skb->dev; int err, ret = NET_XMIT_DROP; + if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) { + DEV_STATS_INC(dev, tx_errors); + kfree_skb(skb); + return ret; + } + err = ipvlan_route_v6_outbound(dev, skb); if (unlikely(err)) { DEV_STATS_INC(dev, tx_errors); From fa52f15c745ce55261b92873676f64f7348cfe82 Mon Sep 17 00:00:00 2001 From: Sean Anderson Date: Thu, 20 Feb 2025 11:29:50 -0500 Subject: [PATCH 14/24] net: cadence: macb: Synchronize stats calculations Stats calculations involve a RMW to add the stat update to the existing value. This is currently not protected by any synchronization mechanism, so data races are possible. Add a spinlock to protect the update. The reader side could be protected using u64_stats, but we would still need a spinlock for the update side anyway. And we always do an update immediately before reading the stats anyway. 
Fixes: 89e5785fc8a6 ("[PATCH] Atmel MACB ethernet driver") Signed-off-by: Sean Anderson Link: https://patch.msgid.link/20250220162950.95941-1-sean.anderson@linux.dev Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/cadence/macb.h | 2 ++ drivers/net/ethernet/cadence/macb_main.c | 12 ++++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 5740c98d8c9f..2847278d9cd4 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -1279,6 +1279,8 @@ struct macb { struct clk *rx_clk; struct clk *tsu_clk; struct net_device *dev; + /* Protects hw_stats and ethtool_stats */ + spinlock_t stats_lock; union { struct macb_stats macb; struct gem_stats gem; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 48496209fb16..c1f57d96e63f 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1978,10 +1978,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) if (status & MACB_BIT(ISR_ROVR)) { /* We missed at least one packet */ + spin_lock(&bp->stats_lock); if (macb_is_gem(bp)) bp->hw_stats.gem.rx_overruns++; else bp->hw_stats.macb.rx_overruns++; + spin_unlock(&bp->stats_lock); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); @@ -3102,6 +3104,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp) if (!netif_running(bp->dev)) return nstat; + spin_lock_irq(&bp->stats_lock); gem_update_stats(bp); nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + @@ -3131,6 +3134,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp) nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; nstat->tx_fifo_errors = hwstat->tx_underrun; + spin_unlock_irq(&bp->stats_lock); return nstat; } @@ -3138,12 +3142,13 @@ static struct net_device_stats *gem_get_stats(struct macb *bp) static void gem_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { - struct macb *bp; + struct macb *bp = netdev_priv(dev); - bp = netdev_priv(dev); + spin_lock_irq(&bp->stats_lock); gem_update_stats(bp); memcpy(data, &bp->ethtool_stats, sizeof(u64) * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES)); + spin_unlock_irq(&bp->stats_lock); } static int gem_get_sset_count(struct net_device *dev, int sset) @@ -3193,6 +3198,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev) return gem_get_stats(bp); /* read stats from hardware */ + spin_lock_irq(&bp->stats_lock); macb_update_stats(bp); /* Convert HW stats into netdevice stats */ @@ -3226,6 +3232,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev) nstat->tx_carrier_errors = hwstat->tx_carrier_errors; nstat->tx_fifo_errors = hwstat->tx_underruns; /* Don't know about heartbeat or window errors... */ + spin_unlock_irq(&bp->stats_lock); return nstat; } @@ -5097,6 +5104,7 @@ static int macb_probe(struct platform_device *pdev) } } spin_lock_init(&bp->lock); + spin_lock_init(&bp->stats_lock); /* setup capabilities */ macb_configure_caps(bp, macb_config); From 28b04731a38c80092f47437af6c2770765e0b99f Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 20 Feb 2025 16:50:12 -0800 Subject: [PATCH 15/24] MAINTAINERS: fix DWMAC S32 entry Using L: with more than a bare email address causes getmaintainer.pl to be unable to parse the entry. 
Fix this by doing as other entries that use this email address and convert it to an R: entry. Link: https://patch.msgid.link/20250221005012.1051897-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 3864d473f52f..ac15093537c6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2877,7 +2877,7 @@ F: drivers/pinctrl/nxp/ ARM/NXP S32G/S32R DWMAC ETHERNET DRIVER M: Jan Petrous -L: NXP S32 Linux Team +R: s32@nxp.com S: Maintained F: Documentation/devicetree/bindings/net/nxp,s32-dwmac.yaml F: drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c From f15176b8b6e72ac30e14fd273282d2b72562d26b Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 20 Feb 2025 19:48:15 +0100 Subject: [PATCH 16/24] net: dsa: rtl8366rb: Fix compilation problem When the kernel is compiled without LED framework support the rtl8366rb fails to build like this: rtl8366rb.o: in function `rtl8366rb_setup_led': rtl8366rb.c:953:(.text.unlikely.rtl8366rb_setup_led+0xe8): undefined reference to `led_init_default_state_get' rtl8366rb.c:980:(.text.unlikely.rtl8366rb_setup_led+0x240): undefined reference to `devm_led_classdev_register_ext' As this is constantly coming up in different randconfig builds, bite the bullet and create a separate file for the offending code, split out a header with all stuff needed both in the core driver and the leds code. Add a new bool Kconfig option for the LED compile target, such that it depends on LEDS_CLASS=y || LEDS_CLASS=RTL8366RB which make LED support always available when LEDS_CLASS is compiled into the kernel and enforce that if the LEDS_CLASS is a module, then the RTL8366RB driver needs to be a module as well so that modprobe can resolve the dependencies. Fixes: 32d617005475 ("net: dsa: realtek: add LED drivers for rtl8366rb") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202502070525.xMUImayb-lkp@intel.com/ Signed-off-by: Linus Walleij Reviewed-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/realtek/Kconfig | 6 + drivers/net/dsa/realtek/Makefile | 3 + drivers/net/dsa/realtek/rtl8366rb-leds.c | 177 ++++++++++++++++ drivers/net/dsa/realtek/rtl8366rb.c | 258 +---------------------- drivers/net/dsa/realtek/rtl8366rb.h | 107 ++++++++++ 5 files changed, 299 insertions(+), 252 deletions(-) create mode 100644 drivers/net/dsa/realtek/rtl8366rb-leds.c create mode 100644 drivers/net/dsa/realtek/rtl8366rb.h diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig index 6989972eebc3..10687722d14c 100644 --- a/drivers/net/dsa/realtek/Kconfig +++ b/drivers/net/dsa/realtek/Kconfig @@ -43,4 +43,10 @@ config NET_DSA_REALTEK_RTL8366RB help Select to enable support for Realtek RTL8366RB. 
+config NET_DSA_REALTEK_RTL8366RB_LEDS + bool "Support RTL8366RB LED control" + depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB) + depends on NET_DSA_REALTEK_RTL8366RB + default NET_DSA_REALTEK_RTL8366RB + endif diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile index 35491dc20d6d..17367bcba496 100644 --- a/drivers/net/dsa/realtek/Makefile +++ b/drivers/net/dsa/realtek/Makefile @@ -12,4 +12,7 @@ endif obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o rtl8366-objs := rtl8366-core.o rtl8366rb.o +ifdef CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS +rtl8366-objs += rtl8366rb-leds.o +endif obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o diff --git a/drivers/net/dsa/realtek/rtl8366rb-leds.c b/drivers/net/dsa/realtek/rtl8366rb-leds.c new file mode 100644 index 000000000000..99c890681ae6 --- /dev/null +++ b/drivers/net/dsa/realtek/rtl8366rb-leds.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include "rtl83xx.h" +#include "rtl8366rb.h" + +static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port) +{ + switch (led_group) { + case 0: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + case 1: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + case 2: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + case 3: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + default: + return 0; + } +} + +static int rb8366rb_get_port_led(struct rtl8366rb_led *led) +{ + struct realtek_priv *priv = led->priv; + u8 led_group = led->led_group; + u8 port_num = led->port_num; + int ret; + u32 val; + + ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group), + &val); + if (ret) { + dev_err(priv->dev, "error reading LED on port %d group %d\n", + led_group, port_num); + return ret; + } + + return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num)); +} + +static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable) +{ + struct realtek_priv *priv = led->priv; + u8 led_group = led->led_group; + u8 port_num = led->port_num; + int ret; + + ret = regmap_update_bits(priv->map, + RTL8366RB_LED_X_X_CTRL_REG(led_group), + rtl8366rb_led_group_port_mask(led_group, + port_num), + enable ? 
0xffff : 0); + if (ret) { + dev_err(priv->dev, "error updating LED on port %d group %d\n", + led_group, port_num); + return ret; + } + + /* Change the LED group to manual controlled LEDs if required */ + ret = rb8366rb_set_ledgroup_mode(priv, led_group, + RTL8366RB_LEDGROUP_FORCE); + + if (ret) { + dev_err(priv->dev, "error updating LED GROUP group %d\n", + led_group); + return ret; + } + + return 0; +} + +static int +rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev, + enum led_brightness brightness) +{ + struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led, + cdev); + + return rb8366rb_set_port_led(led, brightness == LED_ON); +} + +static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp, + struct fwnode_handle *led_fwnode) +{ + struct rtl8366rb *rb = priv->chip_data; + struct led_init_data init_data = { }; + enum led_default_state state; + struct rtl8366rb_led *led; + u32 led_group; + int ret; + + ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group); + if (ret) + return ret; + + if (led_group >= RTL8366RB_NUM_LEDGROUPS) { + dev_warn(priv->dev, "Invalid LED reg %d defined for port %d", + led_group, dp->index); + return -EINVAL; + } + + led = &rb->leds[dp->index][led_group]; + led->port_num = dp->index; + led->led_group = led_group; + led->priv = priv; + + state = led_init_default_state_get(led_fwnode); + switch (state) { + case LEDS_DEFSTATE_ON: + led->cdev.brightness = 1; + rb8366rb_set_port_led(led, 1); + break; + case LEDS_DEFSTATE_KEEP: + led->cdev.brightness = + rb8366rb_get_port_led(led); + break; + case LEDS_DEFSTATE_OFF: + default: + led->cdev.brightness = 0; + rb8366rb_set_port_led(led, 0); + } + + led->cdev.max_brightness = 1; + led->cdev.brightness_set_blocking = + rtl8366rb_cled_brightness_set_blocking; + init_data.fwnode = led_fwnode; + init_data.devname_mandatory = true; + + init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d", + dp->ds->index, dp->index, led_group); + if (!init_data.devicename) + return -ENOMEM; + + ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data); + if (ret) { + dev_warn(priv->dev, "Failed to init LED %d for port %d", + led_group, dp->index); + return ret; + } + + return 0; +} + +int rtl8366rb_setup_leds(struct realtek_priv *priv) +{ + struct dsa_switch *ds = &priv->ds; + struct device_node *leds_np; + struct dsa_port *dp; + int ret = 0; + + dsa_switch_for_each_port(dp, ds) { + if (!dp->dn) + continue; + + leds_np = of_get_child_by_name(dp->dn, "leds"); + if (!leds_np) { + dev_dbg(priv->dev, "No leds defined for port %d", + dp->index); + continue; + } + + for_each_child_of_node_scoped(leds_np, led_np) { + ret = rtl8366rb_setup_led(priv, dp, + of_fwnode_handle(led_np)); + if (ret) + break; + } + + of_node_put(leds_np); + if (ret) + return ret; + } + return 0; +} diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c index 4c4a95d4380c..f54771cab56d 100644 --- a/drivers/net/dsa/realtek/rtl8366rb.c +++ b/drivers/net/dsa/realtek/rtl8366rb.c @@ -27,11 +27,7 @@ #include "realtek-smi.h" #include "realtek-mdio.h" #include "rtl83xx.h" - -#define RTL8366RB_PORT_NUM_CPU 5 -#define RTL8366RB_NUM_PORTS 6 -#define RTL8366RB_PHY_NO_MAX 4 -#define RTL8366RB_PHY_ADDR_MAX 31 +#include "rtl8366rb.h" /* Switch Global Configuration register */ #define RTL8366RB_SGCR 0x0000 @@ -176,39 +172,6 @@ */ #define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f -/* LED control registers */ -/* The LED blink rate is global; it is used by all triggers in all groups. 
*/ -#define RTL8366RB_LED_BLINKRATE_REG 0x0430 -#define RTL8366RB_LED_BLINKRATE_MASK 0x0007 -#define RTL8366RB_LED_BLINKRATE_28MS 0x0000 -#define RTL8366RB_LED_BLINKRATE_56MS 0x0001 -#define RTL8366RB_LED_BLINKRATE_84MS 0x0002 -#define RTL8366RB_LED_BLINKRATE_111MS 0x0003 -#define RTL8366RB_LED_BLINKRATE_222MS 0x0004 -#define RTL8366RB_LED_BLINKRATE_446MS 0x0005 - -/* LED trigger event for each group */ -#define RTL8366RB_LED_CTRL_REG 0x0431 -#define RTL8366RB_LED_CTRL_OFFSET(led_group) \ - (4 * (led_group)) -#define RTL8366RB_LED_CTRL_MASK(led_group) \ - (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group)) - -/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only - * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is - * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored. - */ -#define RTL8366RB_LED_0_1_CTRL_REG 0x0432 -#define RTL8366RB_LED_2_3_CTRL_REG 0x0433 -#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \ - ((led_group) <= 1 ? \ - RTL8366RB_LED_0_1_CTRL_REG : \ - RTL8366RB_LED_2_3_CTRL_REG) -#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0) -#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6) -#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0) -#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6) - #define RTL8366RB_MIB_COUNT 33 #define RTL8366RB_GLOBAL_MIB_COUNT 1 #define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050 @@ -244,7 +207,6 @@ #define RTL8366RB_PORT_STATUS_AN_MASK 0x0080 #define RTL8366RB_NUM_VLANS 16 -#define RTL8366RB_NUM_LEDGROUPS 4 #define RTL8366RB_NUM_VIDS 4096 #define RTL8366RB_PRIORITYMAX 7 #define RTL8366RB_NUM_FIDS 8 @@ -351,46 +313,6 @@ #define RTL8366RB_GREEN_FEATURE_TX BIT(0) #define RTL8366RB_GREEN_FEATURE_RX BIT(2) -enum rtl8366_ledgroup_mode { - RTL8366RB_LEDGROUP_OFF = 0x0, - RTL8366RB_LEDGROUP_DUP_COL = 0x1, - RTL8366RB_LEDGROUP_LINK_ACT = 0x2, - RTL8366RB_LEDGROUP_SPD1000 = 0x3, - RTL8366RB_LEDGROUP_SPD100 = 0x4, - RTL8366RB_LEDGROUP_SPD10 = 0x5, - RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6, - RTL8366RB_LEDGROUP_SPD100_ACT = 0x7, - RTL8366RB_LEDGROUP_SPD10_ACT = 0x8, - RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9, - RTL8366RB_LEDGROUP_FIBER = 0xa, - RTL8366RB_LEDGROUP_AN_FAULT = 0xb, - RTL8366RB_LEDGROUP_LINK_RX = 0xc, - RTL8366RB_LEDGROUP_LINK_TX = 0xd, - RTL8366RB_LEDGROUP_MASTER = 0xe, - RTL8366RB_LEDGROUP_FORCE = 0xf, - - __RTL8366RB_LEDGROUP_MODE_MAX -}; - -struct rtl8366rb_led { - u8 port_num; - u8 led_group; - struct realtek_priv *priv; - struct led_classdev cdev; -}; - -/** - * struct rtl8366rb - RTL8366RB-specific data - * @max_mtu: per-port max MTU setting - * @pvid_enabled: if PVID is set for respective port - * @leds: per-port and per-ledgroup led info - */ -struct rtl8366rb { - unsigned int max_mtu[RTL8366RB_NUM_PORTS]; - bool pvid_enabled[RTL8366RB_NUM_PORTS]; - struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS]; -}; - static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = { { 0, 0, 4, "IfInOctets" }, { 0, 4, 4, "EtherStatsOctets" }, @@ -831,9 +753,10 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table, return 0; } -static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv, - u8 led_group, - enum rtl8366_ledgroup_mode mode) +/* This code is used also with LEDs disabled */ +int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv, + u8 led_group, + enum rtl8366_ledgroup_mode mode) { int ret; u32 val; @@ -850,144 +773,7 @@ static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv, return 0; } -static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port) -{ - 
switch (led_group) { - case 0: - return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); - case 1: - return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); - case 2: - return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); - case 3: - return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); - default: - return 0; - } -} - -static int rb8366rb_get_port_led(struct rtl8366rb_led *led) -{ - struct realtek_priv *priv = led->priv; - u8 led_group = led->led_group; - u8 port_num = led->port_num; - int ret; - u32 val; - - ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group), - &val); - if (ret) { - dev_err(priv->dev, "error reading LED on port %d group %d\n", - led_group, port_num); - return ret; - } - - return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num)); -} - -static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable) -{ - struct realtek_priv *priv = led->priv; - u8 led_group = led->led_group; - u8 port_num = led->port_num; - int ret; - - ret = regmap_update_bits(priv->map, - RTL8366RB_LED_X_X_CTRL_REG(led_group), - rtl8366rb_led_group_port_mask(led_group, - port_num), - enable ? 0xffff : 0); - if (ret) { - dev_err(priv->dev, "error updating LED on port %d group %d\n", - led_group, port_num); - return ret; - } - - /* Change the LED group to manual controlled LEDs if required */ - ret = rb8366rb_set_ledgroup_mode(priv, led_group, - RTL8366RB_LEDGROUP_FORCE); - - if (ret) { - dev_err(priv->dev, "error updating LED GROUP group %d\n", - led_group); - return ret; - } - - return 0; -} - -static int -rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev, - enum led_brightness brightness) -{ - struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led, - cdev); - - return rb8366rb_set_port_led(led, brightness == LED_ON); -} - -static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp, - struct fwnode_handle *led_fwnode) -{ - struct rtl8366rb *rb = priv->chip_data; - struct led_init_data init_data = { }; - enum led_default_state state; - struct rtl8366rb_led *led; - u32 led_group; - int ret; - - ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group); - if (ret) - return ret; - - if (led_group >= RTL8366RB_NUM_LEDGROUPS) { - dev_warn(priv->dev, "Invalid LED reg %d defined for port %d", - led_group, dp->index); - return -EINVAL; - } - - led = &rb->leds[dp->index][led_group]; - led->port_num = dp->index; - led->led_group = led_group; - led->priv = priv; - - state = led_init_default_state_get(led_fwnode); - switch (state) { - case LEDS_DEFSTATE_ON: - led->cdev.brightness = 1; - rb8366rb_set_port_led(led, 1); - break; - case LEDS_DEFSTATE_KEEP: - led->cdev.brightness = - rb8366rb_get_port_led(led); - break; - case LEDS_DEFSTATE_OFF: - default: - led->cdev.brightness = 0; - rb8366rb_set_port_led(led, 0); - } - - led->cdev.max_brightness = 1; - led->cdev.brightness_set_blocking = - rtl8366rb_cled_brightness_set_blocking; - init_data.fwnode = led_fwnode; - init_data.devname_mandatory = true; - - init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d", - dp->ds->index, dp->index, led_group); - if (!init_data.devicename) - return -ENOMEM; - - ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data); - if (ret) { - dev_warn(priv->dev, "Failed to init LED %d for port %d", - led_group, dp->index); - return ret; - } - - return 0; -} - +/* This code is used also with LEDs disabled */ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv) { int ret = 0; @@ -1008,38 +794,6 @@ static 
int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv) return ret; } -static int rtl8366rb_setup_leds(struct realtek_priv *priv) -{ - struct dsa_switch *ds = &priv->ds; - struct device_node *leds_np; - struct dsa_port *dp; - int ret = 0; - - dsa_switch_for_each_port(dp, ds) { - if (!dp->dn) - continue; - - leds_np = of_get_child_by_name(dp->dn, "leds"); - if (!leds_np) { - dev_dbg(priv->dev, "No leds defined for port %d", - dp->index); - continue; - } - - for_each_child_of_node_scoped(leds_np, led_np) { - ret = rtl8366rb_setup_led(priv, dp, - of_fwnode_handle(led_np)); - if (ret) - break; - } - - of_node_put(leds_np); - if (ret) - return ret; - } - return 0; -} - static int rtl8366rb_setup(struct dsa_switch *ds) { struct realtek_priv *priv = ds->priv; diff --git a/drivers/net/dsa/realtek/rtl8366rb.h b/drivers/net/dsa/realtek/rtl8366rb.h new file mode 100644 index 000000000000..685ff3275faa --- /dev/null +++ b/drivers/net/dsa/realtek/rtl8366rb.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _RTL8366RB_H +#define _RTL8366RB_H + +#include "realtek.h" + +#define RTL8366RB_PORT_NUM_CPU 5 +#define RTL8366RB_NUM_PORTS 6 +#define RTL8366RB_PHY_NO_MAX 4 +#define RTL8366RB_NUM_LEDGROUPS 4 +#define RTL8366RB_PHY_ADDR_MAX 31 + +/* LED control registers */ +/* The LED blink rate is global; it is used by all triggers in all groups. */ +#define RTL8366RB_LED_BLINKRATE_REG 0x0430 +#define RTL8366RB_LED_BLINKRATE_MASK 0x0007 +#define RTL8366RB_LED_BLINKRATE_28MS 0x0000 +#define RTL8366RB_LED_BLINKRATE_56MS 0x0001 +#define RTL8366RB_LED_BLINKRATE_84MS 0x0002 +#define RTL8366RB_LED_BLINKRATE_111MS 0x0003 +#define RTL8366RB_LED_BLINKRATE_222MS 0x0004 +#define RTL8366RB_LED_BLINKRATE_446MS 0x0005 + +/* LED trigger event for each group */ +#define RTL8366RB_LED_CTRL_REG 0x0431 +#define RTL8366RB_LED_CTRL_OFFSET(led_group) \ + (4 * (led_group)) +#define RTL8366RB_LED_CTRL_MASK(led_group) \ + (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group)) + +/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only + * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is + * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored. + */ +#define RTL8366RB_LED_0_1_CTRL_REG 0x0432 +#define RTL8366RB_LED_2_3_CTRL_REG 0x0433 +#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \ + ((led_group) <= 1 ? 
\ + RTL8366RB_LED_0_1_CTRL_REG : \ + RTL8366RB_LED_2_3_CTRL_REG) +#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0) +#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6) +#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0) +#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6) + +enum rtl8366_ledgroup_mode { + RTL8366RB_LEDGROUP_OFF = 0x0, + RTL8366RB_LEDGROUP_DUP_COL = 0x1, + RTL8366RB_LEDGROUP_LINK_ACT = 0x2, + RTL8366RB_LEDGROUP_SPD1000 = 0x3, + RTL8366RB_LEDGROUP_SPD100 = 0x4, + RTL8366RB_LEDGROUP_SPD10 = 0x5, + RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6, + RTL8366RB_LEDGROUP_SPD100_ACT = 0x7, + RTL8366RB_LEDGROUP_SPD10_ACT = 0x8, + RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9, + RTL8366RB_LEDGROUP_FIBER = 0xa, + RTL8366RB_LEDGROUP_AN_FAULT = 0xb, + RTL8366RB_LEDGROUP_LINK_RX = 0xc, + RTL8366RB_LEDGROUP_LINK_TX = 0xd, + RTL8366RB_LEDGROUP_MASTER = 0xe, + RTL8366RB_LEDGROUP_FORCE = 0xf, + + __RTL8366RB_LEDGROUP_MODE_MAX +}; + +#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS) + +struct rtl8366rb_led { + u8 port_num; + u8 led_group; + struct realtek_priv *priv; + struct led_classdev cdev; +}; + +int rtl8366rb_setup_leds(struct realtek_priv *priv); + +#else + +static inline int rtl8366rb_setup_leds(struct realtek_priv *priv) +{ + return 0; +} + +#endif /* IS_ENABLED(CONFIG_LEDS_CLASS) */ + +/** + * struct rtl8366rb - RTL8366RB-specific data + * @max_mtu: per-port max MTU setting + * @pvid_enabled: if PVID is set for respective port + * @leds: per-port and per-ledgroup led info + */ +struct rtl8366rb { + unsigned int max_mtu[RTL8366RB_NUM_PORTS]; + bool pvid_enabled[RTL8366RB_NUM_PORTS]; +#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS) + struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS]; +#endif +}; + +/* This code is used also with LEDs disabled */ +int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv, + u8 led_group, + enum rtl8366_ledgroup_mode mode); + +#endif /* _RTL8366RB_H */ From 705919c5df338633553d2bb4f09b3181479e4345 Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Mon, 24 Feb 2025 13:22:46 +0100 Subject: [PATCH 17/24] DO-NOT-MERGE: git markup: net All commits older than this one are from Netdev's net repo: https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/ Following commits are fixes for other trees than the net ones or imported from elsewhere. We should not send these commits to either -net or net-next. This commit is useful to easily find the "bottom of the tree". Signed-off-by: Matthieu Baerts --- .b4-config | 3 +++ .git_markup | 2 ++ 2 files changed, 5 insertions(+) create mode 100644 .b4-config create mode 100644 .git_markup diff --git a/.b4-config b/.b4-config new file mode 100644 index 000000000000..a0c682e364af --- /dev/null +++ b/.b4-config @@ -0,0 +1,3 @@ +[b4] + send-series-to = MPTCP Upstream + send-prefixes = mptcp-net diff --git a/.git_markup b/.git_markup new file mode 100644 index 000000000000..b8deab34c802 --- /dev/null +++ b/.git_markup @@ -0,0 +1,2 @@ +Fixes for other trees than the -net ones or imported from elsewhere. +We should not send these commits to either -net or net-next. From 5f700f0a3b9a076fcf2ee6392122ed3a6769e817 Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Mon, 24 Feb 2025 13:22:47 +0100 Subject: [PATCH 18/24] DO-NOT-MERGE: git markup: fixes other trees All commits older than this one are fixes for other trees than the -net one or imported from elsewhere. We should not send these commits to either -net or net-next or at least not as "MPTCP fixes". 
Typically they are fixing issues blocking our tests or dev. Following commits are MPTCP-related fixes for the -net tree. This commit is useful to easily find where are the fixes from/for other trees. Signed-off-by: Matthieu Baerts --- .git_markup | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.git_markup b/.git_markup index b8deab34c802..db48e96ccd7c 100644 --- a/.git_markup +++ b/.git_markup @@ -1,2 +1 @@ -Fixes for other trees than the -net ones or imported from elsewhere. -We should not send these commits to either -net or net-next. +MPTCP-related fixes for -net tree. From a7122f4a63b64a40c70b3ed081c2211e93aba56f Mon Sep 17 00:00:00 2001 From: "Matthieu Baerts (NGI0)" Date: Mon, 24 Feb 2025 13:22:48 +0100 Subject: [PATCH 19/24] mptcp: reset when MPTCP opts are dropped after join Before this patch, if the checksum was not used, the subflow was only reset if map_data_len was != 0. If there were no MPTCP options or an invalid mapping, map_data_len was not set to the data len, and then the subflow was not reset as it should have been, leaving the MPTCP connection in a wrong fallback mode. This map_data_len condition has been introduced to handle the reception of the infinite mapping. Instead, a new dedicated mapping error could have been returned and treated as a special case. However, the commit 31bf11de146c ("mptcp: introduce MAPPING_BAD_CSUM") has been introduced by Paolo Abeni soon after, and backported later on to stable. It better handle the csum case, and it means the exception for valid_csum_seen in subflow_can_fallback(), plus this one for the infinite mapping in subflow_check_data_avail(), are no longer needed. In other words, the code can be simplified there: a fallback should only be done if msk->allow_infinite_fallback is set. This boolean is set to false once MPTCP-specific operations acting on the whole MPTCP connection vs the initial path have been done, e.g. a second path has been created, or an MPTCP re-injection -- yes, possible even with a single subflow. The subflow_can_fallback() helper can then be dropped, and replaced by this single condition. This also makes the code clearer: a fallback should only be done if it is possible to do so. While at it, no need to set map_data_len to 0 in get_mapping_status() for the infinite mapping case: it will be set to skb->len just after, at the end of subflow_check_data_avail(), and not read in between. Fixes: f8d4bcacff3b ("mptcp: infinite mapping receiving") Reported-by: Chester A. 
Unal Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/544 Acked-by: Paolo Abeni Signed-off-by: Matthieu Baerts (NGI0) --- net/mptcp/subflow.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index dfcbef9c4624..9f18217dddc8 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1142,7 +1142,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk, if (data_len == 0) { pr_debug("infinite mapping received\n"); MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX); - subflow->map_data_len = 0; return MAPPING_INVALID; } @@ -1286,18 +1285,6 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss mptcp_schedule_work(sk); } -static bool subflow_can_fallback(struct mptcp_subflow_context *subflow) -{ - struct mptcp_sock *msk = mptcp_sk(subflow->conn); - - if (subflow->mp_join) - return false; - else if (READ_ONCE(msk->csum_enabled)) - return !subflow->valid_csum_seen; - else - return READ_ONCE(msk->allow_infinite_fallback); -} - static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); @@ -1393,7 +1380,7 @@ static bool subflow_check_data_avail(struct sock *ssk) return true; } - if (!subflow_can_fallback(subflow) && subflow->map_data_len) { + if (!READ_ONCE(msk->allow_infinite_fallback)) { /* fatal protocol error, close the socket. * subflow_error_report() will introduce the appropriate barriers */ From 7ec1ca0ae6c1a88a599940cb1af0d23a2e95fe61 Mon Sep 17 00:00:00 2001 From: "Matthieu Baerts (NGI0)" Date: Mon, 24 Feb 2025 13:22:49 +0100 Subject: [PATCH 20/24] mptcp: safety check before fallback Recently, some fallback have been initiated, while the connection was not supposed to fallback. Add a safety check with a warning to detect when an wrong attempt to fallback is being done. This should help detecting any future issues quicker. 
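For context, a minimal sketch of the helper after this change, based on the hunk below; the pre-existing "already done" test is written out here under its usual name, __mptcp_check_fallback(), which is not visible in the hunk context, so treat that part as an assumption:

	static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
	{
		if (__mptcp_check_fallback(msk)) {
			pr_debug("TCP fallback already done (msk=%p)\n", msk);
			return;
		}

		/* Safety check added by this patch: warn once and refuse the
		 * fallback when it is no longer allowed for this connection.
		 */
		if (WARN_ON_ONCE(!READ_ONCE(msk->allow_infinite_fallback)))
			return;

		set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
	}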
Acked-by: Paolo Abeni Signed-off-by: Matthieu Baerts (NGI0) --- net/mptcp/protocol.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index f6a207958459..ad21925af061 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -1199,6 +1199,8 @@ static inline void __mptcp_do_fallback(struct mptcp_sock *msk) pr_debug("TCP fallback already done (msk=%p)\n", msk); return; } + if (WARN_ON_ONCE(!READ_ONCE(msk->allow_infinite_fallback))) + return; set_bit(MPTCP_FALLBACK_DONE, &msk->flags); } From 24883ae42cb75156ec98bbd2536f2f84357f8d1a Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Mon, 24 Feb 2025 13:22:50 +0100 Subject: [PATCH 21/24] mptcp: always handle address removal under msk socket lock Syzkaller reported a lockdep splat in the PM control path: WARNING: CPU: 0 PID: 6693 at ./include/net/sock.h:1711 sock_owned_by_me include/net/sock.h:1711 [inline] WARNING: CPU: 0 PID: 6693 at ./include/net/sock.h:1711 msk_owned_by_me net/mptcp/protocol.h:363 [inline] WARNING: CPU: 0 PID: 6693 at ./include/net/sock.h:1711 mptcp_pm_nl_addr_send_ack+0x57c/0x610 net/mptcp/pm_netlink.c:788 Modules linked in: CPU: 0 UID: 0 PID: 6693 Comm: syz.0.205 Not tainted 6.14.0-rc2-syzkaller-00303-gad1b832bf1cf #0 Hardware name: Google Compute Engine/Google Compute Engine, BIOS Google 12/27/2024 RIP: 0010:sock_owned_by_me include/net/sock.h:1711 [inline] RIP: 0010:msk_owned_by_me net/mptcp/protocol.h:363 [inline] RIP: 0010:mptcp_pm_nl_addr_send_ack+0x57c/0x610 net/mptcp/pm_netlink.c:788 Code: 5b 41 5c 41 5d 41 5e 41 5f 5d c3 cc cc cc cc e8 ca 7b d3 f5 eb b9 e8 c3 7b d3 f5 90 0f 0b 90 e9 dd fb ff ff e8 b5 7b d3 f5 90 <0f> 0b 90 e9 3e fb ff ff 44 89 f1 80 e1 07 38 c1 0f 8c eb fb ff ff RSP: 0000:ffffc900034f6f60 EFLAGS: 00010283 RAX: ffffffff8bee3c2b RBX: 0000000000000001 RCX: 0000000000080000 RDX: ffffc90004d42000 RSI: 000000000000a407 RDI: 000000000000a408 RBP: ffffc900034f7030 R08: ffffffff8bee37f6 R09: 0100000000000000 R10: dffffc0000000000 R11: ffffed100bcc62e4 R12: ffff88805e6316e0 R13: ffff88805e630c00 R14: dffffc0000000000 R15: ffff88805e630c00 FS: 00007f7e9a7e96c0(0000) GS:ffff8880b8600000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000001b2fd18ff8 CR3: 0000000032c24000 CR4: 00000000003526f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: mptcp_pm_remove_addr+0x103/0x1d0 net/mptcp/pm.c:59 mptcp_pm_remove_anno_addr+0x1f4/0x2f0 net/mptcp/pm_netlink.c:1486 mptcp_nl_remove_subflow_and_signal_addr net/mptcp/pm_netlink.c:1518 [inline] mptcp_pm_nl_del_addr_doit+0x118d/0x1af0 net/mptcp/pm_netlink.c:1629 genl_family_rcv_msg_doit net/netlink/genetlink.c:1115 [inline] genl_family_rcv_msg net/netlink/genetlink.c:1195 [inline] genl_rcv_msg+0xb1f/0xec0 net/netlink/genetlink.c:1210 netlink_rcv_skb+0x206/0x480 net/netlink/af_netlink.c:2543 genl_rcv+0x28/0x40 net/netlink/genetlink.c:1219 netlink_unicast_kernel net/netlink/af_netlink.c:1322 [inline] netlink_unicast+0x7f6/0x990 net/netlink/af_netlink.c:1348 netlink_sendmsg+0x8de/0xcb0 net/netlink/af_netlink.c:1892 sock_sendmsg_nosec net/socket.c:718 [inline] __sock_sendmsg+0x221/0x270 net/socket.c:733 ____sys_sendmsg+0x53a/0x860 net/socket.c:2573 ___sys_sendmsg net/socket.c:2627 [inline] __sys_sendmsg+0x269/0x350 net/socket.c:2659 do_syscall_x64 arch/x86/entry/common.c:52 [inline] do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83 entry_SYSCALL_64_after_hwframe+0x77/0x7f RIP: 0033:0x7f7e9998cde9 
Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 a8 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007f7e9a7e9038 EFLAGS: 00000246 ORIG_RAX: 000000000000002e RAX: ffffffffffffffda RBX: 00007f7e99ba5fa0 RCX: 00007f7e9998cde9 RDX: 000000002000c094 RSI: 0000400000000000 RDI: 0000000000000007 RBP: 00007f7e99a0e2a0 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000 R13: 0000000000000000 R14: 00007f7e99ba5fa0 R15: 00007fff49231088 Indeed the PM can try to send a RM_ADDR over a msk without acquiring first the msk socket lock. The bugged code-path comes from an early optimization: when there are no subflows, the PM should (usually) not send RM_ADDR notifications. The above statement is incorrect, as without locks another process could concurrent create a new subflow and cause the RM_ADDR generation. Additionally the supposed optimization is not very effective even performance-wise, as most mptcp sockets should have at least one subflow: the MPC one. Address the issue removing the buggy code path, the existing "slow-path" will handle correctly even the edge case. Reported-by: syzbot+cd3ce3d03a3393ae9700@syzkaller.appspotmail.com Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/546 Fixes: b6c08380860b ("mptcp: remove addr and subflow in PM netlink") Signed-off-by: Paolo Abeni Reviewed-by: Matthieu Baerts (NGI0) --- net/mptcp/pm_netlink.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 572d160edca3..c0e47f4f7b1a 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -1514,11 +1514,6 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net, if (mptcp_pm_is_userspace(msk)) goto next; - if (list_empty(&msk->conn_list)) { - mptcp_pm_remove_anno_addr(msk, addr, false); - goto next; - } - lock_sock(sk); remove_subflow = mptcp_lookup_subflow_by_saddr(&msk->conn_list, addr); mptcp_pm_remove_anno_addr(msk, addr, remove_subflow && From bcb20f60b3d129e3c447a9ef991a0bfb998c73eb Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Mon, 24 Feb 2025 13:22:51 +0100 Subject: [PATCH 22/24] DO-NOT-MERGE: git markup: fixes net All commits older than this one are MPTCP-related fixes for -net tree. Following commits are MPTCP-related modifications needed for public CIs. This commit is useful to easily find where are MPTCP-related fixes for the -net tree. Signed-off-by: Matthieu Baerts --- .git_markup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.git_markup b/.git_markup index db48e96ccd7c..2bbf8d0d5a7d 100644 --- a/.git_markup +++ b/.git_markup @@ -1 +1 @@ -MPTCP-related fixes for -net tree. +MPTCP-related modifications needed for public CIs. From b21e28e8dc644bd98d5adbce5faf51a711170a45 Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Mon, 24 Feb 2025 13:22:52 +0100 Subject: [PATCH 23/24] DO-NOT-MERGE: mptcp: add CI support Currently supported: - Github Actions: - build-validation: check different combinations of validation - checkpatch: use checkpatch.pl to validate new commits - notif: send notifications to the IRC channel and emails - Update TopGit tree: sync with net and net-next and then override the export branches. - Cirrus: - Run tests in a KVM: selftests, kunit, packetdrill, etc. 
Signed-off-by: Matthieu Baerts --- .github/FUNDING.yml | 4 + .github/ISSUE_TEMPLATE/01-bug.yml | 104 +++++ .github/ISSUE_TEMPLATE/02-feature.yml | 49 +++ .github/ISSUE_TEMPLATE/03-question.yml | 28 ++ .github/workflows/build-validation.yml | 334 +++++++++++++++ .github/workflows/checkpatch.yml | 191 +++++++++ .github/workflows/notif.yml | 56 +++ .github/workflows/tests.yml | 562 +++++++++++++++++++++++++ .github/workflows/update-tg-tree.yml | 54 +++ SECURITY.md | 17 + 10 files changed, 1399 insertions(+) create mode 100644 .github/FUNDING.yml create mode 100644 .github/ISSUE_TEMPLATE/01-bug.yml create mode 100644 .github/ISSUE_TEMPLATE/02-feature.yml create mode 100644 .github/ISSUE_TEMPLATE/03-question.yml create mode 100644 .github/workflows/build-validation.yml create mode 100644 .github/workflows/checkpatch.yml create mode 100644 .github/workflows/notif.yml create mode 100644 .github/workflows/tests.yml create mode 100644 .github/workflows/update-tg-tree.yml create mode 100644 SECURITY.md diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 000000000000..4e039626d5a5 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,4 @@ +github: [matttbe] +liberapay: matttbe +patreon: matttbe +thanks_dev: u/gh/matttbe diff --git a/.github/ISSUE_TEMPLATE/01-bug.yml b/.github/ISSUE_TEMPLATE/01-bug.yml new file mode 100644 index 000000000000..a0784f2f872e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/01-bug.yml @@ -0,0 +1,104 @@ +name: Bug Report +description: Create a report to help us improve +labels: ["bug", "triage"] +body: + - type: markdown + attributes: + value: | + Thanks for helping us improve! 🙏 + Please answer these questions and provide as much information as possible about your problem. + + - type: checkboxes + id: pre-req + attributes: + label: Pre-requisites + description: "Before opening this ticket, I checked that:" + options: + - label: "A similar [issue](https://github.com/multipath-tcp/mptcp_net-next/issues/) has not been reported before." + - label: "[mptcp.dev](https://www.mptcp.dev) website does not cover my case." + - label: "An up-to-date kernel is being used." + - label: "This case is not fixed with the latest stable (or LTS) version listed on [kernel.org](https://kernel.org)" + + - type: textarea + id: what-did-you-do + attributes: + label: "What did you do?" + description: "If possible, provide a recipe for reproducing the error." + placeholder: | + Steps to reproduce the behavior: + 1. + 2. + validations: + required: true + + - type: textarea + id: actual-behavior + attributes: + label: "What happened?" + description: Prefer copying text output over using screenshots. + validations: + required: true + + - type: textarea + id: expected-behavior + attributes: + label: "What did you expect to have?" + description: Why is the current behavior incorrect, and any additional context we may need to understand the issue. + validations: + required: true + + - type: textarea + id: system-client + attributes: + label: "System info: Client" + description: | + Output of these commands executed on the **client** side: + ``` + uname -a + cat /etc/os-release + sysctl net.mptcp + ip mptcp endpoint show + ip mptcp limits show + ``` + placeholder: | + $ uname -a + Linux my-client 6.12.24-amd64 #1 SMP PREEMPT_DYNAMIC Debian 6.12.24-1 (2025-02-01) x86_64 GNU/Linux + (...) 
+ render: shell + validations: + required: true + + - type: textarea + id: system-server + attributes: + label: "System info: Server" + description: | + Output of these commands executed on the **server** side: + ``` + uname -a + cat /etc/os-release + sysctl net.mptcp + ip mptcp endpoint show + ip mptcp limits show + ``` + placeholder: | + $ uname -a + Linux my-server 6.12.24-amd64 #1 SMP PREEMPT_DYNAMIC Debian 6.12.24-1 (2025-02-01) x86_64 GNU/Linux + (...) + render: shell + validations: + required: true + + - type: textarea + id: additional-context + attributes: + label: "Additional context" + description: | + Add any other context about the problem here. + Note: It might help to get the output of `ip mptcp monitor` while reproducing the issue, in addition to the output from these commands executed just before **and** after the issue: + ``` + ss -ManiH + nstat + ``` + Packet traces (TCPDump / WireShark), configured IP addresses and routing can be helpful too. Check [here](https://www.mptcp.dev/debugging.html) for more details. + mptcpd's [`/usr/libexec/mptcp-get-debug`](https://raw.githubusercontent.com/multipath-tcp/mptcpd/refs/heads/main/scripts/mptcp-get-debug) script (mptcpd >= 0.13) can help to collect such infos. diff --git a/.github/ISSUE_TEMPLATE/02-feature.yml b/.github/ISSUE_TEMPLATE/02-feature.yml new file mode 100644 index 000000000000..404a29e874b2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/02-feature.yml @@ -0,0 +1,49 @@ +name: Feature Request +description: Suggest an idea for this project +labels: ["enhancement", "triage"] +body: + - type: markdown + attributes: + value: | + Thanks for helping us improve! 🙏 + Please answer these questions and provide as much information as possible about your idea. + + - type: checkboxes + id: pre-req + attributes: + label: Pre-requisites + description: "Before opening this ticket, I checked that:" + options: + - label: "A similar [idea](https://github.com/multipath-tcp/mptcp_net-next/issues?q=label%3Aenhancement) has not been reported before." + - label: "[mptcp.dev](https://www.mptcp.dev) website does not cover my case." + - label: "The [wiki](https://github.com/multipath-tcp/mptcp_net-next/wiki) doesn't cover my case." + + - type: textarea + id: description + attributes: + label: "Description" + description: "A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]" + validations: + required: true + + - type: textarea + id: solution + attributes: + label: "Solution" + description: "A clear and concise description of what you want to have." + validations: + required: true + + - type: textarea + id: alternatives + attributes: + label: "Considered alternatives" + description: "A clear and concise description of any alternative solutions or features you've considered." + validations: + required: true + + - type: textarea + id: additional-context + attributes: + label: "Additional context" + description: "Add any other context or screenshots about the feature request here." diff --git a/.github/ISSUE_TEMPLATE/03-question.yml b/.github/ISSUE_TEMPLATE/03-question.yml new file mode 100644 index 000000000000..5b7618de72db --- /dev/null +++ b/.github/ISSUE_TEMPLATE/03-question.yml @@ -0,0 +1,28 @@ +name: Question +description: Ask any questions not related to an issue or a feature request here +labels: ["question", "triage"] +body: + - type: markdown + attributes: + value: | + Thanks for helping us improve! 🙏 + Please answer these questions and provide as much information as possible about your idea. 
+ + - type: checkboxes + id: pre-req + attributes: + label: Pre-requisites + description: "Before opening this ticket, I checked that:" + options: + - label: "A similar [question](https://github.com/multipath-tcp/mptcp_net-next/issues?q=label%3question) has not been reported before." + - label: "[mptcp.dev](https://www.mptcp.dev) website does not cover my case." + - label: "The [wiki](https://github.com/multipath-tcp/mptcp_net-next/wiki) doesn't cover my case." + - label: "This is not a question related to the current behavior, an issue or a feature requst: if it is, please use [another template](https://github.com/multipath-tcp/mptcp_net-next/issues/new/choose) **even if it is a question**: we will need details about your system: kernel version, config, etc." + + - type: textarea + id: question + attributes: + label: "My question" + description: "A clear and concise description of your question" + validations: + required: true diff --git a/.github/workflows/build-validation.yml b/.github/workflows/build-validation.yml new file mode 100644 index 000000000000..143decc1bcdc --- /dev/null +++ b/.github/workflows/build-validation.yml @@ -0,0 +1,334 @@ +name: "MPTCP Upstream Build Validation" +on: + push: + branches-ignore: + - 'archived/**' # previous branches + - 't/**' # TopGit tree + - 'net' # part of the TopGit tree + - 'net-next' # part of the TopGit tree + - 'for-review' # part of the TopGit tree + - 'for-review-net' # part of the TopGit tree + tags: + - 'patchew/**' # patchew is using tags + # ideally, we would take 'export/**' but the cache is per branch... + # In other words, when using tags, we can only use the cache if we re-tag. + # https://github.com/actions/cache/issues/556 + # So we build the "export" branch and we try to find the tag later + +env: + CURL_OPT: "--no-progress-meter --connect-timeout 30 --retry 20 --retry-delay 10" + CURL_ACC: "Accept: application/vnd.github.v3+json" + URI: "https://api.github.com" + PW: "https://patchwork.kernel.org/api/1.2" + +permissions: {} + +jobs: + build: + name: "Build (matrix)" + if: "! 
startswith(github.ref, 'refs/tags/patchew/') || contains(github.event.head_commit.message, 'Message-Id: ')" + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + defconfig: ['x86_64', 'i386'] + ipv6: ['with_ipv6', 'without_ipv6'] + mptcp: ['with_mptcp', 'without_mptcp'] + permissions: + contents: read # to fetch code (actions/checkout) + + steps: + - name: "Checkout (light)" + if: github.ref != 'refs/heads/export' + uses: actions/checkout@v4 + with: + fetch-depth: 100 # we should not have more commits on top of export and -net + + - name: "Checkout (export)" + if: github.ref == 'refs/heads/export' + uses: actions/checkout@v4 + with: + fetch-depth: 0 # we need to fetch all commits between net and net-next, quicker to get everything + + - name: "Find base branch" + id: branch + run: | + if [ "${REF_NAME}" = "export" ]; then # just to avoid the next cmd + echo "name=export" >> ${GITHUB_OUTPUT} + elif [ -n "$(git log -1 --grep "^DO-NOT-MERGE: mptcp: enabled by default (net)$" --format="format:%H" HEAD -- net/mptcp/Kconfig)" ]; then + echo "name=export-net" >> ${GITHUB_OUTPUT} + else + echo "name=export" >> ${GITHUB_OUTPUT} + fi + env: + REF_NAME: ${{ github.ref_name }} + + - name: "Restore cache for CCache" + uses: actions/cache/restore@v4 + id: restore-ccache + with: + path: ${{ github.workspace }}/.ccache + key: ${{ runner.os }}_build_${{ matrix.defconfig }}_${{ matrix.ipv6 }}_${{ matrix.mptcp }}_${{ steps.branch.outputs.name }}-${{ github.run_id }}-${{ github.run_attempt }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}_build_${{ matrix.defconfig }}_${{ matrix.ipv6 }}_${{ matrix.mptcp }}_${{ steps.branch.outputs.name }}-${{ github.run_id }}-${{ github.run_attempt }}-${{ github.sha }} + ${{ runner.os }}_build_${{ matrix.defconfig }}_${{ matrix.ipv6 }}_${{ matrix.mptcp }}_${{ steps.branch.outputs.name }}-${{ github.run_id }}-${{ github.run_attempt }}- + ${{ runner.os }}_build_${{ matrix.defconfig }}_${{ matrix.ipv6 }}_${{ matrix.mptcp }}_${{ steps.branch.outputs.name }}-${{ github.run_id }}- + ${{ runner.os }}_build_${{ matrix.defconfig }}_${{ matrix.ipv6 }}_${{ matrix.mptcp }}_${{ steps.branch.outputs.name }}- + + - name: "Build Validation" + uses: multipath-tcp/mptcp-upstream-validate-export-action@main + with: + # we want to validate each commits on top of net-next/export (or -net) except for stable + each_commit: ${{ ! 
startswith(github.ref, 'refs/heads/stable/') }} + ccache_maxsize: 300M ## 10GB = project limit + defconfig: ${{ matrix.defconfig }} + ipv6: ${{ matrix.ipv6 }} + mptcp: ${{ matrix.mptcp }} + debug: ${{ runner.debug }} + + - name: "Artifacts" + if: always() + uses: actions/upload-artifact@v4 + with: + name: results-${{ matrix.defconfig }}_${{ matrix.ipv6 }}_${{ matrix.mptcp }} + path: ./build-*-results.txt + + - name: "Publish details" + if: always() + run: | + if stat ./build-*-results.txt &>/dev/null; then + echo '- Results for ${{ matrix.defconfig }} ${{ matrix.ipv6 }} ${{ matrix.mptcp }}:' >> ${GITHUB_STEP_SUMMARY} + echo "\`\`\`" >> ${GITHUB_STEP_SUMMARY} + cat ./build-*-results.txt >> ${GITHUB_STEP_SUMMARY} + echo "\`\`\`" >> ${GITHUB_STEP_SUMMARY} + fi + + - name: "Save cache for CCache" + if: github.ref == 'refs/heads/export' || github.ref == 'refs/heads/export-net' + uses: actions/cache/save@v4 + with: + path: ${{ github.workspace }}/.ccache + key: ${{ steps.restore-ccache.outputs.cache-primary-key }} + + notif-export: + name: "Notifications export branches" + needs: build + # only for the official repo, export branches + if: always() && github.repository_owner == 'multipath-tcp' && (github.ref == 'refs/heads/export' || github.ref == 'refs/heads/export-net') + runs-on: ubuntu-latest + steps: + - name: get linked tag + id: tag + run: | + TAG=$(curl ${CURL_OPT} -H "${CURL_ACC}" -H "${CURL_AUTH}" "${URL}" | jq -r ".[] | select(.object.sha == \"${SHA}\").ref" | tail -n1) + echo "Found: ${TAG} (${SHA} - ${BRANCH})" + TAG="${TAG:10}" + echo "tag=${TAG:-${BRANCH}}" >> ${GITHUB_OUTPUT} + env: + CURL_AUTH: "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" + URL: "${{ env.URI }}/repos/${{ github.repository }}/git/matching-refs/tags/" + SHA: "${{ github.sha }}" + BRANCH: "${{ github.ref_name }}" + + - name: irc build + uses: rectalogic/notify-irc@v2 + with: + server: irc.libera.chat + channel: "#mptcp-ci" + nickname: gh-build-bot + verbose: true + message: |- + New build validating ${{ steps.tag.outputs.tag }} (by ${{ github.actor }}) ended with ${{ needs.build.result }}: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + + - name: irc build error + if: needs.build.result == 'failure' + uses: rectalogic/notify-irc@v2 + with: + server: irc.libera.chat + channel: "#mptcp" + nickname: gh-build-bot + verbose: true + message: |- + New build validating ${{ steps.tag.outputs.tag }} (by ${{ github.actor }}) failed: ${{ needs.build.result }}: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + + notif-patchew: + name: "Notifications patchew tags" + needs: build + # only for the official repo, patchew tags branches + if: always() && github.repository_owner == 'multipath-tcp' && startswith(github.ref, 'refs/tags/patchew/') && (needs.build.result == 'success' || needs.build.result == 'failure') + runs-on: ubuntu-latest + steps: + - name: "Get Results" + uses: actions/download-artifact@v4 + with: + pattern: results-* + merge-multiple: true + + - name: "Patchwork" + run: | + # $1: mid + get_status() { + case "$(awk "/^${1} /{ print \$2 }" build-*-results.txt | sort -u)" in + 'fail'*) echo "fail"; ;; + *'warning') echo "warning"; ;; + 'success') echo "success"; ;; + *) echo "fail"; ;; + esac + } + + # $1: mid, $2: status + get_desc() { + awk "/^${1} ${2} /{ + out=\$3 + for(i=4; i<=NF; i++) + out=out\" \"\$i + print out + }" build-*-results.txt | sort -u | sed '$!{:a;N;s/\n/ ; /;ta}' + } + + # $1: mid, $2: status, $3: desc + _send() { 
local check_url + check_url="$(curl "${URL_PW}${1}" | jq -r 'last(.[].checks)')" + if [ -z "${check_url}" ] || [ "${check_url}" = "null" ]; then + echo "URL not found: '${check_url}' '${URL_PW}${1}'" + return 1 + fi + + curl ${CURL_OPT} \ + -X POST \ + -H "Authorization: Token ${{ secrets.PW_TOKEN }}" \ + -F "state=${2}" \ + -F "target_url=${URL_GH}" \ + -F "context=build" \ + -F "description=${3}" \ + "${check_url}" | jq '.' + } + + FIRST=1 + send() { local i + # patches can take a bit of time to appear: retry the first time + if [ "${FIRST}" = "1" ]; then + FIRST=0 + + for i in $(seq 45); do + if _send "${@}"; then + echo "Successful sent after ${i} attempts" + return 0 + fi + sleep 1m + done + + curl "${URL_PW}${1}" + return 1 + else + _send "${@}" + fi + } + + if ! ls ./build-*-results.txt; then + echo "Strange, no results, please check why" + exit 1 + fi + + while read -r mid; do + status=$(get_status "${mid}") + desc=$(get_desc "${mid}" "${status}") + send "${mid}" "${status}" "${desc}" + done < <(awk '{ print $1 }' build-*-results.txt | sort -u) + env: + URL_GH: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + URL_PW: "${{ env.PW }}/patches/?project=mptcp&msgid=" + + - name: get commit info + id: commit + if: needs.build.result == 'failure' + run: | + cat <<'EOF' > commit.json + ${{ toJSON(github.event.head_commit) }} + EOF + + # ignore error, just in case the MID has not been added by the author + read -r TAG MID < <(jq -r '.message' commit.json | grep -i "^Message-Id: " | tail -n1) || true + + # Guess the subject from the last commit + SUBJECT=$(jq -r '.message' commit.json | head -n1) + + if [ -n "${MID:1:-1}" ]; then + # get cover-letter and series' name if any + URL_PW_SERIES_API=$(curl "${URL_PW}${MID:1:-1}" | jq -er 'last(last(.[].series)[].url)' || true) + if [ -n "${URL_PW_SERIES_API}" ] && [ "${URL_PW_SERIES_API}" != "null" ]; then + echo "series=${URL_PW_SERIES}$(basename "${URL_PW_SERIES_API}")" >> ${GITHUB_OUTPUT} + if curl "${URL_PW_SERIES_API}" > pw_series.json && [ -s pw_series.json ]; then + CL="$(jq '.cover_letter' pw_series.json || true)" + if [ -n "${CL}" ] && [ "${CL}" != "null" ] && [ "${CL}" != "{}" ]; then + MID=$(echo "${CL}" | jq -er '.msgid' || echo "${MID}") + SUBJECT=$(jq -er '.name' pw_series.json || echo "${SUBJECT}") + fi + fi + fi + + # get tags from Lore: not fully available from Patchwork + SUBJECT="$(curl "${URL_LORE//MID/${MID:1:-1}}" | grep '^Subject: ' | head -n1 | sed 's/^Subject: \(\[.*\] \).*/\1/')${SUBJECT}" + fi + + echo "Found message ID: '${TAG}' '${MID}'" + echo "mid=${MID:1:-1}" >> ${GITHUB_OUTPUT} + + echo "Found subject: '${SUBJECT}'" + echo "subject=${SUBJECT}" >> ${GITHUB_OUTPUT} + + NAME=$(jq -r '.author.name' commit.json) + EMAIL=$(jq -r '.author.email' commit.json) + echo "Found author: '${NAME}' '${EMAIL}'" + echo "name=${NAME%% *}" >> ${GITHUB_OUTPUT} + echo "author=${NAME} <${EMAIL}>" >> ${GITHUB_OUTPUT} + + SHA=$(jq -r '.id' commit.json) + echo "Found SHA: '${SHA}' ('${SHA:0:12}')" + echo "sha=${SHA:0:12}" >> ${GITHUB_OUTPUT} + + COMMITTER=$(jq -r '.committer.name' commit.json) + echo "Found committer: '${COMMITTER}'" + echo "committer=${COMMITTER}" >> ${GITHUB_OUTPUT} + env: + URL_PW: "${{ env.PW }}/patches/?project=mptcp&msgid=" + URL_PW_SERIES: "https://patchwork.kernel.org/project/mptcp/list/?series=" + URL_LORE: "https://lore.kernel.org/mptcp/MID/raw" + + - name: send email + if: needs.build.result == 'failure' + uses: dawidd6/action-send-mail@v3 + with: + server_address: 
smtp.gmail.com + server_port: 465 + username: ${{ secrets.MAIL_USERNAME }} + password: ${{ secrets.MAIL_PASSWORD }} + to: ${{ steps.commit.outputs.author }} + cc: mptcp@lists.linux.dev + from: MPTCP CI + reply_to: mptcp@lists.linux.dev + in_reply_to: "<${{ steps.commit.outputs.mid }}>" + subject: "Re: ${{ steps.commit.outputs.subject }}" + body: | + Hi ${{ steps.commit.outputs.name }}, + + Thank you for your modifications, that's great! + + But sadly, our CI spotted some issues with it when trying to build it. + + You can find more details there: + + ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + + Status: ${{ needs.build.result }} + Initiator: ${{ steps.commit.outputs.committer }} + Commits: ${{ github.server_url }}/${{ github.repository }}/commits/${{ steps.commit.outputs.sha }} + Patchwork: ${{ steps.commit.outputs.series }} + + Feel free to reply to this email if you cannot access logs, if you need + some support to fix the error, if this doesn't seem to be caused by your + modifications or if the error is a false positive one. + + Cheers, + MPTCP GH Action bot + Bot operated by Matthieu Baerts (NGI0 Core) diff --git a/.github/workflows/checkpatch.yml b/.github/workflows/checkpatch.yml new file mode 100644 index 000000000000..d86e76eaed06 --- /dev/null +++ b/.github/workflows/checkpatch.yml @@ -0,0 +1,191 @@ +name: "CheckPatch" +on: + push: + branches-ignore: + - 'archived/**' # previous branches + - 't/**' # TopGit tree + - 'net' # part of the TopGit tree + - 'net-next' # part of the TopGit tree + - 'for-review' # part of the TopGit tree + - 'for-review-net' # part of the TopGit tree + tags: + - 'patchew/**' # patchew is using tags + +env: + CURL_OPT: "--no-progress-meter --connect-timeout 30 --retry 20 --retry-delay 10" + PW: "https://patchwork.kernel.org/api/1.2" + CHECKPATCH_RESULTS: "./checkpatch-results.txt" + CHECKPATCH_DETAILS: "./checkpatch-details.txt" + SHELLCHECK_RESULTS: "./shellcheck-results.txt" + SHELLCHECK_DETAILS: "./shellcheck-details.txt" + +permissions: {} + +jobs: + checkpatch: + name: "Checkpatch" + # for others or for the official repo but only commits from patchew + if: "github.repository_owner != 'multipath-tcp' || (startswith(github.ref, 'refs/tags/patchew/') && contains(github.event.head_commit.message, 'Message-Id: '))" + runs-on: ubuntu-latest + permissions: + contents: read # to fetch code (actions/checkout) + + steps: + - name: "Checkout" + uses: actions/checkout@v4 + with: + fetch-depth: 0 ## to make sure a mentioned commit exists + + - name: "Checkpatch" + uses: multipath-tcp/mptcp-upstream-validate-export-action@main + with: + each_commit: true + checkpatch: true + debug: ${{ secrets.BUILD_ACTION_DEBUG }} + + - name: "Publish details" + if: always() + run: | + if [ -s "${{ env.CHECKPATCH_DETAILS }}" ]; then + echo '## CheckPatch' >> ${GITHUB_STEP_SUMMARY} + cat "${{ env.CHECKPATCH_DETAILS }}" >> ${GITHUB_STEP_SUMMARY} + fi + if [ -s "${{ env.SHELLCHECK_DETAILS }}" ]; then + echo '## ShellCheck' >> ${GITHUB_STEP_SUMMARY} + cat "${{ env.SHELLCHECK_DETAILS }}" >> ${GITHUB_STEP_SUMMARY} + fi + + - name: "Artifacts" + uses: actions/upload-artifact@v4 + with: + name: results + path: | + ${{ env.CHECKPATCH_RESULTS }} + ${{ env.SHELLCHECK_RESULTS }} + + - name: "Artifacts" + uses: actions/upload-artifact@v4 + with: + name: details + path: | + ${{ env.CHECKPATCH_DETAILS }} + ${{ env.SHELLCHECK_DETAILS }} + + notif: + name: "Notifications" + needs: checkpatch + # only for the official repo (patchew) + if: 
github.repository_owner == 'multipath-tcp' && startswith(github.ref, 'refs/tags/patchew/') && (needs.checkpatch.result == 'success' || needs.checkpatch.result == 'failure') + runs-on: ubuntu-latest + steps: + - name: "Get Results" + uses: actions/download-artifact@v4 + with: + name: results + + - name: "Patchwork" + run: | + # $1: mid, $2: status, $3: desc, $4: context + _send() { local check_url + check_url="$(curl "${URL_PW}${1}" | jq -r 'last(.[].checks)')" + if [ -z "${check_url}" ] || [ "${check_url}" = "null" ]; then + echo "URL not found: '${check_url}' '${URL_PW}${1}'" + return 1 + fi + + curl ${CURL_OPT} \ + -X POST \ + -H "Authorization: Token ${{ secrets.PW_TOKEN }}" \ + -F "state=${2}" \ + -F "target_url=${URL_GH}" \ + -F "context=${4}" \ + -F "description=${3}" \ + "${check_url}" | jq '.' + } + + FIRST=1 + send() { local i + # patches can take a bit of time to appear: retry the first time + if [ "${FIRST}" = "1" ]; then + FIRST=0 + + for i in $(seq 45); do + if _send "${@}"; then + echo "Successful sent after ${i} attempts" + return 0 + fi + sleep 1m + done + + curl "${URL_PW}${1}" + return 1 + else + _send "${@}" + fi + } + + # $1: file, $2: context + parse_results() { + if [ ! -s "${1}" ]; then + echo "Strange, no results, please check why" + return 1 + fi + + while read -r mid status desc; do + echo "Sending: '${mid}' '${status}' '${desc}' '${2}'" + send "${mid}" "${status}" "${desc}" "${2}" + done < "${1}" + } + + rc=0 + parse_results "${CHECKPATCH_RESULTS}" "checkpatch" || rc=1 + parse_results "${SHELLCHECK_RESULTS}" "shellcheck" || rc=1 + exit ${rc} + + env: + URL_GH: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + URL_PW: "${{ env.PW }}/patches/?project=mptcp&msgid=" + + status: + name: "Status" + needs: checkpatch + # for others, to report an error if patches were not OK + if: github.repository_owner != 'multipath-tcp' + runs-on: ubuntu-latest + steps: + - name: "Get Results" + uses: actions/download-artifact@v4 + with: + name: results + + - name: "Set exit status" + run: | + # $1: result file, $2: context + check() { + if [ ! -s "${1}" ]; then + echo "Strange, no results, please check why" + exit 1 + fi + + if awk '{ if ($2 != "success") exit 1 }' "${1}"; then + echo " *** Everything OK with ${2}, good job!" 
+ return 0 + fi + + echo " *** ${2} detected some issues:" + cat "${1}" + echo " *** End of the issues detected by ${2}" + + return 1 + } + + echo + rc=0 + check "${CHECKPATCH_RESULTS}" "CheckPatch" || rc=1 + check "${SHELLCHECK_RESULTS}" "ShellCheck" || rc=1 + [ ${rc} -eq 0 ] && exit 0 + + echo + echo "Please check the summary page for more details about these issues:" + echo " ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + exit ${rc} diff --git a/.github/workflows/notif.yml b/.github/workflows/notif.yml new file mode 100644 index 000000000000..c4ca514ec0cb --- /dev/null +++ b/.github/workflows/notif.yml @@ -0,0 +1,56 @@ +name: "Notifications" +on: + push: + tags: + - 'export/**' # exclude patchew/** tags and branches + - 'export-net/**' + issues: + types: [opened, reopened, closed, assigned, unassigned] + +permissions: {} + +jobs: + tag: + name: "Tag" + if: github.repository_owner == 'multipath-tcp' && github.event_name == 'push' + runs-on: ubuntu-latest + steps: + - name: tag shortner + id: tag + run: | + echo "tag=${REF:10}" >> ${GITHUB_OUTPUT} + env: + REF: ${{ github.event.ref }} + - name: irc tag + uses: rectalogic/notify-irc@v2 + with: + server: irc.libera.chat + channel: "#mptcp-ci" + nickname: gh-tag-bot + verbose: true + message: "New tag available: ${{ steps.tag.outputs.tag }} (by ${{ github.actor }})" + + issues: + name: "Issues" + if: github.repository_owner == 'multipath-tcp' && github.event_name == 'issues' + runs-on: ubuntu-latest + steps: + - name: issue info + id: info + if: github.event.action != 'opened' + run: | + echo "opener=, opened by ${OPENER}" >> ${GITHUB_OUTPUT} + echo "assignee=${ASSIGNEE:+ and assigned to ${ASSIGNEE}}" >> ${GITHUB_OUTPUT} + env: + OPENER: ${{ github.event.issue.user.login }} + ASSIGNEE: ${{ github.event.assignee.login }} + - name: irc issues + uses: rectalogic/notify-irc@v2 + with: + server: irc.libera.chat + channel: "#mptcp" + nickname: gh-issues-bot + verbose: true + message: |- + Issue #${{ github.event.issue.number }} ("${{ github.event.issue.title }}"${{ steps.info.outputs.opener }}${{ steps.info.outputs.assignee }}) has been ${{ github.event.action }} by ${{ github.actor }} + ${{ github.event.issue.html_url }} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 000000000000..399377da967c --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,562 @@ +name: "MPTCP Upstream Tests Validation" +on: + push: + branches-ignore: + - 'archived/**' # previous branches + - 't/**' # TopGit tree + - 'net' # part of the TopGit tree + - 'net-next' # part of the TopGit tree + - 'for-review' # part of the TopGit tree + - 'for-review-net' # part of the TopGit tree + tags: + - 'patchew/**' # patchew is using tags + # ideally, we would take 'export/**' but the cache is per branch... + # In other words, when using tags, we can only use the cache if we re-tag. + # https://github.com/actions/cache/issues/556 + # So we test the "export" branch and we try to find the tag later + +env: + CURL_OPT: "--no-progress-meter --connect-timeout 30 --retry 20 --retry-delay 10" + CURL_ACC: "Accept: application/vnd.github.v3+json" + URI: "https://api.github.com" + PW: "https://patchwork.kernel.org/api/1.2" + +permissions: {} + +jobs: + tests: + name: "Tests" + if: "! 
startswith(github.ref, 'refs/tags/patchew/') || contains(github.event.head_commit.message, 'Message-Id: ')" + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + mode: ['normal', 'debug', 'btf-normal', 'btf-debug'] + permissions: + contents: read # to fetch code (actions/checkout) + + steps: + - name: "Checkout" + uses: actions/checkout@v4 + + #- name: "Collect Workflow Telemetry" + # uses: catchpoint/workflow-telemetry-action@v2 + + - name: "Find base branch" + id: branch + run: | + if [ "$(cat .git_markup)" = "MPTCP-related modifications only needed for our tests suite (mptcp-net)." ]; then + echo "name=export-net" >> ${GITHUB_OUTPUT} + else + echo "name=export" >> ${GITHUB_OUTPUT} + fi + + mode="${{ matrix.mode }}" + echo "mode=${mode//-/_}" >> ${GITHUB_OUTPUT} + + - name: "Restore cache for CCache" + uses: actions/cache/restore@v4 + id: restore-ccache + with: + path: ${{ github.workspace }}/.virtme/ccache + key: ${{ runner.os }}_tests_${{ steps.branch.outputs.name }}-${{ steps.branch.outputs.mode }}-${{ github.run_id }}-${{ github.run_attempt }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}_tests_${{ steps.branch.outputs.name }}-${{ steps.branch.outputs.mode }}-${{ github.run_id }}-${{ github.run_attempt }}-${{ github.sha }} + ${{ runner.os }}_tests_${{ steps.branch.outputs.name }}-${{ steps.branch.outputs.mode }}-${{ github.run_id }}-${{ github.run_attempt }}- + ${{ runner.os }}_tests_${{ steps.branch.outputs.name }}-${{ steps.branch.outputs.mode }}-${{ github.run_id }}- + ${{ runner.os }}_tests_${{ steps.branch.outputs.name }}-${{ steps.branch.outputs.mode }}- + ${{ runner.os }}_tests_${{ steps.branch.outputs.name }}- + + - name: "Docker image" + run: | + /usr/bin/docker pull ghcr.io/multipath-tcp/mptcp-upstream-virtme-docker:${{ steps.branch.outputs.name == 'export' && 'latest' || 'net' }} + + - name: "Tests" + timeout-minutes: 120 + run: | + echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' | sudo tee /etc/udev/rules.d/99-kvm4all.rules + sudo udevadm control --reload-rules + sudo udevadm trigger --name-match=kvm + + # remove old cache if any + rm -rvf "${{ github.workspace }}/.virtme/ccache-"* 2>/dev/null + + set -x + /usr/bin/docker run --privileged --rm \ + -e "INPUT_CCACHE_MAXSIZE=500M" \ + -e "INPUT_CCACHE_DIR=ccache" \ + -e "INPUT_PACKETDRILL_STABLE=${{ steps.branch.outputs.name == 'export-net' && '1' || '0' }}" \ + -e "INPUT_EXTRA_ENV=${{ startsWith(matrix.mode, 'btf-') && 'INPUT_RUN_TESTS_ONLY=bpftest_all' || '' }}" \ + -e "INPUT_TRACE=${RUNNER_DEBUG}" \ + -e "INPUT_GCOV=1" \ + -e "GITHUB_SHA" -e "GITHUB_REF_NAME" -e "GITHUB_RUN_ID" \ + -e GITHUB_ACTIONS=true -e CI=true \ + --workdir "${PWD}" \ + -v "${PWD}:${PWD}" \ + ghcr.io/multipath-tcp/mptcp-upstream-virtme-docker:${{ steps.branch.outputs.name == 'export' && 'latest' || 'net' }} \ + auto-${{ matrix.mode }} + + - name: "Publish conclusion" + if: always() + run: | + set +e + if [ -s "conclusion.txt" ]; then + { + echo '## Mode ${{ matrix.mode }}' + echo '### Conclusion (${{ matrix.mode }})' + cat "conclusion.txt" + echo '' + echo '### Summary (${{ matrix.mode }})' + echo '```' + cat "summary.txt" + echo '```' + echo '' + echo '### Coverage (${{ matrix.mode }})' + echo '```' + cat "coverage.txt" || echo "No coverage" + echo '```' + } >> "${GITHUB_STEP_SUMMARY}" + fi + touch kernel.lcov || true + + - name: "Artifacts (always)" + if: always() + uses: actions/upload-artifact@v4 + with: + name: results-${{ matrix.mode }} + path: | + conclusion.txt + summary.txt + coverage.txt + 
*.tap + config.zstd + *.tap.xml + results.json + + - name: "Artifacts (failure)" + if: failure() + uses: actions/upload-artifact@v4 + with: + name: debug-info-${{ matrix.mode }} + path: | + vmlinux.zstd + kmemleak.txt + + - name: "Artifacts (LCov)" + uses: actions/upload-artifact@v4 + with: + name: lcov-${{ matrix.mode }} + compression-level: 9 + path: | + kernel.lcov + + - name: "Artifacts (code)" + uses: actions/upload-artifact@v4 + if: github.repository_owner == 'multipath-tcp' && matrix.mode == 'normal' && (github.ref_name == 'export' || github.ref_name == 'export-net') + with: + name: code + compression-level: 9 + path: | + net/mptcp/*.[ch] + + - name: Coveralls Parallel + uses: coverallsapp/github-action@v2 + if: always() && (github.repository_owner != 'multipath-tcp' || github.ref_name == 'export' || github.ref_name == 'export-net') + with: + flag-name: ${{ matrix.mode }} + parallel: true + file: kernel.lcov + format: lcov + allow-empty: true + compare-ref: ${{ steps.branch.outputs.name }} + + - name: "Publish Test Results" + uses: EnricoMi/publish-unit-test-result-action@v2 + if: always() + with: + compare_to_earlier_commit: false + check_run: false + check_name: "Test Results (${{ matrix.mode }})" + files: | + *.tap.xml + + - name: "Save cache for CCache" + if: always() && (github.repository_owner != 'multipath-tcp' || github.ref_name == 'export' || github.ref_name == 'export-net') + uses: actions/cache/save@v4 + with: + path: ${{ github.workspace }}/.virtme/ccache + key: ${{ steps.restore-ccache.outputs.cache-primary-key }} + + publish-test-results: + name: "Publish Tests Results" + needs: tests + if: always() + runs-on: ubuntu-latest + permissions: + checks: write + + steps: + - name: "Get results" + uses: actions/download-artifact@v4 + with: + pattern: results-* + merge-multiple: false + + - name: "Publish Test Results" + uses: EnricoMi/publish-unit-test-result-action@v2 + with: + check_run_annotations_branch: "${{ steps.branch.outputs.name }}" + files: | + results-*/*.tap.xml + + - name: Coveralls Finished + uses: coverallsapp/github-action@v2 + if: github.repository_owner != 'multipath-tcp' || github.ref_name == 'export' || github.ref_name == 'export-net' + with: + parallel-finished: true + carryforward: "normal,debug,btf-normal,btf-debug" + + notif: + name: "Notifications" + needs: tests + # only for the official repo (patchew and export) + if: always() && github.repository_owner == 'multipath-tcp' && (needs.tests.result == 'success' || needs.tests.result == 'failure') + concurrency: + group: ${{ startswith(github.ref, 'refs/heads/export') && 'ci-notif' || github.sha }} + cancel-in-progress: false + runs-on: ubuntu-latest + steps: + - name: get results + uses: actions/download-artifact@v4 + with: + pattern: results-* + merge-multiple: false + + - name: get test info + id: test + run: | + for mode in normal debug btf-normal btf-debug; do + ccl="$(cat "results-${mode}/conclusion.txt")" + echo "ccl_${mode//-/_}=${ccl:-"KVM Validation: ${mode}: Critical: No conclusion ❓"}" >> ${GITHUB_OUTPUT} + echo "ccl_title_${mode//-/_}=$(echo "${ccl}" | cut -d: -f1-2)" >> ${GITHUB_OUTPUT} + echo "ccl_status_${mode//-/_}=$(echo "${ccl}" | cut -d: -f3- | sed 's/^ //')" >> ${GITHUB_OUTPUT} + done + echo "url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" >> ${GITHUB_OUTPUT} + + - name: get linked tag + if: github.ref == 'refs/heads/export' || github.ref == 'refs/heads/export-net' + id: tag + run: | + TAG=$(curl ${CURL_OPT} -H "${CURL_ACC}" -H "${CURL_AUTH}" 
"${URL}" | jq -r ".[] | select(.object.sha == \"${SHA}\").ref" | grep "^refs/tags/export" | tail -n1) + echo "Found: ${TAG} (${SHA} - ${BRANCH})" + TAG="${TAG:10}" + echo "tag=${TAG:-${BRANCH}}" >> ${GITHUB_OUTPUT} + env: + CURL_AUTH: "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" + URL: "${{ env.URI }}/repos/${{ github.repository }}/git/matching-refs/tags/" + SHA: "${{ github.sha }}" + BRANCH: "${{ github.ref_name }}" + + - name: irc tests + if: github.ref == 'refs/heads/export' || github.ref == 'refs/heads/export-net' + uses: rectalogic/notify-irc@v2 + with: + server: irc.libera.chat + channel: "#mptcp-ci" + nickname: gh-tests-bot + verbose: true + message: |- + New GH Actions Tests job validating ${{ steps.tag.outputs.tag }} (by ${{ github.actor }}) just ended: + - ${{ steps.test.outputs.ccl_normal }} + - ${{ steps.test.outputs.ccl_debug }} + - ${{ steps.test.outputs.ccl_btf_normal }} + - ${{ steps.test.outputs.ccl_btf_debug }} + - Task: ${{ steps.test.outputs.url }} + + - name: Checkout results repo + if: github.ref == 'refs/heads/export' || github.ref == 'refs/heads/export-net' + uses: actions/checkout@v4 + with: + repository: "multipath-tcp/mptcp-upstream-tests-results" + token: '${{ secrets.PAT_MATTTBE }}' + path: results + + - name: setup results repo + if: github.ref == 'refs/heads/export' || github.ref == 'refs/heads/export-net' + run: | + cd results + git config user.name "github-actions[bot]" + git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + + - name: save flakes results + if: github.ref == 'refs/heads/export' || github.ref == 'refs/heads/export-net' + run: | + for mode in normal debug btf-normal btf-debug; do + new="results-${mode}/results.json" + all="results/html/results/${{ github.ref_name }}/${mode}.json" + if [ ! -s "${new}" ]; then + echo '{"error": "all", "run_id": "${{ github.run_id }}"}' > "${new}" + fi + # append tag, merge results, limit + jq -c '.tag += "${{ steps.tag.outputs.tag }}"' "${new}" > "${new}.tag" + jq -c '. 
+= [input]' "${all}" "${new}.tag" > "${new}.all" + jq --indent 1 '.[-100:]' "${new}.all" > "${all}" + done + + cd results + git add html/results/${{ github.ref_name }}/*.json + git commit -m "json: new: ${{ steps.tag.outputs.tag }}" + + - name: get lcov + if: needs.tests.result == 'success' && (github.ref == 'refs/heads/export' || github.ref == 'refs/heads/export-net') + uses: actions/download-artifact@v4 + with: + pattern: lcov-* + merge-multiple: false + + - name: get code + if: needs.tests.result == 'success' && (github.ref == 'refs/heads/export' || github.ref == 'refs/heads/export-net') + uses: actions/download-artifact@v4 + with: + name: code + path: net/mptcp + + - name: lcov to html and publish results + if: needs.tests.result == 'success' && (github.ref == 'refs/heads/export' || github.ref == 'refs/heads/export-net') + run: | + out="results/html/lcov/${{ github.ref_name }}" + rm -rf "${out}" + mkdir -p "${out}" + /usr/bin/docker run --pull always --rm \ + --workdir "${PWD}" \ + -v "${PWD}:${PWD}" \ + mptcp/docker-lcov-alpine:latest \ + genhtml -j "$(nproc)" -t "${{ github.ref_name }}" \ + --dark-mode --legend \ + --include '/net/mptcp/' --flat \ + --function-coverage --branch-coverage --keep-going \ + -o "${out}" lcov-*/kernel.lcov | tee genhtml.log + + { + echo '' + echo '## Coverage (All)' + echo '```' + tail -n4 genhtml.log + echo '```' + } >> "${GITHUB_STEP_SUMMARY}" + + cd results + git add html/lcov/${{ github.ref_name }} + git commit -m "lcov: new: ${{ steps.tag.outputs.tag }}" || true + + - name: push results + if: github.ref == 'refs/heads/export' || github.ref == 'refs/heads/export-net' + run: | + cd results + git push + + - name: get commit info + id: commit + if: startswith(github.ref, 'refs/tags/patchew/') + run: | + cat <<'EOF' > commit.json + ${{ toJSON(github.event.head_commit) }} + EOF + + # ignore error, just in case the MID has not been added by the author + read -r TAG MID < <(jq -r '.message' commit.json | grep -i "^Message-Id: " | tail -n1) || true + echo "Found message ID: '${TAG}' '${MID}'" + echo "mid=${MID:1:-1}" >> ${GITHUB_OUTPUT} + + # Guess the subject from the last commit + SUBJECT=$(jq -r '.message' commit.json | head -n1) + echo "Found subject: '${SUBJECT}'" + echo "subject=${SUBJECT}" >> ${GITHUB_OUTPUT} + + NAME=$(jq -r '.author.name' commit.json) + EMAIL=$(jq -r '.author.email' commit.json) + echo "Found author: '${NAME}' '${EMAIL}'" + echo "name=${NAME%% *}" >> ${GITHUB_OUTPUT} + echo "author=${NAME} <${EMAIL}>" >> ${GITHUB_OUTPUT} + + SHA=$(jq -r '.id' commit.json) + echo "Found SHA: '${SHA}' ('${SHA:0:12}')" + echo "sha=${SHA:0:12}" >> ${GITHUB_OUTPUT} + + COMMITTER=$(jq -r '.committer.name' commit.json) + echo "Found committer: '${COMMITTER}'" + echo "committer=${COMMITTER}" >> ${GITHUB_OUTPUT} + + - name: set patchwork check + if: startswith(github.ref, 'refs/tags/patchew/') + run: | + CHECK_URLS=() + set_url() { local series_url + series_url=$(curl ${CURL_OPT} "${URL}" | jq -r 'last(last(.[].series)[].url)') + if [ -z "${series_url}" ] || [ "${series_url}" = "null" ]; then + echo "Series not found: '${series_url}' '${URL}'" + return 1 + fi + + echo "Found Series: '${series_url}'" + + readarray -t CHECK_URLS < <(curl ${CURL_OPT} "${series_url}" | jq -r '.patches[].url + "checks/"') + } + + # $1: title, $2: status, $3: url + submit() { local check_url + if [[ "${2}" == "Success"* ]]; then + STATE="success" + elif [[ "${2}" == "Unstable"* ]]; then + STATE="warning" + else + STATE="fail" + fi + + for check_url in "${CHECK_URLS[@]}"; do + curl 
+                -X POST \
+                -H "Authorization: Token ${{ secrets.PW_TOKEN }}" \
+                -F "state=${STATE}" \
+                -F "target_url=${3}" \
+                -F "context=${1//[ :()]/_}" \
+                -F "description=${2}" \
+                "${check_url}" | jq '.'
+            done
+          }
+
+          for i in $(seq 30); do # patches can take a bit of time to appear
+            set_url && break
+            sleep 1m
+          done
+
+          if [ "${#CHECK_URLS[@]}" -eq 0 ]; then
+            echo "Error: didn't find any URLs after ${i} attempts"
+            exit 1
+          fi
+          echo "Found: ${#CHECK_URLS[@]} urls after ${i} attempts: ${CHECK_URLS[@]}"
+
+          submit "${{ steps.test.outputs.ccl_title_normal }}" "${{ steps.test.outputs.ccl_status_normal }}" "${{ steps.test.outputs.url }}"
+          submit "${{ steps.test.outputs.ccl_title_debug }}" "${{ steps.test.outputs.ccl_status_debug }}" "${{ steps.test.outputs.url }}"
+          submit "${{ steps.test.outputs.ccl_title_btf_normal }}" "${{ steps.test.outputs.ccl_status_btf_normal }}" "${{ steps.test.outputs.url }}"
+          submit "${{ steps.test.outputs.ccl_title_btf_debug }}" "${{ steps.test.outputs.ccl_status_btf_debug }}" "${{ steps.test.outputs.url }}"
+        env:
+          URL: "${{ env.PW }}/patches/?project=mptcp&msgid=${{ steps.commit.outputs.mid }}"
+
+      # do that after having set patchwork checks, so we already waited for it to be ready
+      - name: get series info
+        id: series
+        if: startswith(github.ref, 'refs/tags/patchew/')
+        run: |
+          if [ -n "${MID:1:-1}" ]; then
+            # get cover-letter and series' name if any
+            URL_PW_SERIES_API=$(curl "${URL_PW}${MID:1:-1}" | jq -er 'last(last(.[].series)[].url)' || true)
+            if [ -n "${URL_PW_SERIES_API}" ] && [ "${URL_PW_SERIES_API}" != "null" ]; then
+              echo "series=${URL_PW_SERIES}$(basename "${URL_PW_SERIES_API}")" >> ${GITHUB_OUTPUT}
+              if curl "${URL_PW_SERIES_API}" > pw_series.json && [ -s pw_series.json ]; then
+                CL="$(jq '.cover_letter' pw_series.json || true)"
+                if [ -n "${CL}" ] && [ "${CL}" != "null" ] && [ "${CL}" != "{}" ]; then
+                  MID=$(echo "${CL}" | jq -er '.msgid' || echo "${MID}")
+                  SUBJECT=$(jq -er '.name' pw_series.json || echo "${SUBJECT}")
+                fi
+              fi
+            fi
+            # get tags from Lore: not fully available from Patchwork
+            SUBJECT="$(curl "${URL_LORE//MID/${MID:1:-1}}" | grep '^Subject: ' | head -n1 | sed 's/^Subject: \(\[.*\] \).*/\1/')${SUBJECT}"
+          fi
+
+          echo "Found message ID: '${MID}'"
+          echo "mid=${MID:1:-1}" >> ${GITHUB_OUTPUT}
+          echo "Found subject: '${SUBJECT}'"
+          echo "subject=${SUBJECT}" >> ${GITHUB_OUTPUT}
+        env:
+          URL_PW: "${{ env.PW }}/patches/?project=mptcp&msgid="
+          URL_PW_SERIES: "https://patchwork.kernel.org/project/mptcp/list/?series="
+          URL_LORE: "https://lore.kernel.org/mptcp/MID/raw"
+          MID: "<${{ steps.commit.outputs.mid }}>"
+          SUBJECT: "${{ steps.commit.outputs.subject }}"
+
+      - name: send email
+        if: startswith(github.ref, 'refs/tags/patchew/')
+        uses: dawidd6/action-send-mail@v3
+        with:
+          server_address: smtp.gmail.com
+          server_port: 465
+          username: ${{ secrets.MAIL_USERNAME }}
+          password: ${{ secrets.MAIL_PASSWORD }}
+          to: ${{ steps.commit.outputs.author }}
+          cc: mptcp@lists.linux.dev
+          from: MPTCP CI
+          reply_to: mptcp@lists.linux.dev
+          in_reply_to: "<${{ steps.series.outputs.mid }}>"
+          subject: "Re: ${{ steps.series.outputs.subject }}"
+          body: |
+            Hi ${{ steps.commit.outputs.name }},
+
+            Thank you for your modifications, that's great!
+
+            Our CI did some validations and here is its report:
+
+            - ${{ steps.test.outputs.ccl_title_normal }}: ${{ steps.test.outputs.ccl_status_normal }}
+            - ${{ steps.test.outputs.ccl_title_debug }}: ${{ steps.test.outputs.ccl_status_debug }}
+            - ${{ steps.test.outputs.ccl_title_btf_normal }}: ${{ steps.test.outputs.ccl_status_btf_normal }}
+            - ${{ steps.test.outputs.ccl_title_btf_debug }}: ${{ steps.test.outputs.ccl_status_btf_debug }}
+            - Task: ${{ steps.test.outputs.url }}
+
+            Initiator: ${{ steps.commit.outputs.committer }}
+            Commits: ${{ github.server_url }}/${{ github.repository }}/commits/${{ steps.commit.outputs.sha }}
+            Patchwork: ${{ steps.series.outputs.series }}
+
+
+            If there are some issues, you can reproduce them using the same environment as
+            the one used by the CI thanks to a docker image, e.g.:
+
+                $ cd [kernel source code]
+                $ docker run -v "${PWD}:${PWD}:rw" -w "${PWD}" --privileged --rm -it \
+                    --pull always mptcp/mptcp-upstream-virtme-docker:latest \
+                    auto-normal
+
+            For more details:
+
+                https://github.com/multipath-tcp/mptcp-upstream-virtme-docker
+
+
+            Please note that despite all the efforts that have already been made to have a
+            stable test suite when executed on a public CI like this one, it is possible that
+            some reported issues are not due to your modifications. Still, do not hesitate to
+            help us improve that ;-)
+
+            Cheers,
+            MPTCP GH Action bot
+            Bot operated by Matthieu Baerts (NGI0 Core)
+
+  status:
+    name: "Status"
+    needs: tests
+    # only for the non-official repos
+    if: always() && github.repository_owner != 'multipath-tcp'
+    runs-on: ubuntu-latest
+    steps:
+      - name: Get Results
+        uses: actions/download-artifact@v4
+        with:
+          pattern: results-*
+          merge-multiple: false
+
+      - name: Check Status
+        run: |
+          issues=()
+          for mode in normal debug btf-normal btf-debug; do
+            ccl="results-${mode}/conclusion.txt"
+            if [ ! -f "${ccl}" ] || ! grep -q "Success" "${ccl}"; then
+              issues+=("${mode}")
+            fi
+          done
+          if [ ${#issues[@]} -eq 0 ]; then
+            echo "Great, no issues!"
+            exit 0
+          fi
+          echo "Issues have been found during the tests in: ${issues[*]}."
+ echo "Please check the summary page for more details:" + echo " ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + exit 1 diff --git a/.github/workflows/update-tg-tree.yml b/.github/workflows/update-tg-tree.yml new file mode 100644 index 000000000000..767de620ee2c --- /dev/null +++ b/.github/workflows/update-tg-tree.yml @@ -0,0 +1,54 @@ +name: "Update TopGit tree" + +on: + workflow_dispatch: + inputs: + keep_base_untouched: + description: 'Set it to 1 to force a sync without updating the base from upstream' + required: true + default: '0' + force_sync: + description: 'Set it to 1 to force a sync even if net-next is not updated' + required: true + default: '0' + force_upd_net: + description: 'Set it to 1 to force updating the -net base with upstream instead of the merge-base with net-next' + required: true + default: '1' + + schedule: + - cron: '33 5 * * 1-5' # in UTC: after US West coast's work day + +permissions: {} + +jobs: + export: + if: github.repository_owner == 'multipath-tcp' + runs-on: ubuntu-latest + permissions: + contents: write # for git push (multipath-tcp/mptcp-upstream-topgit-action) + + steps: + - name: "Checkout" + uses: actions/checkout@v4 + with: + fetch-depth: 0 # we need all commits for TopGit + token: '${{ secrets.PAT_MATTTBE }}' + + - name: "Update TG tree" + uses: multipath-tcp/mptcp-upstream-topgit-action@main + with: + not_base: "${{ github.event.inputs.keep_base_untouched || '0' }}" + force_sync: "${{ github.event.inputs.force_sync || '0' }}" + force_upd_net: "${{ github.event.inputs.force_upd_net || '1' }}" + + - name: irc topgit + if: failure() && github.repository_owner == 'multipath-tcp' + uses: rectalogic/notify-irc@v2 + with: + server: irc.libera.chat + channel: "#mptcp" + nickname: gh-topgit-bot + verbose: true + message: |- + New sync with latest net and net-next failed (initiated by ${{ github.actor }}): ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000000..bcf5c945a0a8 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,17 @@ +# Security Policy + +## Supported Versions + +The ones mentioned on [kernel.org](https://www.kernel.org). + +## Reporting a Vulnerability + +Please report any issues to us, either via +[GitHub](https://github.com/multipath-tcp/mptcp_net-next/security/advisories/new), +or via emails to the MPTCP maintainers: + + - Matthieu Baerts + - Mat Martineau + +(Check the [MAINTAINERS](https://github.com/multipath-tcp/mptcp_net-next/blob/export/MAINTAINERS) +file to get the up-to-date list.) From aa6ecea07d92c1dcd850a4ad3b3e2db3338fe7e1 Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Mon, 24 Feb 2025 13:22:53 +0100 Subject: [PATCH 24/24] DO-NOT-MERGE: git markup: end common net net-next All commits older than this one are patches that are needed for both the -net and the net-next trees. Following commits are MPTCP-related modifications only needed for -net. This commit is useful to easily find where are commits needed for -net and net-next. Signed-off-by: Matthieu Baerts --- .git_markup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.git_markup b/.git_markup index 2bbf8d0d5a7d..af78ae8cf596 100644 --- a/.git_markup +++ b/.git_markup @@ -1 +1 @@ -MPTCP-related modifications needed for public CIs. +MPTCP-related modifications only needed for the -net tree.