Skip to content

Commit

Permalink
selftests/bpf: Add bpf_rr scheduler & test
Browse files Browse the repository at this point in the history
This patch implements the round-robin BPF MPTCP scheduler, named bpf_rr,
which always picks the next available subflow to send data on. If no such
next subflow is available, it picks the first one.

Use the MPTCP_SCHED_TEST macro to add a new test for this bpf_rr
scheduler; the arguments "1 1" mean that data has been sent on both net
devices. Run this test with the RUN_MPTCP_TEST macro.

Signed-off-by: Geliang Tang <[email protected]>
Reviewed-by: Mat Martineau <[email protected]>
Reviewed-by: Matthieu Baerts (NGI0) <[email protected]>
  • Loading branch information
Geliang Tang authored and matttbe committed Feb 24, 2025
1 parent 6d4f6e4 commit 335e596
Show file tree
Hide file tree
Showing 2 changed files with 93 additions and 0 deletions.
15 changes: 15 additions & 0 deletions tools/testing/selftests/bpf/prog_tests/mptcp.c
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
#include "mptcp_bpf_iters.skel.h"
#include "mptcp_bpf_first.skel.h"
#include "mptcp_bpf_bkup.skel.h"
#include "mptcp_bpf_rr.skel.h"

#define NS_TEST "mptcp_ns"
#define ADDR_1 "10.0.1.1"
Expand Down Expand Up @@ -696,6 +697,18 @@ static void test_bkup(void)
mptcp_bpf_bkup__destroy(skel);
}

/* Load the bpf_rr scheduler skeleton and run the shared scheduler test,
 * expecting data to have been sent on both net devices (WITH_DATA twice).
 */
static void test_rr(void)
{
	struct mptcp_bpf_rr *rr_skel = mptcp_bpf_rr__open_and_load();

	if (!ASSERT_OK_PTR(rr_skel, "open_and_load: rr"))
		return;

	test_bpf_sched(rr_skel->obj, "rr", WITH_DATA, WITH_DATA);
	mptcp_bpf_rr__destroy(rr_skel);
}

void test_mptcp(void)
{
if (test__start_subtest("base"))
Expand All @@ -712,4 +725,6 @@ void test_mptcp(void)
test_first();
if (test__start_subtest("bkup"))
test_bkup();
if (test__start_subtest("rr"))
test_rr();
}
78 changes: 78 additions & 0 deletions tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, SUSE. */

#include "mptcp_bpf.h"
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* Per-msk scheduler state: remembers the subflow tcp socket that data
 * was last scheduled on, so the next pick can advance past it.
 */
struct mptcp_rr_storage {
struct sock *last_snd;
};

/* Socket-local storage keyed by the mptcp socket; holds one
 * struct mptcp_rr_storage per msk, created on demand
 * (BPF_LOCAL_STORAGE_GET_F_CREATE) and freed with the socket.
 */
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct mptcp_rr_storage);
} mptcp_rr_map SEC(".maps");

/* Scheduler init hook: eagerly create the per-msk round-robin state so
 * it exists before the first get_send call. The return value is ignored;
 * get_send re-fetches (and re-creates if needed) the storage itself.
 */
SEC("struct_ops")
void BPF_PROG(mptcp_sched_rr_init, struct mptcp_sock *msk)
{
bpf_sk_storage_get(&mptcp_rr_map, msk, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
}

/* Scheduler release hook: drop the per-msk round-robin state when the
 * scheduler is detached from this msk.
 */
SEC("struct_ops")
void BPF_PROG(mptcp_sched_rr_release, struct mptcp_sock *msk)
{
bpf_sk_storage_delete(&mptcp_rr_map, msk);
}

/* Round-robin subflow selection: schedule the subflow that comes after
 * the one data was last sent on; fall back to subflow 0 when there is no
 * recorded last sender, when it is no longer present, or when it was the
 * last valid entry. Returns 0 on success, -1 if no subflow can be picked
 * or the per-msk storage cannot be obtained.
 */
SEC("struct_ops")
int BPF_PROG(bpf_rr_get_send, struct mptcp_sock *msk,
struct mptcp_sched_data *data)
{
struct mptcp_subflow_context *subflow;
struct mptcp_rr_storage *ptr;
struct sock *last_snd = NULL;
int nr = 0;

/* Fetch (or lazily create) the per-msk round-robin state. */
ptr = bpf_sk_storage_get(&mptcp_rr_map, msk, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!ptr)
return -1;

last_snd = ptr->last_snd;

/* Walk the subflow list looking for last_snd; the i < MPTCP_SUBFLOWS_MAX
 * bound keeps the loop verifier-friendly. With no last_snd recorded (or
 * once the list runs out) we stop immediately and keep nr == 0.
 */
for (int i = 0; i < data->subflows && i < MPTCP_SUBFLOWS_MAX; i++) {
subflow = bpf_mptcp_subflow_ctx_by_pos(data, i);
if (!last_snd || !subflow)
break;

if (mptcp_subflow_tcp_sock(subflow) == last_snd) {
/* Found the previous sender: pick the next slot, unless
 * it would run off the end or be empty — then wrap to 0.
 */
if (i + 1 == MPTCP_SUBFLOWS_MAX ||
!bpf_mptcp_subflow_ctx_by_pos(data, i + 1))
break;

nr = i + 1;
break;
}
}

subflow = bpf_mptcp_subflow_ctx_by_pos(data, nr);
if (!subflow)
return -1;
mptcp_subflow_set_scheduled(subflow, true);
/* Remember this pick so the next call advances past it. */
ptr->last_snd = mptcp_subflow_tcp_sock(subflow);
return 0;
}

/* Register the scheduler with the kernel as "bpf_rr"; only the hooks
 * implemented above are wired up.
 */
SEC(".struct_ops")
struct mptcp_sched_ops rr = {
.init = (void *)mptcp_sched_rr_init,
.release = (void *)mptcp_sched_rr_release,
.get_send = (void *)bpf_rr_get_send,
.name = "bpf_rr",
};

0 comments on commit 335e596

Please sign in to comment.