|
| 1 | +open Multicore_bench |
| 2 | +open Picos |
| 3 | +open Picos_sync |
| 4 | +open Picos_structured |
| 5 | + |
(** Benchmarks [Ch.give]/[Ch.take] message passing between two fibers that
    both run on a single domain, and reports throughput metrics. *)
let run_one_domain ~budgetf () =
  let n_msgs = 200 * Util.iter_factor in
  let t = Ch.create () in
  let init _ = () in
  let wrap _ () = Scheduler.run in
  let work _ () =
    (* One fiber gives [n_msgs] messages, a sibling fiber takes them all;
       [join_after] waits for both to finish. *)
    Bundle.join_after (fun bundle ->
        Bundle.fork bundle (fun () ->
            for i = 1 to n_msgs do
              Ch.give t i
            done);
        Bundle.fork bundle (fun () ->
            for _ = 1 to n_msgs do
              ignore (Ch.take t)
            done))
  in
  Times.record ~budgetf ~n_domains:1 ~init ~wrap ~work ()
  |> Times.to_thruput_metrics ~n:n_msgs ~singular:"message" ~config:"one domain"
| 28 | + |
(** Benchmarks one channel shared by [n_givers] giver domains and [n_takers]
    taker domains, and reports throughput metrics. *)
let run_one ~budgetf ~n_givers ~n_takers () =
  let n_domains = n_givers + n_takers in

  (* Scale the total message count down with the domain count so the overall
     amount of work stays roughly constant across configurations. *)
  let n_msgs = 200 / n_domains * Util.iter_factor in

  (* Padded to avoid false sharing between domains. *)
  let t = Ch.create ~padded:true () in

  (* Shared counters of remaining messages to give/take, each on its own
     cache line. *)
  let n_msgs_to_give = Atomic.make 0 |> Multicore_magic.copy_as_padded in
  let n_msgs_to_take = Atomic.make 0 |> Multicore_magic.copy_as_padded in

  (* Reset the remaining-work counters before each timed run. *)
  let init _ =
    Atomic.set n_msgs_to_give n_msgs;
    Atomic.set n_msgs_to_take n_msgs
  in
  let wrap _ () = Scheduler.run in
  let work i () =
    let computation = Computation.create () in
    (* Background fiber that yields in a loop until the computation is
       cancelled with [Exit] below. *)
    let yielder () =
      try
        while true do
          Fiber.yield ()
        done
      with Exit -> ()
    in
    Fiber.spawn ~forbid:false computation [ yielder ];
    begin
      (* Domains [0 .. n_givers - 1] act as givers; the rest as takers.
         Each repeatedly claims a batch of the remaining messages via
         [Util.alloc] (presumably an atomic fetch-and-decrement style batch
         allocator — see [Util]) and processes it until none remain. *)
      if i < n_givers then
        let rec work () =
          let n = Util.alloc n_msgs_to_give in
          if 0 < n then begin
            for i = 1 to n do
              Ch.give t i
            done;
            work ()
          end
        in
        work ()
      else
        let rec work () =
          let n = Util.alloc n_msgs_to_take in
          if 0 < n then begin
            for _ = 1 to n do
              Ch.take t |> ignore
            done;
            work ()
          end
        in
        work ()
    end;
    (* Stop the yielder fiber. *)
    Computation.cancel computation (Exn_bt.get_callstack 0 Exit)
  in

  let config =
    (* E.g. "2 givers, 1 taker". *)
    let format role n =
      Printf.sprintf "%d %s%s" n role (if n = 1 then "" else "s")
    in
    Printf.sprintf "%s, %s" (format "giver" n_givers) (format "taker" n_takers)
  in
  Times.record ~budgetf ~n_domains ~init ~wrap ~work ()
  |> Times.to_thruput_metrics ~n:n_msgs ~singular:"message" ~config
| 89 | + |
(** Runs the whole channel benchmark suite: the single-domain case plus every
    giver/taker combination that fits the available domains. *)
let run_suite ~budgetf =
  let multi_domain =
    Util.cross [ 1; 2 ] [ 1; 2 ]
    |> List.concat_map (fun (n_givers, n_takers) ->
           (* Skip configurations needing more domains than recommended. *)
           if n_givers + n_takers <= Picos_domain.recommended_domain_count ()
           then run_one ~budgetf ~n_givers ~n_takers ()
           else [])
  in
  run_one_domain ~budgetf () @ multi_domain
0 commit comments