#!/bin/bash
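# Sweep the CPU (OpenMP) baseline (streamp_openmp) across thread counts and NUMA
# placements (ram/cpu), with several repetitions per configuration, logging output
# per host and date under log/$(hostname)/.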
cd baselines/cpu || exit 1
make -B NUMA=1                        # rebuild the CPU baseline with NUMA support
make inputs/randomlist10M.txt         # generate the 10M-element random input file
mkdir -p log/$(hostname)
fn=log/$(hostname)/$(date +%Y%m%d)    # per-host, per-day prefix for logs
# The upstream DPU version uses ts_size = 33554432 elements and query_length = 256 with int32_t data.
# The upstream CPU version uses inputs/randomlist33M.txt (33618177 elements) and query_length = 256 with double.
# However, the 33M input does not work with ~64 or more threads: the allocation of the internal
# temporary array ('profile_tmp = new DTYPE[ProfileLength * numThreads]') fails → use 10M elements instead.
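# (Rough estimate, assuming ProfileLength ≈ ts_size - query_length: ~33.6M profile entries
#  * 8 B (double) ≈ 269 MB per thread, i.e. ~17 GB for that single temporary array at
#  64 threads and ~34 GB at 128 threads.)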
run_benchmark() {
    local "$@"
    OMP_NUM_THREADS=${nr_threads} ./streamp_openmp inputs/randomlist10M.txt 256 ${ram} ${cpu}
}
export -f run_benchmark
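# Example: `run_benchmark nr_threads=16 ram=0 cpu=3` expands to
#   OMP_NUM_THREADS=16 ./streamp_openmp inputs/randomlist10M.txt 256 0 3
# The last two arguments presumably select the NUMA node used for memory allocation (ram)
# and for CPU binding (cpu) in the NUMA=1 build; cpu=-1 below appears to mean "no pinning".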
(
echo "single-node execution (1/2)" >&2
parallel -j1 --eta --joblog ${fn}.1.joblog --header : \
run_benchmark nr_threads={nr_threads} ram={ram} cpu={cpu} \
::: i $(seq 0 5) \
::: nr_threads 1 2 4 8 12 16 \
::: cpu $(seq 0 7) \
::: ram $(seq 0 15)
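  # -> 6 repetitions x 6 thread counts x 8 cpu values x 16 ram values = 4608 runs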
echo "multi-node execution (2/2)" >&2
parallel -j1 --eta --joblog ${fn}.2.joblog --header : \
run_benchmark nr_threads={nr_threads} ram={ram} cpu={cpu} \
::: i $(seq 0 20) \
::: nr_threads 32 48 64 96 128 \
::: cpu -1 \
::: ram $(seq 0 15)
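  # -> 21 repetitions x 5 thread counts x 1 cpu value x 16 ram values = 1680 runs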
) > ${fn}.txt
xz -f -v -9 -M 800M ${fn}.txt