author     Juan Gomez Luna <juan.gomez@safari.ethz.ch>   2021-06-16 19:46:05 +0200
committer  Juan Gomez Luna <juan.gomez@safari.ethz.ch>   2021-06-16 19:46:05 +0200
commit     3de4b495fb176eba9a0eb517a4ce05903cb67acb
tree       fc6776a94549d2d4039898f183dbbeb2ce013ba9 /SEL
parent     ef5c3688c486b80a56d3c1cded25f2b2387f2668
PrIM -- first commit
Diffstat (limited to 'SEL')
-rw-r--r--  SEL/Makefile                        45
-rw-r--r--  SEL/baselines/cpu/Makefile           6
-rw-r--r--  SEL/baselines/cpu/README            12
-rw-r--r--  SEL/baselines/cpu/app_baseline.c   149
-rw-r--r--  SEL/baselines/gpu/Makefile           5
-rw-r--r--  SEL/baselines/gpu/README            21
-rw-r--r--  SEL/baselines/gpu/ds.h             293
-rw-r--r--  SEL/baselines/gpu/kernel.cu        113
-rw-r--r--  SEL/baselines/gpu/select.cu        242
-rw-r--r--  SEL/dpu/task.c                     123
-rw-r--r--  SEL/host/app.c                     255
-rwxr-xr-x  SEL/support/common.h                47
-rw-r--r--  SEL/support/params.h                56
-rwxr-xr-x  SEL/support/timer.h                 59
14 files changed, 1426 insertions, 0 deletions
diff --git a/SEL/Makefile b/SEL/Makefile
new file mode 100644
index 0000000..65c7962
--- /dev/null
+++ b/SEL/Makefile
@@ -0,0 +1,45 @@
+DPU_DIR := dpu
+HOST_DIR := host
+BUILDDIR ?= bin
+NR_TASKLETS ?= 16
+BL ?= 10
+NR_DPUS ?= 1
+ENERGY ?= 0
+
+define conf_filename
+ ${BUILDDIR}/.NR_DPUS_$(1)_NR_TASKLETS_$(2)_BL_$(3).conf
+endef
+CONF := $(call conf_filename,${NR_DPUS},${NR_TASKLETS},${BL})
+
+HOST_TARGET := ${BUILDDIR}/host_code
+DPU_TARGET := ${BUILDDIR}/dpu_code
+
+COMMON_INCLUDES := support
+HOST_SOURCES := $(wildcard ${HOST_DIR}/*.c)
+DPU_SOURCES := $(wildcard ${DPU_DIR}/*.c)
+
+.PHONY: all clean test
+
+__dirs := $(shell mkdir -p ${BUILDDIR})
+
+COMMON_FLAGS := -Wall -Wextra -g -I${COMMON_INCLUDES}
+HOST_FLAGS := ${COMMON_FLAGS} -std=c11 -O3 `dpu-pkg-config --cflags --libs dpu` -DNR_TASKLETS=${NR_TASKLETS} -DNR_DPUS=${NR_DPUS} -DBL=${BL} -DENERGY=${ENERGY}
+DPU_FLAGS := ${COMMON_FLAGS} -O2 -DNR_TASKLETS=${NR_TASKLETS} -DBL=${BL}
+
+all: ${HOST_TARGET} ${DPU_TARGET}
+
+${CONF}:
+	$(RM) $(call conf_filename,*,*,*)
+ touch ${CONF}
+
+${HOST_TARGET}: ${HOST_SOURCES} ${COMMON_INCLUDES} ${CONF}
+ $(CC) -o $@ ${HOST_SOURCES} ${HOST_FLAGS}
+
+${DPU_TARGET}: ${DPU_SOURCES} ${COMMON_INCLUDES} ${CONF}
+ dpu-upmem-dpurte-clang ${DPU_FLAGS} -o $@ ${DPU_SOURCES}
+
+clean:
+ $(RM) -r $(BUILDDIR)
+
+test: all
+ ./${HOST_TARGET}
diff --git a/SEL/baselines/cpu/Makefile b/SEL/baselines/cpu/Makefile
new file mode 100644
index 0000000..9da107e
--- /dev/null
+++ b/SEL/baselines/cpu/Makefile
@@ -0,0 +1,6 @@
+all:
+ gcc -o sel -fopenmp app_baseline.c
+
+clean:
+ rm sel
+
diff --git a/SEL/baselines/cpu/README b/SEL/baselines/cpu/README
new file mode 100644
index 0000000..81ce495
--- /dev/null
+++ b/SEL/baselines/cpu/README
@@ -0,0 +1,12 @@
+Select (SEL)
+
+Compilation instructions
+
+ make
+
+Execution instructions
+
+ ./sel -i 1258291200 -t 4
+
+Read more
+J. Gomez-Luna et al., “In-place Data Sliding Algorithms for Many-core Architectures,” ICPP 2015.
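
For reference, the OpenMP baseline added below implements a parallel stream compaction: each thread tests elements against a predicate and claims an output slot through an atomic counter. The following is a minimal sketch of that pattern (illustrative only; the names keep, parallel_select, in and out are not part of the repository):

    #include <stdio.h>
    #include <stdint.h>
    #include <omp.h>

    /* Keep odd values, mirroring !pred() in app_baseline.c */
    static int keep(uint64_t x) { return (x % 2) != 0; }

    /* Returns the number of kept elements; output order is not preserved */
    static int parallel_select(uint64_t *out, const uint64_t *in, int n, int nthreads) {
        int count = 0;
        omp_set_num_threads(nthreads);
        #pragma omp parallel for
        for (int i = 0; i < n; i++) {
            if (keep(in[i])) {
                int slot;
                #pragma omp atomic capture
                slot = count++;      /* claim a unique output position */
                out[slot] = in[i];
            }
        }
        return count;
    }

    int main(void) {
        enum { N = 16 };
        uint64_t in[N], out[N];
        for (int i = 0; i < N; i++) in[i] = (uint64_t)(i + 1);
        printf("kept %d of %d elements\n", parallel_select(out, in, N, 4), N);
        return 0;
    }

As in the baseline, only the count (and the timing) is meaningful; this parallel pattern does not preserve the relative order of the selected elements.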
diff --git a/SEL/baselines/cpu/app_baseline.c b/SEL/baselines/cpu/app_baseline.c
new file mode 100644
index 0000000..96949c3
--- /dev/null
+++ b/SEL/baselines/cpu/app_baseline.c
@@ -0,0 +1,149 @@
+/**
+* @file app.c
+* @brief Template for a Host Application Source File.
+*
+*/
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <assert.h>
+#include <stdint.h>
+#include <omp.h>
+#include "../../support/timer.h"
+
+static uint64_t *A;
+static uint64_t *B;
+static uint64_t *C;
+static uint64_t *C2;
+static int pos;
+
+bool pred(const uint64_t x){
+ return (x % 2) == 0;
+}
+
+
+void create_test_file(unsigned int nr_elements) {
+ //srand(0);
+
+ A = (uint64_t*) malloc(nr_elements * sizeof(uint64_t));
+ B = (uint64_t*) malloc(nr_elements * sizeof(uint64_t));
+ C = (uint64_t*) malloc(nr_elements * sizeof(uint64_t));
+
+ printf("nr_elements\t%u\t", nr_elements);
+ for (unsigned int i = 0; i < nr_elements; i++) {
+ //A[i] = (unsigned int) (rand());
+ A[i] = i+1;
+ B[i] = 0;
+ }
+}
+
+/**
+* @brief compute output in the host
+*/
+static int select_host(int size, int t) {
+ pos = 0;
+ C[pos] = A[pos];
+
+ omp_set_num_threads(t);
+ #pragma omp parallel for
+ for(int my = 1; my < size; my++) {
+ if(!pred(A[my])) {
+ int p;
+ // Atomically increment the shared counter and capture the new output position
+ #pragma omp atomic capture
+ p = ++pos;
+ C[p] = A[my];
+ }
+ }
+ return pos;
+}
+
+// Params ---------------------------------------------------------------------
+typedef struct Params {
+ char* dpu_type;
+ int input_size;
+ int n_warmup;
+ int n_reps;
+ int n_threads;
+}Params;
+
+void usage() {
+ fprintf(stderr,
+ "\nUsage: ./sel [options]"
+ "\n"
+ "\nGeneral options:"
+ "\n    -h        help"
+ "\n    -t <T>    # of threads (default=5)"
+ "\n    -w <W>    # of untimed warmup iterations (default=1)"
+ "\n    -e <E>    # of timed repetition iterations (default=3)"
+ "\n"
+ "\nBenchmark-specific options:"
+ "\n    -i <I>    input size (default=16M elements)"
+ "\n");
+}
+
+struct Params input_params(int argc, char **argv) {
+ struct Params p;
+ p.input_size = 16 << 20;
+ p.n_warmup = 1;
+ p.n_reps = 3;
+ p.n_threads = 5;
+
+ int opt;
+ while((opt = getopt(argc, argv, "hi:w:e:t:")) >= 0) {
+ switch(opt) {
+ case 'h':
+ usage();
+ exit(0);
+ break;
+ case 'i': p.input_size = atoi(optarg); break;
+ case 'w': p.n_warmup = atoi(optarg); break;
+ case 'e': p.n_reps = atoi(optarg); break;
+ case 't': p.n_threads = atoi(optarg); break;
+ default:
+ fprintf(stderr, "\nUnrecognized option!\n");
+ usage();
+ exit(0);
+ }
+ }
+ assert(p.n_threads > 0 && "Invalid # of threads!");
+
+ return p;
+}
+
+/**
+* @brief Main of the Host Application.
+*/
+int main(int argc, char **argv) {
+
+ struct Params p = input_params(argc, argv);
+
+ const unsigned int file_size = p.input_size;
+ uint32_t accum = 0;
+ int total_count;
+
+ // Create an input file with arbitrary data.
+ create_test_file(file_size);
+
+ Timer timer;
+ start(&timer, 0, 0);
+
+ total_count = select_host(file_size, p.n_threads);
+
+ stop(&timer, 0);
+
+ printf("Total count = %d\t", total_count);
+
+ printf("Kernel ");
+ print(&timer, 0, 1);
+ printf("\n");
+
+ free(A);
+ free(B);
+ free(C);
+ return 0;
+}
diff --git a/SEL/baselines/gpu/Makefile b/SEL/baselines/gpu/Makefile
new file mode 100644
index 0000000..22edd15
--- /dev/null
+++ b/SEL/baselines/gpu/Makefile
@@ -0,0 +1,5 @@
+all:
+ /usr/local/cuda/bin/nvcc select.cu -I/usr/local/cuda/include -lm -o select -D COARSENING=32 -D THREADS=512 -D INT64
+
+clean:
+ rm select
diff --git a/SEL/baselines/gpu/README b/SEL/baselines/gpu/README
new file mode 100644
index 0000000..e0e9a3e
--- /dev/null
+++ b/SEL/baselines/gpu/README
@@ -0,0 +1,21 @@
+Select (SEL)
+
+Compilation instructions
+
+ make
+
+Execution instructions
+
+ ./select 0 50 1258291200
+
+Compilation flags
+
+  FLOAT      - Single-precision arrays (default: double precision)
+  INT        - Integer arrays
+  INT64      - 64-bit integer arrays (the flag used by the provided Makefile)
+  THREADS    - Thread block size (default: 1024)
+  COARSENING - Coarsening factor (default: 16 for FLOAT/INT, 8 otherwise)
+  ATOMIC     - Use global atomics for synchronization (default: no atomics)
+
+  Note: the sample predicate (is_even) only compiles for INT/INT64.
+
+Read more
+J. Gomez-Luna et al., “In-place Data Sliding Algorithms for Many-core Architectures,” ICPP 2015.
+
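For reference, both GPU kernels compute the same result as the sequential loops in select.cu (cpu_copy_if and cpu_remove_if); the DS kernels only perform the compaction in parallel (and, for remove_if, in place). A minimal standalone sketch of those semantics in plain C, assuming an int array and the even-value predicate used by the benchmark:

    #include <stdio.h>

    static int is_even(int x) { return (x % 2) == 0; }

    /* copy_if: copy elements satisfying the predicate to out; return their count */
    static int copy_if(int *out, const int *in, int n) {
        int pos = 0;
        for (int i = 0; i < n; i++)
            if (is_even(in[i])) out[pos++] = in[i];
        return pos;
    }

    /* remove_if: compact the elements NOT satisfying the predicate to the front of buf */
    static int remove_if(int *buf, int n) {
        int pos = 0;
        for (int i = 0; i < n; i++)
            if (!is_even(buf[i])) buf[pos++] = buf[i];
        return pos;
    }

    int main(void) {
        int a[] = {1, 2, 3, 4, 5, 6}, b[6];
        int kept = copy_if(b, a, 6);      /* b = {2, 4, 6} */
        int left = remove_if(a, 6);       /* a = {1, 3, 5, ...} */
        printf("copy_if kept %d elements, remove_if kept %d elements\n", kept, left);
        return 0;
    }
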
diff --git a/SEL/baselines/gpu/ds.h b/SEL/baselines/gpu/ds.h
new file mode 100644
index 0000000..1fa73de
--- /dev/null
+++ b/SEL/baselines/gpu/ds.h
@@ -0,0 +1,293 @@
+/***************************************************************************
+ *cr
+ *cr (C) Copyright 2015 The Board of Trustees of the
+ *cr University of Illinois
+ *cr All Rights Reserved
+ *cr
+ ***************************************************************************/
+/*
+ In-Place Data Sliding Algorithms for Many-Core Architectures, presented in ICPP’15
+
+ Copyright (c) 2015 University of Illinois at Urbana-Champaign.
+ All rights reserved.
+
+ Permission to use, copy, modify and distribute this software and its documentation for
+ educational purpose is hereby granted without fee, provided that the above copyright
+ notice and this permission notice appear in all copies of this software and that you do
+ not sell the software.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
+ OTHERWISE.
+
+ Authors: Juan Gómez-Luna (el1goluj@uco.es, gomezlun@illinois.edu), Li-Wen Chang (lchang20@illinois.edu)
+*/
+
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <ctime>
+#include <cstdio>
+#include <math.h>
+#include <sys/time.h>
+#include <vector>
+
+#ifdef FLOAT
+#define T float
+#elif INT
+#define T int
+#elif INT64
+#define T int64_t
+#else
+#define T double
+#endif
+
+#ifdef THREADS
+#define L_DIM THREADS
+#else
+#define L_DIM 1024
+#endif
+
+#ifdef COARSENING
+#define REGS COARSENING
+#else
+#ifdef FLOAT
+#define REGS 16
+#elif INT
+#define REGS 16
+#else
+#define REGS 8
+#endif
+#endif
+
+#ifdef ATOMIC
+#define ATOM 1
+#else
+#define ATOM 0
+#endif
+
+#define WARP_SIZE 32
+
+#define PRINT 0
+
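+// Layout of the runtime flags array (allocated and initialized in select.cu):
+// flags[i] != 0 signals that block i may proceed; in the irregular variants the
+// value also encodes the running output count + 1 (flags[0] is pre-set to 1).
+// flags[num_flags + 1] is an atomic counter used to hand out block IDs in
+// scheduling order.
+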
+// Dynamic allocation of runtime workgroup id
+__device__ int dynamic_wg_id(volatile unsigned int *flags, const int num_flags){
+ __shared__ int gid_;
+ if (threadIdx.x == 0) gid_ = atomicAdd((unsigned int*)&flags[num_flags + 1], 1);
+ __syncthreads();
+ int my_s = gid_;
+ return my_s;
+}
+
+// Set global synchronization (regular DS)
+__device__ void ds_sync(volatile unsigned int *flags, const int my_s){
+#if ATOM
+ if (threadIdx.x == 0){
+ while (atomicOr((unsigned int*)&flags[my_s], 0) == 0){}
+ atomicOr((unsigned int*)&flags[my_s + 1], 1);
+ }
+#else
+ if (threadIdx.x == 0){
+ while (flags[my_s] == 0){}
+ flags[my_s + 1] = 1;
+ }
+#endif
+ __syncthreads();
+}
+
+// Set global synchronization (irregular DS)
+__device__ void ds_sync_irregular(volatile unsigned int *flags, const int my_s, int *count){
+#if ATOM
+ if (threadIdx.x == 0){
+ while (atomicOr((unsigned int*)&flags[my_s], 0) == 0){}
+ int flag = flags[my_s];
+ atomicAdd((unsigned int*)&flags[my_s + 1], flag + *count);
+ *count = flag - 1;
+ }
+#else
+ if (threadIdx.x == 0){
+ while (flags[my_s] == 0){}
+ int flag = flags[my_s];
+ flags[my_s + 1] = flag + *count;
+ *count = flag - 1;
+ }
+#endif
+ __syncthreads();
+}
+
+// Set global synchronization (irregular DS Partition)
+__device__ void ds_sync_irregular_partition(volatile unsigned int *flags1, volatile unsigned int *flags2, const int my_s, int *count1, int *count2){
+#if ATOM
+ if (threadIdx.x == 0){
+ while (atomicOr((unsigned int*)&flags1[my_s], 0) == 0){}
+ int flag2 = flags2[my_s];
+ atomicAdd((unsigned int*)&flags2[my_s + 1], flag2 + *count2);
+ int flag1 = flags1[my_s];
+ atomicAdd((unsigned int*)&flags1[my_s + 1], flag1 + *count1);
+ *count1 = flag1 - 1;
+ *count2 = flag2 - 1;
+ }
+#else
+ if (threadIdx.x == 0){
+ while (flags1[my_s] == 0){}
+ int flag2 = flags2[my_s];
+ flags2[my_s + 1] = flag2 + *count2;
+ int flag1 = flags1[my_s];
+ flags1[my_s + 1] = flag1 + *count1;
+ *count1 = flag1 - 1;
+ *count2 = flag2 - 1;
+ }
+#endif
+ __syncthreads();
+}
+
+// Reduction kernel (CUDA SDK reduce6)
+template <class S>
+__device__ void reduction(S *count, S local_cnt){
+ __shared__ S sdata[L_DIM];
+
+ unsigned int tid = threadIdx.x;
+ S mySum = local_cnt;
+
+ // each thread puts its local sum into shared memory
+ sdata[tid] = local_cnt;
+ __syncthreads();
+
+ // do reduction in shared mem
+ if ((blockDim.x >= 1024) && (tid < 512)){
+ sdata[tid] = mySum = mySum + sdata[tid + 512];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 512) && (tid < 256)){
+ sdata[tid] = mySum = mySum + sdata[tid + 256];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 256) && (tid < 128)){
+ sdata[tid] = mySum = mySum + sdata[tid + 128];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 128) && (tid < 64)){
+ sdata[tid] = mySum = mySum + sdata[tid + 64];
+ }
+ __syncthreads();
+
+#if (__CUDA_ARCH__ >= 300 )
+ if ( tid < 32 ){
+ // Fetch final intermediate sum from 2nd warp
+ if (blockDim.x >= 64) mySum += sdata[tid + 32];
+ // Reduce final warp using shuffle
+ #pragma unroll
+ for (int offset = WARP_SIZE/2; offset > 0; offset /= 2){
+ //mySum += __shfl_down(mySum, offset);
+ mySum += __shfl_xor(mySum, offset);
+ }
+ }
+#else
+ // fully unroll reduction within a single warp
+ if ((blockDim.x >= 64) && (tid < 32)){
+ sdata[tid] = mySum = mySum + sdata[tid + 32];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 32) && (tid < 16)){
+ sdata[tid] = mySum = mySum + sdata[tid + 16];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 16) && (tid < 8)){
+ sdata[tid] = mySum = mySum + sdata[tid + 8];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 8) && (tid < 4)){
+ sdata[tid] = mySum = mySum + sdata[tid + 4];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 4) && (tid < 2)){
+ sdata[tid] = mySum = mySum + sdata[tid + 2];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 2) && ( tid < 1)){
+ sdata[tid] = mySum = mySum + sdata[tid + 1];
+ }
+ __syncthreads();
+#endif
+
+ // write result for this block to global mem
+ if (tid == 0) *count = mySum;
+}
+
+// Binary prefix-sum (GPU Computing Gems)
+__device__ inline int lane_id(void) { return threadIdx.x % WARP_SIZE; }
+__device__ inline int warp_id(void) { return threadIdx.x / WARP_SIZE; }
+
+__device__ unsigned int warp_prefix_sums(bool p){
+ unsigned int b = __ballot(p);
+ return __popc(b & ((1 << lane_id()) - 1));
+}
+
+__device__ int warp_scan(int val, volatile int *s_data){
+#if (__CUDA_ARCH__ < 300 )
+ int idx = 2 * threadIdx.x - (threadIdx.x & (WARP_SIZE - 1));
+ s_data[idx] = 0;
+ idx += WARP_SIZE;
+ int t = s_data[idx] = val;
+ s_data[idx] = t = t + s_data[idx - 1];
+ s_data[idx] = t = t + s_data[idx - 2];
+ s_data[idx] = t = t + s_data[idx - 4];
+ s_data[idx] = t = t + s_data[idx - 8];
+ s_data[idx] = t = t + s_data[idx - 16];
+ return s_data[idx - 1];
+#else
+ int x = val;
+ #pragma unroll
+ for(int offset = 1; offset < 32; offset <<= 1){
+ // From GTC: Kepler shuffle tips and tricks:
+#if 0
+ int y = __shfl_up(x, offset);
+ if(lane_id() >= offset)
+ x += y;
+#else
+ asm volatile("{"
+ " .reg .s32 r0;"
+ " .reg .pred p;"
+ " shfl.up.b32 r0|p, %0, %1, 0x0;"
+ " @p add.s32 r0, r0, %0;"
+ " mov.s32 %0, r0;"
+ "}" : "+r"(x) : "r"(offset));
+#endif
+ }
+ return x - val;
+#endif
+}
+
+__device__ int block_binary_prefix_sums(int* count, int x){
+
+ __shared__ int sdata[L_DIM];
+
+ // A. Exclusive scan within each warp
+ int warpPrefix = warp_prefix_sums(x);
+
+ // B. Store in shared memory
+ if(lane_id() == WARP_SIZE - 1)
+ sdata[warp_id()] = warpPrefix + x;
+ __syncthreads();
+
+ // C. One warp scans in shared memory
+ if(threadIdx.x < WARP_SIZE)
+ sdata[threadIdx.x] = warp_scan(sdata[threadIdx.x], sdata);
+ __syncthreads();
+
+ // D. Each thread calculates its final value
+ int thread_out_element = warpPrefix + sdata[warp_id()];
+ int output = thread_out_element + *count;
+ __syncthreads();
+ if(threadIdx.x == blockDim.x - 1)
+ *count += (thread_out_element + x);
+
+ return output;
+}
diff --git a/SEL/baselines/gpu/kernel.cu b/SEL/baselines/gpu/kernel.cu
new file mode 100644
index 0000000..607dfd1
--- /dev/null
+++ b/SEL/baselines/gpu/kernel.cu
@@ -0,0 +1,113 @@
+/***************************************************************************
+ *cr
+ *cr (C) Copyright 2015 The Board of Trustees of the
+ *cr University of Illinois
+ *cr All Rights Reserved
+ *cr
+ ***************************************************************************/
+/*
+ In-Place Data Sliding Algorithms for Many-Core Architectures, presented in ICPP’15
+
+ Copyright (c) 2015 University of Illinois at Urbana-Champaign.
+ All rights reserved.
+
+ Permission to use, copy, modify and distribute this software and its documentation for
+ educational purpose is hereby granted without fee, provided that the above copyright
+ notice and this permission notice appear in all copies of this software and that you do
+ not sell the software.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
+ OTHERWISE.
+
+ Authors: Juan Gómez-Luna (el1goluj@uco.es, gomezlun@illinois.edu), Li-Wen Chang (lchang20@illinois.edu)
+*/
+
+__global__ void select_remove_if(T *matrix_out, T *matrix,
+ int size,
+ volatile unsigned int *flags,
+ struct is_even pred)
+{
+ __shared__ int count; // Counter for number of non-zero elements per block
+ const int num_flags = size % (blockDim.x * REGS) == 0 ? size / (blockDim.x * REGS) : size / (blockDim.x * REGS) + 1;
+
+ // Dynamic allocation of runtime workgroup id
+ if (threadIdx.x == 0) count = 0;
+ const int my_s = dynamic_wg_id(flags, num_flags);
+
+ int local_cnt = 0;
+ // Declare on-chip memory
+ T reg[REGS];
+ int pos = my_s * REGS * blockDim.x + threadIdx.x;
+ // Load in on-chip memory
+ #pragma unroll
+ for (int j = 0; j < REGS; j++){
+ if (pos < size){
+ reg[j] = matrix[pos];
+ if(!pred(reg[j]))
+ local_cnt++;
+ else
+ reg[j] = -1;
+ }
+ else
+ reg[j] = -1;
+ pos += blockDim.x;
+ }
+ reduction<int>(&count, local_cnt);
+
+ // Set global synch
+ ds_sync_irregular(flags, my_s, &count);
+
+ // Store to global memory
+ #pragma unroll
+ for (int j = 0; j < REGS; j++){
+ pos = block_binary_prefix_sums(&count, reg[j] >= 0);
+ if (reg[j] >= 0){
+ matrix_out[pos] = reg[j];
+ }
+ }
+}
+
+__global__ void select_copy_if(T *matrix_out, T *matrix,
+ int size,
+ volatile unsigned int *flags,
+ struct is_even pred)
+{
+ __shared__ int count; // Counter for number of non-zero elements per block
+ const int num_flags = size % (blockDim.x * REGS) == 0 ? size / (blockDim.x * REGS) : size / (blockDim.x * REGS) + 1;
+
+ // Dynamic allocation of runtime workgroup id
+ if (threadIdx.x == 0) count = 0;
+ const int my_s = dynamic_wg_id(flags, num_flags);
+
+ int local_cnt = 0;
+ // Declare on-chip memory
+ T reg[REGS];
+ int pos = my_s * REGS * blockDim.x + threadIdx.x;
+ // Load in on-chip memory
+ #pragma unroll
+ for (int j = 0; j < REGS; j++){
+ if (pos < size){
+ reg[j] = matrix[pos];
+ if(pred(reg[j]))
+ local_cnt++;
+ else
+ reg[j] = -1;
+ }
+ else
+ reg[j] = -1;
+ pos += blockDim.x;
+ }
+ reduction<int>(&count, local_cnt);
+
+ // Set global synch
+ ds_sync_irregular(flags, my_s, &count);
+
+ // Store to global memory
+ #pragma unroll
+ for (int j = 0; j < REGS; j++){
+ pos = block_binary_prefix_sums(&count, reg[j] >= 0);
+ if (reg[j] >= 0){
+ matrix_out[pos] = reg[j];
+ }
+ }
+}
diff --git a/SEL/baselines/gpu/select.cu b/SEL/baselines/gpu/select.cu
new file mode 100644
index 0000000..b3c60d6
--- /dev/null
+++ b/SEL/baselines/gpu/select.cu
@@ -0,0 +1,242 @@
+/***************************************************************************
+ *cr
+ *cr (C) Copyright 2015 The Board of Trustees of the
+ *cr University of Illinois
+ *cr All Rights Reserved
+ *cr
+ ***************************************************************************/
+/*
+ In-Place Data Sliding Algorithms for Many-Core Architectures, presented in ICPP’15
+
+ Copyright (c) 2015 University of Illinois at Urbana-Champaign.
+ All rights reserved.
+
+ Permission to use, copy, modify and distribute this software and its documentation for
+ educational purpose is hereby granted without fee, provided that the above copyright
+ notice and this permission notice appear in all copies of this software and that you do
+ not sell the software.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
+ OTHERWISE.
+
+ Authors: Juan Gómez-Luna (el1goluj@uco.es, gomezlun@illinois.edu), Li-Wen Chang (lchang20@illinois.edu)
+*/
+
+#include "ds.h"
+
+// Sample predicate for partition (only for INT)
+struct is_even{
+ __host__ __device__
+ bool operator()(const T &x){
+ return (x % 2) == 0;
+ }
+};
+
+#include "kernel.cu"
+
+// Sequential CPU version
+void cpu_copy_if(T* output, T* input, int elements, struct is_even pred){
+ int pos = 0;
+ for (int i = 0; i < elements; i++){
+ if(pred(input[i])){
+ output[pos] = input[i];
+ pos++;
+ }
+ }
+}
+void cpu_remove_if(T* input, int elements, struct is_even pred){
+ int pos = 0;
+ for (int i = 0; i < elements; i++){
+ if(!pred(input[i])){
+ input[pos] = input[i];
+ pos++;
+ }
+ }
+}
+
+int main(int argc, char **argv){
+
+ // Syntax verification
+ if (argc != 4) {
+ printf("Wrong format\n");
+ printf("Syntax: %s <device> <input (%% of elements)> <numElements>\n", argv[0]);
+ exit(1);
+ }
+ int device = atoi(argv[1]);
+ int input = atoi(argv[2]);
+ int numElements = atoi(argv[3]);
+ size_t size = numElements * sizeof(T);
+
+ // Set device
+ cudaDeviceProp device_properties;
+ cudaGetDeviceProperties(&device_properties,device);
+ cudaSetDevice(device);
+
+ printf("DS Select on %s\n", device_properties.name);
+ printf("Thread block size = %d\n", L_DIM);
+ printf("Coarsening factor = %d\n", REGS);
+#ifdef FLOAT
+ printf("Single precision array: %d elements\n", numElements);
+#elif INT
+ printf("Integer array: %d elements\n", numElements);
+#else
+ printf("Double precision array: %d elements\n", numElements);
+#endif
+
+ // Event creation
+ cudaEvent_t start, stop;
+ cudaEventCreate(&start);
+ cudaEventCreate(&stop);
+
+ float time1 = 0;
+ float time2 = 0;
+ float time3 = 0;
+
+ // Allocate the host input vector A
+ T *h_A = (T*)malloc(size);
+
+ // Allocate the host output vectors
+ T *h_B = (T*)malloc(size);
+ T *h_C = (T*)malloc(size);
+ T *h_D = (T*)malloc(size);
+
+ // Allocate the device input vector A and output vector B
+ T *d_A = NULL;
+ cudaMalloc((void **)&d_A, size);
+ T *d_B = NULL;
+ cudaMalloc((void **)&d_B, size);
+
+#define WARMUP 2
+#define REP 10
+ unsigned int flagM1 = 0;
+ unsigned int flagM2 = 0;
+ for(int iteration = 0; iteration < REP+WARMUP; iteration++){
+ // Initialize the host input vectors
+ srand(2014);
+ for(int i = 0; i < numElements; i++)
+ h_A[i] = i % 2 != 0 ? i:i+1;
+ int M = (int)(((long long)numElements * input) / 100); // 64-bit intermediate to avoid overflow for large inputs
+ int m = M;
+ while(m>0){
+ int x = (int)(numElements*(((float)rand()/(float)RAND_MAX)));
+ if(h_A[x] % 2 != 0){
+ h_A[x] = x * 2;
+ m--;
+ }
+ }
+
+#if PRINT
+ for(int i = 0; i < numElements; ++i){
+ printf("%d ",*(h_A+i));
+ }
+ printf("\n");
+#endif
+
+ // Copy the host input vector A in host memory to the device input vector in device memory
+ cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
+
+ int ldim = L_DIM;
+ // Atomic flags
+ unsigned int* d_flags = NULL;
+ const int num_flags = numElements % (ldim * REGS) == 0 ? numElements / (ldim * REGS) : numElements / (ldim * REGS) + 1;
+ unsigned int *flags = (unsigned int *)calloc(num_flags + 2, sizeof(unsigned int));
+ flags[0] = 1;
+ flags[num_flags + 1] = 0;
+ cudaMalloc((void **)&d_flags, (num_flags + 2) * sizeof(unsigned int));
+ cudaMemcpy(d_flags, flags, (num_flags + 2) * sizeof(unsigned int), cudaMemcpyHostToDevice);
+ // Number of work-groups/thread blocks
+ int num_wg = num_flags;
+
+ // Start timer
+ cudaEventRecord( start, 0 );
+
+ // Kernel launch (Copy_if)
+ select_copy_if<<<num_wg, ldim>>>(d_B, d_A, numElements, d_flags, is_even());
+
+ cudaMemcpy(&flagM1, d_flags + num_flags, sizeof(unsigned int), cudaMemcpyDeviceToHost);
+
+ // Stop timer
+ cudaEventRecord( stop, 0 );
+ cudaEventSynchronize( stop );
+ cudaEventElapsedTime( &time1, start, stop );
+ if(iteration >= WARMUP) time2 += time1;
+
+ if(iteration == REP+WARMUP-1){
+ float timer = time2 / REP;
+ double bw = (double)((numElements + flagM1) * sizeof(T)) / (double)(timer * 1000000.0);
+ printf("Copy_if - Execution time = %f ms, Throughput = %f GB/s\n", timer, bw);
+ }
+
+ // Atomic flags
+ cudaMemcpy(d_flags, flags, (num_flags + 2) * sizeof(unsigned int), cudaMemcpyHostToDevice);
+ free(flags);
+
+ // Start timer
+ cudaEventRecord( start, 0 );
+
+ // Kernel launch (Remove_if)
+ select_remove_if<<<num_wg, ldim>>>(d_A, d_A, numElements, d_flags, is_even());
+
+ cudaMemcpy(&flagM2, d_flags + num_flags, sizeof(unsigned int), cudaMemcpyDeviceToHost);
+
+ // End timer
+ cudaEventRecord( stop, 0 );
+ cudaEventSynchronize( stop );
+ cudaEventElapsedTime( &time1, start, stop );
+ if(iteration >= WARMUP) time3 += time1;
+
+ if(iteration == REP+WARMUP-1){
+ float timer = time3 / REP;
+ double bw = (double)((numElements + flagM2) * sizeof(T)) / (double)(timer * 1000000.0);
+ printf("Remove_if - Execution time = %f ms, Throughput = %f GB/s\n", timer, bw);
+ }
+
+ // Free flags
+ cudaFree(d_flags);
+ }
+ // Copy to host memory
+ cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost);
+ cudaMemcpy(h_C, d_A, size, cudaMemcpyDeviceToHost);
+
+ // CPU execution for comparison
+ cpu_copy_if(h_D, h_A, numElements, is_even());
+ cpu_remove_if(h_A, numElements, is_even());
+
+ // Verify that the result vector is correct
+#if PRINT
+ for(int i = 0; i < numElements; ++i){
+ printf("%d ",*(h_B+i));
+ }
+ printf("\n");
+ for(int i = 0; i < numElements; ++i){
+ printf("%d ",*(h_D+i));
+ }
+ printf("\n");
+#endif
+ for (int i = 0; i < flagM1 - 1; ++i){
+ if (h_B[i] != h_D[i]){
+ fprintf(stderr, "Copy_if - Result verification failed at element %d!\n", i);
+ exit(EXIT_FAILURE);
+ }
+ }
+ for (int i = 0; i < flagM2 - 1; ++i){
+ if (h_C[i] != h_A[i]){
+ fprintf(stderr, "Remove_if - Result verification failed at element %d!\n", i);
+ exit(EXIT_FAILURE);
+ }
+ }
+ printf("Test PASSED\n");
+
+ // Free device global memory
+ cudaFree(d_A);
+ cudaFree(d_B);
+ cudaEventDestroy(start);
+ cudaEventDestroy(stop);
+ // Free host memory
+ free(h_A);
+ free(h_B);
+ free(h_C);
+ free(h_D);
+
+ return 0;
+}
diff --git a/SEL/dpu/task.c b/SEL/dpu/task.c
new file mode 100644
index 0000000..c161d10
--- /dev/null
+++ b/SEL/dpu/task.c
@@ -0,0 +1,123 @@
+/*
+* Select with multiple tasklets
+*
+*/
+#include <stdint.h>
+#include <stdio.h>
+#include <defs.h>
+#include <mram.h>
+#include <alloc.h>
+#include <perfcounter.h>
+#include <handshake.h>
+#include <barrier.h>
+
+#include "../support/common.h"
+
+__host dpu_arguments_t DPU_INPUT_ARGUMENTS;
+__host dpu_results_t DPU_RESULTS[NR_TASKLETS];
+
+// Array for communication between adjacent tasklets
+uint32_t message[NR_TASKLETS];
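+// Running count of selected elements across processed blocks (updated by the last tasklet)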
+uint32_t message_partial_count;
+
+// SEL in each tasklet
+static unsigned int select(T *output, T *input){
+ unsigned int pos = 0;
+ #pragma unroll
+ for(unsigned int j = 0; j < REGS; j++) {
+ if(!pred(input[j])) {
+ output[pos] = input[j];
+ pos++;
+ }
+ }
+ return pos;
+}
+
+// Handshake with adjacent tasklets
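+// Each tasklet waits for the running count from its left neighbor, adds its own
+// count, and passes the sum to the right: an exclusive prefix sum of l_count
+// across tasklets.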
+static unsigned int handshake_sync(unsigned int l_count, unsigned int tasklet_id){
+ unsigned int p_count;
+ // Wait and read message
+ if(tasklet_id != 0){
+ handshake_wait_for(tasklet_id - 1);
+ p_count = message[tasklet_id];
+ }
+ else
+ p_count = 0;
+ // Write message and notify
+ if(tasklet_id < NR_TASKLETS - 1){
+ message[tasklet_id + 1] = p_count + l_count;
+ handshake_notify();
+ }
+ return p_count;
+}
+
+// Barrier
+BARRIER_INIT(my_barrier, NR_TASKLETS);
+
+extern int main_kernel1(void);
+
+int (*kernels[nr_kernels])(void) = {main_kernel1};
+
+int main(void) {
+ // Kernel
+ return kernels[DPU_INPUT_ARGUMENTS.kernel]();
+}
+
+// main_kernel1
+int main_kernel1() {
+ unsigned int tasklet_id = me();
+#if PRINT
+ printf("tasklet_id = %u\n", tasklet_id);
+#endif
+ if (tasklet_id == 0){ // Initialize once the cycle counter
+ mem_reset(); // Reset the heap
+ }
+ // Barrier
+ barrier_wait(&my_barrier);
+
+ dpu_results_t *result = &DPU_RESULTS[tasklet_id];
+
+ uint32_t input_size_dpu_bytes = DPU_INPUT_ARGUMENTS.size;
+
+ // Address of the current processing block in MRAM
+ uint32_t base_tasklet = tasklet_id << BLOCK_SIZE_LOG2;
+ uint32_t mram_base_addr_A = (uint32_t)DPU_MRAM_HEAP_POINTER;
+ uint32_t mram_base_addr_B = (uint32_t)(DPU_MRAM_HEAP_POINTER + input_size_dpu_bytes);
+
+ // Initialize a local cache to store the MRAM block
+ T *cache_A = (T *) mem_alloc(BLOCK_SIZE);
+ T *cache_B = (T *) mem_alloc(BLOCK_SIZE);
+
+ // Initialize shared variable
+ if(tasklet_id == NR_TASKLETS - 1)
+ message_partial_count = 0;
+ // Barrier
+ barrier_wait(&my_barrier);
+
+ for(unsigned int byte_index = base_tasklet; byte_index < input_size_dpu_bytes; byte_index += BLOCK_SIZE * NR_TASKLETS){
+
+ // Load cache with current MRAM block
+ mram_read((__mram_ptr void const*)(mram_base_addr_A + byte_index), cache_A, BLOCK_SIZE);
+
+ // SELECT in each tasklet
+ uint32_t l_count = select(cache_B, cache_A); // Out-of-place: selected elements from cache_A are packed into cache_B
+
+ // Sync with adjacent tasklets
+ uint32_t p_count = handshake_sync(l_count, tasklet_id);
+
+ // Barrier
+ barrier_wait(&my_barrier);
+
+ // Write cache to current MRAM block
+ mram_write(cache_B, (__mram_ptr void*)(mram_base_addr_B + (message_partial_count + p_count) * sizeof(T)), l_count * sizeof(T));
+
+ // Total count in this DPU
+ if(tasklet_id == NR_TASKLETS - 1){
+ result->t_count = message_partial_count + p_count + l_count;
+ message_partial_count = result->t_count;
+ }
+
+ }
+
+ return 0;
+}
diff --git a/SEL/host/app.c b/SEL/host/app.c
new file mode 100644
index 0000000..ef07cf9
--- /dev/null
+++ b/SEL/host/app.c
@@ -0,0 +1,255 @@
+/**
+* app.c
+* SEL Host Application Source File
+*
+*/
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <dpu.h>
+#include <dpu_log.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <assert.h>
+
+#include "../support/common.h"
+#include "../support/timer.h"
+#include "../support/params.h"
+
+// Define the DPU Binary path as DPU_BINARY here
+#ifndef DPU_BINARY
+#define DPU_BINARY "./bin/dpu_code"
+#endif
+
+#if ENERGY
+#include <dpu_probe.h>
+#endif
+
+// Pointer declaration
+static T* A;
+static T* C;
+static T* C2;
+
+// Create input arrays
+static void read_input(T* A, unsigned int nr_elements, unsigned int nr_elements_round) {
+ //srand(0);
+ printf("nr_elements\t%u\t", nr_elements);
+ for (unsigned int i = 0; i < nr_elements; i++) {
+ //A[i] = (T) (rand());
+ A[i] = i + 1;
+ }
+ for (unsigned int i = nr_elements; i < nr_elements_round; i++) { // Complete with removable elements
+ A[i] = 0;
+ }
+}
+
+// Compute output in the host
+static unsigned int select_host(T* C, T* A, unsigned int nr_elements) {
+ unsigned int pos = 0;
+ for (unsigned int i = 0; i < nr_elements; i++) {
+ if(!pred(A[i])) {
+ C[pos] = A[i];
+ pos++;
+ }
+ }
+ return pos;
+}
+
+// Main of the Host Application
+int main(int argc, char **argv) {
+
+ struct Params p = input_params(argc, argv);
+
+ struct dpu_set_t dpu_set, dpu;
+ uint32_t nr_of_dpus;
+
+#if ENERGY
+ struct dpu_probe_t probe;
+ DPU_ASSERT(dpu_probe_init("energy_probe", &probe));
+#endif
+
+ // Allocate DPUs and load binary
+ DPU_ASSERT(dpu_alloc(NR_DPUS, NULL, &dpu_set));
+ DPU_ASSERT(dpu_load(dpu_set, DPU_BINARY, NULL));
+ DPU_ASSERT(dpu_get_nr_dpus(dpu_set, &nr_of_dpus));
+ printf("Allocated %d DPU(s)\n", nr_of_dpus);
+
+ unsigned int i = 0;
+ uint32_t accum = 0;
+ uint32_t total_count = 0;
+
+ const unsigned int input_size = p.exp == 0 ? p.input_size * nr_of_dpus : p.input_size; // Total input size (weak or strong scaling)
+ const unsigned int input_size_dpu_ = divceil(input_size, nr_of_dpus); // Input size per DPU (max.)
+ const unsigned int input_size_dpu_round =
+ (input_size_dpu_ % (NR_TASKLETS * REGS) != 0) ? roundup(input_size_dpu_, (NR_TASKLETS * REGS)) : input_size_dpu_; // Input size per DPU (max.), 8-byte aligned
+
+ // Input/output allocation
+ A = malloc(input_size_dpu_round * nr_of_dpus * sizeof(T));
+ C = malloc(input_size_dpu_round * nr_of_dpus * sizeof(T));
+ C2 = malloc(input_size_dpu_round * nr_of_dpus * sizeof(T));
+ T *bufferA = A;
+ T *bufferC = C2;
+
+ // Create an input file with arbitrary data
+ read_input(A, input_size, input_size_dpu_round * nr_of_dpus);
+
+ // Timer declaration
+ Timer timer;
+
+ printf("NR_TASKLETS\t%d\tBL\t%d\n", NR_TASKLETS, BL);
+
+ // Loop over main kernel
+ for(int rep = 0; rep < p.n_warmup + p.n_reps; rep++) {
+
+ // Compute output on CPU (performance comparison and verification purposes)
+ if(rep >= p.n_warmup)
+ start(&timer, 0, rep - p.n_warmup);
+ total_count = select_host(C, A, input_size);
+ if(rep >= p.n_warmup)
+ stop(&timer, 0);
+
+ printf("Load input data\n");
+ if(rep >= p.n_warmup)
+ start(&timer, 1, rep - p.n_warmup);
+ // Input arguments
+ const unsigned int input_size_dpu = input_size_dpu_round;
+ unsigned int kernel = 0;
+ dpu_arguments_t input_arguments = {input_size_dpu * sizeof(T), kernel};
+ // Copy input arrays
+ i = 0;
+ DPU_FOREACH(dpu_set, dpu, i) {
+ DPU_ASSERT(dpu_prepare_xfer(dpu, &input_arguments));
+ }
+ DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, "DPU_INPUT_ARGUMENTS", 0, sizeof(input_arguments), DPU_XFER_DEFAULT));
+ DPU_FOREACH(dpu_set, dpu, i) {
+ DPU_ASSERT(dpu_prepare_xfer(dpu, bufferA + input_size_dpu * i));
+ }
+ DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, input_size_dpu * sizeof(T), DPU_XFER_DEFAULT));
+ if(rep >= p.n_warmup)
+ stop(&timer, 1);
+
+ printf("Run program on DPU(s) \n");
+ // Run DPU kernel
+ if(rep >= p.n_warmup) {
+ start(&timer, 2, rep - p.n_warmup);
+ #if ENERGY
+ DPU_ASSERT(dpu_probe_start(&probe));
+ #endif
+ }
+ DPU_ASSERT(dpu_launch(dpu_set, DPU_SYNCHRONOUS));
+ if(rep >= p.n_warmup) {
+ stop(&timer, 2);
+ #if ENERGY
+ DPU_ASSERT(dpu_probe_stop(&probe));
+ #endif
+ }
+
+#if PRINT
+ {
+ unsigned int each_dpu = 0;
+ printf("Display DPU Logs\n");
+ DPU_FOREACH (dpu_set, dpu) {
+ printf("DPU#%d:\n", each_dpu);
+ DPU_ASSERT(dpulog_read_for_dpu(dpu.dpu, stdout));
+ each_dpu++;
+ }
+ }
+#endif
+
+ printf("Retrieve results\n");
+ dpu_results_t results[nr_of_dpus];
+ uint32_t* results_scan = malloc(nr_of_dpus * sizeof(uint32_t));
+ i = 0;
+ accum = 0;
+
+ if(rep >= p.n_warmup)
+ start(&timer, 3, rep - p.n_warmup);
+ // PARALLEL RETRIEVE TRANSFER
+ dpu_results_t* results_retrieve[nr_of_dpus];
+
+ DPU_FOREACH(dpu_set, dpu, i) {
+ results_retrieve[i] = (dpu_results_t*)malloc(NR_TASKLETS * sizeof(dpu_results_t));
+ DPU_ASSERT(dpu_prepare_xfer(dpu, results_retrieve[i]));
+ }
+ DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_FROM_DPU, "DPU_RESULTS", 0, NR_TASKLETS * sizeof(dpu_results_t), DPU_XFER_DEFAULT));
+
+ DPU_FOREACH(dpu_set, dpu, i) {
+ // Retrieve tasklet timings
+ for (unsigned int each_tasklet = 0; each_tasklet < NR_TASKLETS; each_tasklet++) {
+ // Count of this DPU
+ if(each_tasklet == NR_TASKLETS - 1){
+ results[i].t_count = results_retrieve[i][each_tasklet].t_count;
+ }
+ }
+ // Sequential scan
+ uint32_t temp = results[i].t_count;
+ results_scan[i] = accum;
+ accum += temp;
+#if PRINT
+ printf("i=%d -- %u, %u, %u\n", i, results_scan[i], accum, temp);
+#endif
+ free(results_retrieve[i]);
+ }
+ if(rep >= p.n_warmup)
+ stop(&timer, 3);
+
+ i = 0;
+ if(rep >= p.n_warmup)
+ start(&timer, 4, rep - p.n_warmup);
+ DPU_FOREACH (dpu_set, dpu) {
+ // Copy output array
+ DPU_ASSERT(dpu_copy_from(dpu, DPU_MRAM_HEAP_POINTER_NAME, input_size_dpu * sizeof(T), bufferC + results_scan[i], results[i].t_count * sizeof(T)));
+
+ i++;
+ }
+ if(rep >= p.n_warmup)
+ stop(&timer, 4);
+
+ // Free memory
+ free(results_scan);
+ }
+
+ // Print timing results
+ printf("CPU ");
+ print(&timer, 0, p.n_reps);
+ printf("CPU-DPU ");
+ print(&timer, 1, p.n_reps);
+ printf("DPU Kernel ");
+ print(&timer, 2, p.n_reps);
+ printf("Inter-DPU ");
+ print(&timer, 3, p.n_reps);
+ printf("DPU-CPU ");
+ print(&timer, 4, p.n_reps);
+
+ #if ENERGY
+ double energy;
+ DPU_ASSERT(dpu_probe_get(&probe, DPU_ENERGY, DPU_AVERAGE, &energy));
+ printf("DPU Energy (J): %f\t", energy);
+ #endif
+
+ // Check output
+ bool status = true;
+ if(accum != total_count) status = false;
+ for (i = 0; i < accum; i++) {
+ if(C[i] != bufferC[i]){
+ status = false;
+#if PRINT
+ printf("%d: %lu -- %lu\n", i, C[i], bufferC[i]);
+#endif
+ }
+ }
+ if (status) {
+ printf("[" ANSI_COLOR_GREEN "OK" ANSI_COLOR_RESET "] Outputs are equal\n");
+ } else {
+ printf("[" ANSI_COLOR_RED "ERROR" ANSI_COLOR_RESET "] Outputs differ!\n");
+ }
+
+ // Deallocation
+ free(A);
+ free(C);
+ free(C2);
+ DPU_ASSERT(dpu_free(dpu_set));
+
+ return status ? 0 : -1;
+}
diff --git a/SEL/support/common.h b/SEL/support/common.h
new file mode 100755
index 0000000..72270b0
--- /dev/null
+++ b/SEL/support/common.h
@@ -0,0 +1,47 @@
+#ifndef _COMMON_H_
+#define _COMMON_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+// Structures used by both the host and the dpu to communicate information
+typedef struct {
+ uint32_t size;
+ enum kernels {
+ kernel1 = 0,
+ nr_kernels = 1,
+ } kernel;
+} dpu_arguments_t;
+
+typedef struct {
+ uint32_t t_count;
+} dpu_results_t;
+
+// Transfer size between MRAM and WRAM
+#ifdef BL
+#define BLOCK_SIZE_LOG2 BL
+#define BLOCK_SIZE (1 << BLOCK_SIZE_LOG2)
+#else
+#define BLOCK_SIZE_LOG2 8
+#define BLOCK_SIZE (1 << BLOCK_SIZE_LOG2)
+#define BL BLOCK_SIZE_LOG2
+#endif
+
+// Data type
+#define T uint64_t
+#define REGS (BLOCK_SIZE >> 3) // Elements of type T (8 bytes / 64 bits) per block
+
+// Sample predicate
+bool pred(const T x){
+ return (x % 2) == 0;
+}
+
+#ifndef ENERGY
+#define ENERGY 0
+#endif
+#define PRINT 0
+
+#define ANSI_COLOR_RED "\x1b[31m"
+#define ANSI_COLOR_GREEN "\x1b[32m"
+#define ANSI_COLOR_RESET "\x1b[0m"
+
+#define divceil(n, m) (((n)-1) / (m) + 1)
+#define roundup(n, m) ((((n) / (m)) * (m)) + (m))
+#endif
diff --git a/SEL/support/params.h b/SEL/support/params.h
new file mode 100644
index 0000000..bb86211
--- /dev/null
+++ b/SEL/support/params.h
@@ -0,0 +1,56 @@
+#ifndef _PARAMS_H_
+#define _PARAMS_H_
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <getopt.h>
+
+#include "common.h"
+
+typedef struct Params {
+ unsigned int input_size;
+ int n_warmup;
+ int n_reps;
+ int exp;
+}Params;
+
+static void usage() {
+ fprintf(stderr,
+ "\nUsage: ./program [options]"
+ "\n"
+ "\nGeneral options:"
+ "\n -h help"
+ "\n -w <W> # of untimed warmup iterations (default=1)"
+ "\n -e <E> # of timed repetition iterations (default=3)"
+ "\n -x <X> Weak (0) or strong (1) scaling (default=0)"
+ "\n"
+ "\nBenchmark-specific options:"
+ "\n -i <I> input size (default=3932160 elements)"
+ "\n");
+}
+
+struct Params input_params(int argc, char **argv) {
+ struct Params p;
+ p.input_size = 3932160;
+ p.n_warmup = 1;
+ p.n_reps = 3;
+ p.exp = 0;
+
+ int opt;
+ while((opt = getopt(argc, argv, "hi:w:e:x:")) >= 0) {
+ switch(opt) {
+ case 'h':
+ usage();
+ exit(0);
+ break;
+ case 'i': p.input_size = atoi(optarg); break;
+ case 'w': p.n_warmup = atoi(optarg); break;
+ case 'e': p.n_reps = atoi(optarg); break;
+ case 'x': p.exp = atoi(optarg); break;
+ default:
+ fprintf(stderr, "\nUnrecognized option!\n");
+ usage();
+ exit(0);
+ }
+ }
+ assert(NR_DPUS > 0 && "Invalid # of dpus!");
+
+ return p;
+}
+#endif
diff --git a/SEL/support/timer.h b/SEL/support/timer.h
new file mode 100755
index 0000000..b53d95f
--- /dev/null
+++ b/SEL/support/timer.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016 University of Cordoba and University of Illinois
+ * All rights reserved.
+ *
+ * Developed by: IMPACT Research Group
+ * University of Cordoba and University of Illinois
+ * http://impact.crhc.illinois.edu/
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * with the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * > Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimers.
+ * > Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimers in the
+ * documentation and/or other materials provided with the distribution.
+ * > Neither the names of IMPACT Research Group, University of Cordoba,
+ * University of Illinois nor the names of its contributors may be used
+ * to endorse or promote products derived from this Software without
+ * specific prior written permission.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
+ * THE SOFTWARE.
+ *
+ */
+
+#include <sys/time.h>
+
+typedef struct Timer{
+
+ struct timeval startTime[7];
+ struct timeval stopTime[7];
+ double time[7];
+
+}Timer;
+
+void start(Timer *timer, int i, int rep) {
+ if(rep == 0) {
+ timer->time[i] = 0.0;
+ }
+ gettimeofday(&timer->startTime[i], NULL);
+}
+
+void stop(Timer *timer, int i) {
+ gettimeofday(&timer->stopTime[i], NULL);
+ timer->time[i] += (timer->stopTime[i].tv_sec - timer->startTime[i].tv_sec) * 1000000.0 +
+ (timer->stopTime[i].tv_usec - timer->startTime[i].tv_usec);
+}
+
+void print(Timer *timer, int i, int REP) { printf("Time (ms): %f\t", timer->time[i] / (1000 * REP)); }