Diffstat (limited to 'UNI')
 -rw-r--r--  UNI/Makefile                       45
 -rw-r--r--  UNI/baselines/cpu/Makefile          6
 -rw-r--r--  UNI/baselines/cpu/README           12
 -rw-r--r--  UNI/baselines/cpu/app_baseline.c  143
 -rw-r--r--  UNI/baselines/gpu/Makefile          5
 -rw-r--r--  UNI/baselines/gpu/README           20
 -rw-r--r--  UNI/baselines/gpu/ds.h            293
 -rw-r--r--  UNI/baselines/gpu/kernel.cu        91
 -rw-r--r--  UNI/baselines/gpu/unique.cu       197
 -rw-r--r--  UNI/dpu/task.c                    147
 -rw-r--r--  UNI/host/app.c                    277
 -rwxr-xr-x  UNI/support/common.h               46
 -rw-r--r--  UNI/support/params.h               56
 -rwxr-xr-x  UNI/support/timer.h                59
 14 files changed, 1397 insertions, 0 deletions
diff --git a/UNI/Makefile b/UNI/Makefile
new file mode 100644
index 0000000..65c7962
--- /dev/null
+++ b/UNI/Makefile
@@ -0,0 +1,45 @@
+DPU_DIR := dpu
+HOST_DIR := host
+BUILDDIR ?= bin
+NR_TASKLETS ?= 16
+BL ?= 10
+NR_DPUS ?= 1
+ENERGY ?= 0
+
+define conf_filename
+ ${BUILDDIR}/.NR_DPUS_$(1)_NR_TASKLETS_$(2)_BL_$(3).conf
+endef
+CONF := $(call conf_filename,${NR_DPUS},${NR_TASKLETS},${BL})
+
+HOST_TARGET := ${BUILDDIR}/host_code
+DPU_TARGET := ${BUILDDIR}/dpu_code
+
+COMMON_INCLUDES := support
+HOST_SOURCES := $(wildcard ${HOST_DIR}/*.c)
+DPU_SOURCES := $(wildcard ${DPU_DIR}/*.c)
+
+.PHONY: all clean test
+
+__dirs := $(shell mkdir -p ${BUILDDIR})
+
+COMMON_FLAGS := -Wall -Wextra -g -I${COMMON_INCLUDES}
+HOST_FLAGS := ${COMMON_FLAGS} -std=c11 -O3 `dpu-pkg-config --cflags --libs dpu` -DNR_TASKLETS=${NR_TASKLETS} -DNR_DPUS=${NR_DPUS} -DBL=${BL} -DENERGY=${ENERGY}
+DPU_FLAGS := ${COMMON_FLAGS} -O2 -DNR_TASKLETS=${NR_TASKLETS} -DBL=${BL}
+
+all: ${HOST_TARGET} ${DPU_TARGET}
+
+${CONF}:
+	$(RM) $(call conf_filename,*,*,*)
+ touch ${CONF}
+
+${HOST_TARGET}: ${HOST_SOURCES} ${COMMON_INCLUDES} ${CONF}
+ $(CC) -o $@ ${HOST_SOURCES} ${HOST_FLAGS}
+
+${DPU_TARGET}: ${DPU_SOURCES} ${COMMON_INCLUDES} ${CONF}
+ dpu-upmem-dpurte-clang ${DPU_FLAGS} -o $@ ${DPU_SOURCES}
+
+clean:
+ $(RM) -r $(BUILDDIR)
+
+test: all
+ ./${HOST_TARGET}
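
The Makefile above parameterizes the build with NR_DPUS, NR_TASKLETS and BL, and the hidden ${CONF} sentinel file forces a rebuild of both binaries whenever any of those values changes. As an illustrative invocation (defaults and paths exactly as defined above; the -i/-w/-e options are parsed in UNI/support/params.h further down in this diff), a 4-DPU build and run could look like:

    make NR_DPUS=4 NR_TASKLETS=16 BL=10
    ./bin/host_code -i 3932160 -w 1 -e 3
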
diff --git a/UNI/baselines/cpu/Makefile b/UNI/baselines/cpu/Makefile
new file mode 100644
index 0000000..a1b4766
--- /dev/null
+++ b/UNI/baselines/cpu/Makefile
@@ -0,0 +1,6 @@
+all:
+ gcc -o uni -fopenmp app_baseline.c
+
+clean:
+ rm uni
+
diff --git a/UNI/baselines/cpu/README b/UNI/baselines/cpu/README
new file mode 100644
index 0000000..4263de5
--- /dev/null
+++ b/UNI/baselines/cpu/README
@@ -0,0 +1,12 @@
+Unique (UNI)
+
+Compilation instructions
+
+ make
+
+Execution instructions
+
+ ./uni -i 1258291200 -t 4
+
+Read more
+J. Gomez-Luna et al., “In-place Data Sliding Algorithms for Many-core Architectures,” ICPP 2015.
diff --git a/UNI/baselines/cpu/app_baseline.c b/UNI/baselines/cpu/app_baseline.c
new file mode 100644
index 0000000..9d3184c
--- /dev/null
+++ b/UNI/baselines/cpu/app_baseline.c
@@ -0,0 +1,143 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <assert.h>
+#include <stdint.h>
+
+#include <omp.h>
+#include "../../support/timer.h"
+
+#define T int64_t
+
+static int pos;
+
+static T *A;
+static T *B;
+static T *C;
+static T *C2;
+
+// Create a "test file"
+static T *create_test_file(unsigned int nr_elements) {
+ //srand(0);
+
+ A = (T*) malloc(nr_elements * sizeof(T));
+ B = (T*) malloc(nr_elements * sizeof(T));
+ C = (T*) malloc(nr_elements * sizeof(T));
+
+ printf("nr_elements\t%u\t", nr_elements);
+ for (int i = 0; i < nr_elements; i++) {
+ //A[i] = (unsigned int) (rand());
+ //A[i] = i+1;
+ //A[i] = i%2==0?i+1:i;
+ A[i] = i%2==0?i:i+1;
+ B[i] = 0;
+ }
+
+ return A;
+}
+
+// Compute output in the host
+static int unique_host(int size, int t) {
+ pos = 0;
+ C[pos] = A[pos];
+
+ omp_set_num_threads(t);
+ #pragma omp parallel for
+ for(int my = 1; my < size; my++) {
+ if(A[my] != A[my-1]) {
+            int p;
+            // Atomic capture: increment pos and read the new value in one step (avoids a race on pos)
+            #pragma omp atomic capture
+            p = ++pos;
+ C[p] = A[my];
+ }
+ }
+
+ return pos;
+}
+
+// Params
+typedef struct Params {
+ int input_size;
+ int n_warmup;
+ int n_reps;
+ int n_threads;
+}Params;
+
+void usage() {
+ fprintf(stderr,
+ "\nUsage: ./program [options]"
+ "\n"
+ "\nGeneral options:"
+ "\n -h help"
+ "\n -t <T> # of threads (default=8)"
+ "\n -w <W> # of untimed warmup iterations (default=1)"
+ "\n -e <E> # of timed repetition iterations (default=3)"
+ "\n"
+ "\nBenchmark-specific options:"
+        "\n    -i <I>    input size (default=16M elements)"
+ "\n");
+}
+
+struct Params input_params(int argc, char **argv) {
+ struct Params p;
+ p.input_size = 16 << 20;
+ p.n_warmup = 1;
+ p.n_reps = 3;
+ p.n_threads = 8;
+
+ int opt;
+    while((opt = getopt(argc, argv, "hi:w:e:t:")) >= 0) {
+ switch(opt) {
+ case 'h':
+ usage();
+ exit(0);
+ break;
+ case 'i': p.input_size = atoi(optarg); break;
+ case 'w': p.n_warmup = atoi(optarg); break;
+ case 'e': p.n_reps = atoi(optarg); break;
+ case 't': p.n_threads = atoi(optarg); break;
+ default:
+ fprintf(stderr, "\nUnrecognized option!\n");
+ usage();
+ exit(0);
+ }
+ }
+    assert(p.n_threads > 0 && "Invalid # of threads!");
+
+ return p;
+}
+
+// Main
+int main(int argc, char **argv) {
+
+ struct Params p = input_params(argc, argv);
+
+ const unsigned int file_size = p.input_size;
+ uint32_t accum = 0;
+ int total_count;
+
+ // Create an input file with arbitrary data
+ create_test_file(file_size);
+
+ Timer timer;
+ start(&timer, 0, 0);
+
+ total_count = unique_host(file_size, p.n_threads);
+
+ stop(&timer, 0);
+
+ printf("Total count = %d\t", total_count);
+
+ printf("Kernel ");
+ print(&timer, 0, 1);
+ printf("\n");
+
+    free(A);
+    free(B);
+    free(C);
+    return 0;
+}
diff --git a/UNI/baselines/gpu/Makefile b/UNI/baselines/gpu/Makefile
new file mode 100644
index 0000000..e8412b7
--- /dev/null
+++ b/UNI/baselines/gpu/Makefile
@@ -0,0 +1,5 @@
+all:
+ /usr/local/cuda/bin/nvcc unique.cu -I/usr/local/cuda/include -lm -o unique -D COARSENING=32 -D THREADS=512 -D INT64
+
+clean:
+ rm unique
diff --git a/UNI/baselines/gpu/README b/UNI/baselines/gpu/README
new file mode 100644
index 0000000..bee7280
--- /dev/null
+++ b/UNI/baselines/gpu/README
@@ -0,0 +1,20 @@
+Unique (UNI)
+
+Compilation instructions
+
+ make
+
+Execution instructions
+
+ ./unique 0 50 1258291200
+
+Compilation flags
+
+ FLOAT - For single precision arrays (Default: Double precision)
+    INT, INT64 - For integer arrays (int or int64_t)
+ THREADS - Thread block size (Default: 1024)
+ COARSENING - Coarsening factor (Default: 16 (SP and INT); 8 (DP))
+ ATOMIC - Global atomics for synchronization (Default: No atomics)
+
+Read more
+J. Gomez-Luna et al., “In-place Data Sliding Algorithms for Many-core Architectures,” ICPP 2015.
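
For instance, a double-precision build that uses the global-atomics synchronization path (using only the flags listed above and the same tool paths as the Makefile; this exact command is an illustrative example, not a tested configuration) could be:

    /usr/local/cuda/bin/nvcc unique.cu -I/usr/local/cuda/include -lm -o unique -D THREADS=512 -D COARSENING=8 -D ATOMIC
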
diff --git a/UNI/baselines/gpu/ds.h b/UNI/baselines/gpu/ds.h
new file mode 100644
index 0000000..1fa73de
--- /dev/null
+++ b/UNI/baselines/gpu/ds.h
@@ -0,0 +1,293 @@
+/***************************************************************************
+ *cr
+ *cr (C) Copyright 2015 The Board of Trustees of the
+ *cr University of Illinois
+ *cr All Rights Reserved
+ *cr
+ ***************************************************************************/
+/*
+ In-Place Data Sliding Algorithms for Many-Core Architectures, presented in ICPP’15
+
+ Copyright (c) 2015 University of Illinois at Urbana-Champaign.
+ All rights reserved.
+
+ Permission to use, copy, modify and distribute this software and its documentation for
+ educational purpose is hereby granted without fee, provided that the above copyright
+ notice and this permission notice appear in all copies of this software and that you do
+ not sell the software.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
+ OTHERWISE.
+
+ Authors: Juan Gómez-Luna (el1goluj@uco.es, gomezlun@illinois.edu), Li-Wen Chang (lchang20@illinois.edu)
+*/
+
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <ctime>
+#include <cstdio>
+#include <math.h>
+#include <sys/time.h>
+#include <vector>
+
+#ifdef FLOAT
+#define T float
+#elif INT
+#define T int
+#elif INT64
+#define T int64_t
+#else
+#define T double
+#endif
+
+#ifdef THREADS
+#define L_DIM THREADS
+#else
+#define L_DIM 1024
+#endif
+
+#ifdef COARSENING
+#define REGS COARSENING
+#else
+#ifdef FLOAT
+#define REGS 16
+#elif INT
+#define REGS 16
+#else
+#define REGS 8
+#endif
+#endif
+
+#ifdef ATOMIC
+#define ATOM 1
+#else
+#define ATOM 0
+#endif
+
+#define WARP_SIZE 32
+
+#define PRINT 0
+
+// Dynamic allocation of runtime workgroup id
+__device__ int dynamic_wg_id(volatile unsigned int *flags, const int num_flags){
+ __shared__ int gid_;
+ if (threadIdx.x == 0) gid_ = atomicAdd((unsigned int*)&flags[num_flags + 1], 1);
+ __syncthreads();
+ int my_s = gid_;
+ return my_s;
+}
+
+// Set global synchronization (regular DS)
+__device__ void ds_sync(volatile unsigned int *flags, const int my_s){
+#if ATOM
+ if (threadIdx.x == 0){
+ while (atomicOr((unsigned int*)&flags[my_s], 0) == 0){}
+ atomicOr((unsigned int*)&flags[my_s + 1], 1);
+ }
+#else
+ if (threadIdx.x == 0){
+ while (flags[my_s] == 0){}
+ flags[my_s + 1] = 1;
+ }
+#endif
+ __syncthreads();
+}
+
+// Set global synchronization (irregular DS)
+__device__ void ds_sync_irregular(volatile unsigned int *flags, const int my_s, int *count){
+#if ATOM
+ if (threadIdx.x == 0){
+ while (atomicOr((unsigned int*)&flags[my_s], 0) == 0){}
+ int flag = flags[my_s];
+ atomicAdd((unsigned int*)&flags[my_s + 1], flag + *count);
+ *count = flag - 1;
+ }
+#else
+ if (threadIdx.x == 0){
+ while (flags[my_s] == 0){}
+ int flag = flags[my_s];
+ flags[my_s + 1] = flag + *count;
+ *count = flag - 1;
+ }
+#endif
+ __syncthreads();
+}
+
+// Set global synchronization (irregular DS Partition)
+__device__ void ds_sync_irregular_partition(volatile unsigned int *flags1, volatile unsigned int *flags2, const int my_s, int *count1, int *count2){
+#if ATOM
+ if (threadIdx.x == 0){
+ while (atomicOr((unsigned int*)&flags1[my_s], 0) == 0){}
+ int flag2 = flags2[my_s];
+		atomicAdd((unsigned int*)&flags2[my_s + 1], flag2 + *count2);
+		int flag1 = flags1[my_s];
+		atomicAdd((unsigned int*)&flags1[my_s + 1], flag1 + *count1);
+ *count1 = flag1 - 1;
+ *count2 = flag2 - 1;
+ }
+#else
+ if (threadIdx.x == 0){
+ while (flags1[my_s] == 0){}
+ int flag2 = flags2[my_s];
+ flags2[my_s + 1] = flag2 + *count2;
+ int flag1 = flags1[my_s];
+ flags1[my_s + 1] = flag1 + *count1;
+ *count1 = flag1 - 1;
+ *count2 = flag2 - 1;
+ }
+#endif
+ __syncthreads();
+}
+
+// Reduction kernel (CUDA SDK reduce6)
+template <class S>
+__device__ void reduction(S *count, S local_cnt){
+ __shared__ S sdata[L_DIM];
+
+ unsigned int tid = threadIdx.x;
+ S mySum = local_cnt;
+
+ // each thread puts its local sum into shared memory
+ sdata[tid] = local_cnt;
+ __syncthreads();
+
+ // do reduction in shared mem
+ if ((blockDim.x >= 1024) && (tid < 512)){
+ sdata[tid] = mySum = mySum + sdata[tid + 512];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 512) && (tid < 256)){
+ sdata[tid] = mySum = mySum + sdata[tid + 256];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 256) && (tid < 128)){
+ sdata[tid] = mySum = mySum + sdata[tid + 128];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 128) && (tid < 64)){
+ sdata[tid] = mySum = mySum + sdata[tid + 64];
+ }
+ __syncthreads();
+
+#if (__CUDA_ARCH__ >= 300 )
+ if ( tid < 32 ){
+ // Fetch final intermediate sum from 2nd warp
+ if (blockDim.x >= 64) mySum += sdata[tid + 32];
+ // Reduce final warp using shuffle
+ #pragma unroll
+ for (int offset = WARP_SIZE/2; offset > 0; offset /= 2){
+ //mySum += __shfl_down(mySum, offset);
+ mySum += __shfl_xor(mySum, offset);
+ }
+ }
+#else
+ // fully unroll reduction within a single warp
+ if ((blockDim.x >= 64) && (tid < 32)){
+ sdata[tid] = mySum = mySum + sdata[tid + 32];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 32) && (tid < 16)){
+ sdata[tid] = mySum = mySum + sdata[tid + 16];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 16) && (tid < 8)){
+ sdata[tid] = mySum = mySum + sdata[tid + 8];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 8) && (tid < 4)){
+ sdata[tid] = mySum = mySum + sdata[tid + 4];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 4) && (tid < 2)){
+ sdata[tid] = mySum = mySum + sdata[tid + 2];
+ }
+ __syncthreads();
+
+ if ((blockDim.x >= 2) && ( tid < 1)){
+ sdata[tid] = mySum = mySum + sdata[tid + 1];
+ }
+ __syncthreads();
+#endif
+
+ // write result for this block to global mem
+ if (tid == 0) *count = mySum;
+}
+
+// Binary prefix-sum (GPU Computing Gems)
+__device__ inline int lane_id(void) { return threadIdx.x % WARP_SIZE; }
+__device__ inline int warp_id(void) { return threadIdx.x / WARP_SIZE; }
+
+__device__ unsigned int warp_prefix_sums(bool p){
+ unsigned int b = __ballot(p);
+ return __popc(b & ((1 << lane_id()) - 1));
+}
+
+__device__ int warp_scan(int val, volatile int *s_data){
+#if (__CUDA_ARCH__ < 300 )
+ int idx = 2 * threadIdx.x - (threadIdx.x & (WARP_SIZE - 1));
+ s_data[idx] = 0;
+ idx += WARP_SIZE;
+ int t = s_data[idx] = val;
+ s_data[idx] = t = t + s_data[idx - 1];
+ s_data[idx] = t = t + s_data[idx - 2];
+ s_data[idx] = t = t + s_data[idx - 4];
+ s_data[idx] = t = t + s_data[idx - 8];
+ s_data[idx] = t = t + s_data[idx - 16];
+ return s_data[idx - 1];
+#else
+ int x = val;
+ #pragma unroll
+ for(int offset = 1; offset < 32; offset <<= 1){
+ // From GTC: Kepler shuffle tips and tricks:
+#if 0
+ int y = __shfl_up(x, offset);
+ if(lane_id() >= offset)
+ x += y;
+#else
+ asm volatile("{"
+ " .reg .s32 r0;"
+ " .reg .pred p;"
+ " shfl.up.b32 r0|p, %0, %1, 0x0;"
+ " @p add.s32 r0, r0, %0;"
+ " mov.s32 %0, r0;"
+ "}" : "+r"(x) : "r"(offset));
+#endif
+ }
+ return x - val;
+#endif
+}
+
+__device__ int block_binary_prefix_sums(int* count, int x){
+
+ __shared__ int sdata[L_DIM];
+
+ // A. Exclusive scan within each warp
+ int warpPrefix = warp_prefix_sums(x);
+
+ // B. Store in shared memory
+ if(lane_id() == WARP_SIZE - 1)
+ sdata[warp_id()] = warpPrefix + x;
+ __syncthreads();
+
+ // C. One warp scans in shared memory
+ if(threadIdx.x < WARP_SIZE)
+ sdata[threadIdx.x] = warp_scan(sdata[threadIdx.x], sdata);
+ __syncthreads();
+
+  // D. Each thread calculates its final value
+ int thread_out_element = warpPrefix + sdata[warp_id()];
+ int output = thread_out_element + *count;
+ __syncthreads();
+ if(threadIdx.x == blockDim.x - 1)
+ *count += (thread_out_element + x);
+
+ return output;
+}
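
block_binary_prefix_sums above returns, for each thread, *count plus the number of true predicates among the preceding threads of the block, and then advances *count by the block's total. A minimal sequential C sketch of that contract (binary_prefix_sums is a hypothetical reference helper, not part of the benchmark):

    #include <stdio.h>

    /* Sequential model of block_binary_prefix_sums: out[i] is the output slot
     * of element i given its 0/1 predicate; *count advances by the number of
     * true predicates. */
    static void binary_prefix_sums(int *out, const int *pred, int n, int *count) {
        int running = *count;
        for (int i = 0; i < n; i++) {
            out[i] = running;                 /* running base + exclusive prefix of 1s */
            running += (pred[i] != 0);
        }
        *count = running;
    }

    int main(void) {
        int pred[6] = {1, 0, 1, 1, 0, 1}, out[6], count = 0;
        binary_prefix_sums(out, pred, 6, &count);
        for (int i = 0; i < 6; i++) printf("%d ", out[i]);    /* 0 1 1 2 3 3 */
        printf("| count = %d\n", count);                      /* count = 4 */
        return 0;
    }
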
diff --git a/UNI/baselines/gpu/kernel.cu b/UNI/baselines/gpu/kernel.cu
new file mode 100644
index 0000000..5cd9ea4
--- /dev/null
+++ b/UNI/baselines/gpu/kernel.cu
@@ -0,0 +1,91 @@
+/***************************************************************************
+ *cr
+ *cr (C) Copyright 2015 The Board of Trustees of the
+ *cr University of Illinois
+ *cr All Rights Reserved
+ *cr
+ ***************************************************************************/
+/*
+ In-Place Data Sliding Algorithms for Many-Core Architectures, presented in ICPP’15
+
+ Copyright (c) 2015 University of Illinois at Urbana-Champaign.
+ All rights reserved.
+
+ Permission to use, copy, modify and distribute this software and its documentation for
+ educational purpose is hereby granted without fee, provided that the above copyright
+ notice and this permission notice appear in all copies of this software and that you do
+ not sell the software.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
+ OTHERWISE.
+
+ Authors: Juan Gómez-Luna (el1goluj@uco.es, gomezlun@illinois.edu), Li-Wen Chang (lchang20@illinois.edu)
+*/
+
+__device__ T warp_up(T reg, int delta){
+ __shared__ volatile T R[L_DIM];
+
+ R[threadIdx.x] = reg;
+
+ return (lane_id() - delta >= 0 ? R[threadIdx.x - delta] : 0);
+}
+__device__ T __shuffle_up(T* matrix, int my_s, int pos, T regi, int i){
+#if (__CUDA_ARCH__ >= 300 )
+ T p = __shfl_up(regi, 1);
+#else
+ T p = warp_up(regi, 1);
+#endif
+ if(lane_id() == 0 && i > 0)
+ p = matrix[pos - 1];
+ if(lane_id() == 0 && threadIdx.x != 0 && i == 0)
+ p = matrix[pos - 1];
+ if(my_s > 0 && threadIdx.x == 0 && i == 0)
+ p = matrix[pos - 1];
+ if(my_s == 0 && threadIdx.x == 0 && i == 0)
+ p = -1;
+ return p;
+}
+
+__global__ void unique(T *matrix_out, T *matrix,
+ int size,
+ volatile unsigned int *flags)
+{
+	__shared__ int count; // Counter for the number of kept (unique) elements per block
+ const int num_flags = size % (blockDim.x * REGS) == 0 ? size / (blockDim.x * REGS) : size / (blockDim.x * REGS) + 1;
+
+ // Dynamic allocation of runtime workgroup id
+ if (threadIdx.x == 0) count = 0;
+ const int my_s = dynamic_wg_id(flags, num_flags);
+
+ int local_cnt = 0;
+ // Declare on-chip memory
+ T reg[REGS];
+ int pos = my_s * REGS * blockDim.x + threadIdx.x;
+ // Load in on-chip memory
+ #pragma unroll
+ for (int j = 0; j < REGS; j++){
+ if (pos < size){
+ reg[j] = matrix[pos];
+ if(reg[j] != __shuffle_up(matrix, my_s, pos, reg[j], j))
+ local_cnt++;
+ else
+ reg[j] = -1;
+ }
+ else
+ reg[j] = -1;
+ pos += blockDim.x;
+ }
+ reduction<int>(&count, local_cnt);
+
+ // Set global synch
+ ds_sync_irregular(flags, my_s, &count);
+
+ // Store to global memory
+ #pragma unroll
+ for (int j = 0; j < REGS; j++){
+ pos = block_binary_prefix_sums(&count, reg[j] >= 0);
+ if (reg[j] >= 0){
+ matrix_out[pos] = reg[j];
+ }
+ }
+}
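
The kernel keeps an element when it differs from the value shuffled in from its predecessor, reduces the per-block count, and then relies on ds_sync_irregular (in ds.h) to chain blocks: block my_s waits on flags[my_s], publishes flags[my_s + 1] = flags[my_s] + count, and uses flags[my_s] - 1 as the base offset of its compacted output. A single-threaded C sketch of that chaining (chain_block_offsets is a hypothetical illustration; flags[0] is pre-set to 1 by the host code in unique.cu below, which also reads back flags[num_blocks] as flagM, i.e. the total number of kept elements plus one):

    #include <stdio.h>

    /* Sequential model of the flags-based chaining performed by ds_sync_irregular. */
    static void chain_block_offsets(unsigned int *flags, const unsigned int *block_count,
                                    int num_blocks, unsigned int *block_base) {
        for (int s = 0; s < num_blocks; s++) {
            block_base[s] = flags[s] - 1;                 /* where block s writes its kept elements */
            flags[s + 1]  = flags[s] + block_count[s];    /* releases block s + 1 */
        }
    }

    int main(void) {
        unsigned int flags[4] = {1, 0, 0, 0};             /* flags[0] = 1, as initialized by the host */
        unsigned int block_count[3] = {5, 2, 7}, block_base[3];
        chain_block_offsets(flags, block_count, 3, block_base);
        for (int s = 0; s < 3; s++)
            printf("block %d -> base %u\n", s, block_base[s]);   /* bases 0, 5, 7 */
        printf("flags[3] = %u (total kept + 1)\n", flags[3]);    /* 15 */
        return 0;
    }
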
diff --git a/UNI/baselines/gpu/unique.cu b/UNI/baselines/gpu/unique.cu
new file mode 100644
index 0000000..12d21fc
--- /dev/null
+++ b/UNI/baselines/gpu/unique.cu
@@ -0,0 +1,197 @@
+/***************************************************************************
+ *cr
+ *cr (C) Copyright 2015 The Board of Trustees of the
+ *cr University of Illinois
+ *cr All Rights Reserved
+ *cr
+ ***************************************************************************/
+/*
+ In-Place Data Sliding Algorithms for Many-Core Architectures, presented in ICPP’15
+
+ Copyright (c) 2015 University of Illinois at Urbana-Champaign.
+ All rights reserved.
+
+ Permission to use, copy, modify and distribute this software and its documentation for
+ educational purpose is hereby granted without fee, provided that the above copyright
+ notice and this permission notice appear in all copies of this software and that you do
+ not sell the software.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
+ OTHERWISE.
+
+ Authors: Juan Gómez-Luna (el1goluj@uco.es, gomezlun@illinois.edu), Li-Wen Chang (lchang20@illinois.edu)
+*/
+
+#include "ds.h"
+#include "kernel.cu"
+
+// Sequential CPU version
+void cpu_unique(T* output, T* input, int elements){
+ int j = 0;
+ output[j] = input[j];
+ j++;
+ for (int i = 1; i < elements; i++){
+ if (input[i] != input[i-1]){
+ output[j] = input[i];
+ j++;
+ }
+ }
+}
+
+int main(int argc, char **argv){
+
+ // Syntax verification
+ if (argc != 4) {
+ printf("Wrong format\n");
+        printf("Syntax: %s <device> <%% of unique elements> <numElements>\n", argv[0]);
+ exit(1);
+ }
+ int device = atoi(argv[1]);
+ int input = atoi(argv[2]);
+ int numElements = atoi(argv[3]);
+ size_t size = numElements * sizeof(T);
+
+ // Set device
+ cudaDeviceProp device_properties;
+ cudaGetDeviceProperties(&device_properties,device);
+ cudaSetDevice(device);
+
+ printf("DS Unique on %s\n", device_properties.name);
+ printf("Thread block size = %d\n", L_DIM);
+ printf("Coarsening factor = %d\n", REGS);
+#ifdef FLOAT
+ printf("Single precision array: %d elements\n", numElements);
+#elif defined(INT) || defined(INT64)
+ printf("Integer array: %d elements\n", numElements);
+#else
+ printf("Double precision array: %d elements\n", numElements);
+#endif
+
+ // Event creation
+ cudaEvent_t start, stop;
+ cudaEventCreate(&start);
+ cudaEventCreate(&stop);
+
+ float time1 = 0;
+ float time2 = 0;
+
+ // Allocate the host input vector A
+ T *h_A = (T*)malloc(size);
+
+ // Allocate the host output vectors
+ T *h_B = (T*)malloc(size);
+ T *h_C = (T*)malloc(size);
+
+ // Allocate the device input vector A
+ T *d_A = NULL;
+ cudaMalloc((void **)&d_A, size);
+
+#define WARMUP 0
+#define REP 1
+ int value1 = 0;
+ int value2 = 1;
+ int value3 = 2;
+ int value4 = 3;
+ unsigned int flagM = 0;
+ for(int iteration = 0; iteration < REP+WARMUP; iteration++){
+ // Initialize the host input vectors
+ srand(2014);
+ for(int i = 0; i < numElements; i++){
+ h_A[i] = value1;
+ if(i >= numElements/4 && i < numElements/2) h_A[i] = value2;
+ if(i >= numElements/2 && i < 3*numElements/4) h_A[i] = value3;
+ if(i >= 3*numElements/4 && i < numElements) h_A[i] = value4;
+ }
+ int M = (numElements * input)/100;
+ int m = M;
+ while(m>0){
+ int x = (int)(numElements*(((float)rand()/(float)RAND_MAX)));
+ if(h_A[x]==value1 || h_A[x]==value2 || h_A[x]==value3 || h_A[x]==value4){
+ h_A[x] = x+2;
+ m--;
+ }
+ }
+
+#if PRINT
+ printf("\n");
+ for(int i = 0; i < numElements; ++i){
+ printf("%d ",*(h_A+i));
+ }
+ printf("\n");
+#endif
+
+ // Copy the host input vector A in host memory to the device input vector in device memory
+ cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
+
+ int ldim = L_DIM;
+ // Atomic flags
+ unsigned int* d_flags = NULL;
+ int num_flags = numElements % (ldim * REGS) == 0 ? numElements / (ldim * REGS) : numElements / (ldim * REGS) + 1;
+ unsigned int *flags = (unsigned int *)calloc(sizeof(unsigned int), num_flags + 2);
+ flags[0] = 1;
+ flags[num_flags + 1] = 0;
+ cudaMalloc((void **)&d_flags, (num_flags + 2) * sizeof(unsigned int));
+ cudaMemcpy(d_flags, flags, (num_flags + 2) * sizeof(unsigned int), cudaMemcpyHostToDevice);
+ free(flags);
+ // Number of work-groups/thread blocks
+ int num_wg = num_flags;
+
+ // Start timer
+ cudaEventRecord( start, 0 );
+
+ // Kernel launch
+ unique<<<num_wg, ldim>>>(d_A, d_A, numElements, d_flags);
+
+ cudaMemcpy(&flagM, d_flags + num_flags, sizeof(unsigned int), cudaMemcpyDeviceToHost);
+
+ // End timer
+ cudaEventRecord( stop, 0 );
+ cudaEventSynchronize( stop );
+ cudaEventElapsedTime( &time1, start, stop );
+ if(iteration >= WARMUP) time2 += time1;
+
+ if(iteration == REP+WARMUP-1){
+ float timer = time2 / REP;
+ double bw = (double)((numElements + flagM) * sizeof(T)) / (double)(timer * 1000000.0);
+ printf("Execution time = %f ms, Throughput = %f GB/s\n", timer, bw);
+ }
+
+ // Free flags
+ cudaFree(d_flags);
+ }
+ // Copy to host memory
+ cudaMemcpy(h_B, d_A, size, cudaMemcpyDeviceToHost);
+
+ // CPU execution for comparison
+ cpu_unique(h_C, h_A, numElements);
+
+ // Verify that the result vector is correct
+#if PRINT
+ for(int i = 0; i < numElements; ++i){
+ printf("%d ",*(h_B+i));
+ }
+ printf("\n");
+ for(int i = 0; i < numElements; ++i){
+ printf("%d ",*(h_C+i));
+ }
+ printf("\n");
+#endif
+ for (int i = 0; i < flagM - 1; ++i){
+ if (h_B[i] != h_C[i]){
+ fprintf(stderr, "Result verification failed at element %d!\n", i);
+ exit(EXIT_FAILURE);
+ }
+ }
+ printf("Test PASSED\n");
+
+ // Free device global memory
+ cudaFree(d_A);
+ cudaEventDestroy(start);
+ cudaEventDestroy(stop);
+ // Free host memory
+ free(h_A);
+ free(h_B);
+ free(h_C);
+
+ return 0;
+}
diff --git a/UNI/dpu/task.c b/UNI/dpu/task.c
new file mode 100644
index 0000000..658d3a1
--- /dev/null
+++ b/UNI/dpu/task.c
@@ -0,0 +1,147 @@
+/*
+* Unique with multiple tasklets
+*
+*/
+#include <stdint.h>
+#include <stdio.h>
+#include <defs.h>
+#include <mram.h>
+#include <alloc.h>
+#include <perfcounter.h>
+#include <handshake.h>
+#include <barrier.h>
+
+#include "../support/common.h"
+
+__host dpu_arguments_t DPU_INPUT_ARGUMENTS;
+__host dpu_results_t DPU_RESULTS[NR_TASKLETS];
+
+// Array for communication between adjacent tasklets
+uint32_t message[NR_TASKLETS];
+T message_value[NR_TASKLETS];
+uint32_t message_offset[NR_TASKLETS];
+uint32_t message_partial_count;
+T message_last_from_last;
+
+// UNI in each tasklet
+static unsigned int unique(T *output, T *input){
+ unsigned int pos = 0;
+ output[pos] = input[pos];
+ pos++;
+ #pragma unroll
+ for(unsigned int j = 1; j < REGS; j++) {
+ if(input[j] != input[j - 1]) {
+ output[pos] = input[j];
+ pos++;
+ }
+ }
+ return pos;
+}
+
+// Handshake with adjacent tasklets
+static uint3 handshake_sync(T *output, unsigned int l_count, unsigned int tasklet_id){
+ unsigned int p_count, o_count, offset;
+ // Wait and read message
+ if(tasklet_id != 0){
+ handshake_wait_for(tasklet_id - 1);
+ p_count = message[tasklet_id];
+ offset = (message_value[tasklet_id] == output[0])?1:0;
+ o_count = message_offset[tasklet_id];
+ }
+ else{
+ p_count = 0;
+ offset = (message_last_from_last == output[0])?1:0;
+ o_count = 0;
+ }
+ // Write message and notify
+ if(tasklet_id < NR_TASKLETS - 1){
+ message[tasklet_id + 1] = p_count + l_count;
+ message_value[tasklet_id + 1] = output[l_count - 1];
+ message_offset[tasklet_id + 1] = o_count + offset;
+ handshake_notify();
+ }
+ uint3 result = {p_count, o_count, offset};
+ return result;
+}
+
+// Barrier
+BARRIER_INIT(my_barrier, NR_TASKLETS);
+
+extern int main_kernel1(void);
+
+int (*kernels[nr_kernels])(void) = {main_kernel1};
+
+int main(void) {
+ // Kernel
+ return kernels[DPU_INPUT_ARGUMENTS.kernel]();
+}
+
+// main_kernel1
+int main_kernel1() {
+ unsigned int tasklet_id = me();
+#if PRINT
+ printf("tasklet_id = %u\n", tasklet_id);
+#endif
+	if (tasklet_id == 0){ // Tasklet 0 resets the heap once
+ mem_reset(); // Reset the heap
+ }
+ // Barrier
+ barrier_wait(&my_barrier);
+
+ dpu_results_t *result = &DPU_RESULTS[tasklet_id];
+
+ uint32_t input_size_dpu_bytes = DPU_INPUT_ARGUMENTS.size; // Input size per DPU in bytes
+
+ // Address of the current processing block in MRAM
+ uint32_t base_tasklet = tasklet_id << BLOCK_SIZE_LOG2;
+ uint32_t mram_base_addr_A = (uint32_t)DPU_MRAM_HEAP_POINTER;
+ uint32_t mram_base_addr_B = (uint32_t)(DPU_MRAM_HEAP_POINTER + input_size_dpu_bytes);
+
+ // Initialize a local cache to store the MRAM block
+ T *cache_A = (T *) mem_alloc(BLOCK_SIZE);
+ T *cache_B = (T *) mem_alloc(BLOCK_SIZE);
+
+ // Initialize shared variable
+ if(tasklet_id == NR_TASKLETS - 1){
+ message_partial_count = 0;
+ message_last_from_last = 0xFFFFFFFF; // A value that is not in the input array
+ }
+ // Barrier
+ barrier_wait(&my_barrier);
+
+ unsigned int i = 0; // Iteration count
+ for(unsigned int byte_index = base_tasklet; byte_index < input_size_dpu_bytes; byte_index += BLOCK_SIZE * NR_TASKLETS){
+
+ // Load cache with current MRAM block
+ mram_read((__mram_ptr void const*)(mram_base_addr_A + byte_index), cache_A, BLOCK_SIZE);
+
+ // UNI in each tasklet
+		unsigned int l_count = unique(cache_B, cache_A); // Locally unique elements (out-of-place, cache_A into cache_B)
+
+ // Sync with adjacent tasklets
+ uint3 po_count = handshake_sync(cache_B, l_count, tasklet_id);
+
+ // Write cache to current MRAM block
+ mram_write(&cache_B[po_count.z], (__mram_ptr void*)(mram_base_addr_B + (message_partial_count + po_count.x - po_count.y) * sizeof(T)), l_count * sizeof(T));
+
+ // First
+ if(tasklet_id == 0 && i == 0){
+ result->first = cache_B[0];
+ }
+
+ // Total count in this DPU
+ if(tasklet_id == NR_TASKLETS - 1){
+ message_last_from_last = cache_B[l_count - 1];
+ result->last = cache_B[l_count - 1];
+ result->t_count = message_partial_count + po_count.x + l_count - po_count.y - po_count.z;
+ message_partial_count = result->t_count;
+ }
+
+ // Barrier
+ barrier_wait(&my_barrier);
+
+ i++;
+ }
+
+ return 0;
+}
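
Each tasklet writes a locally unique block into cache_B and then uses handshake_sync to learn how many elements the preceding tasklets produced (p_count), how many cross-block duplicates they already dropped (o_count), and whether its own first element repeats the previous block's last one (offset). A host-side C sketch of that merge rule (merge_blocks is a hypothetical illustration, not part of the benchmark; the host application further down in this diff applies the same rule again across DPUs):

    #include <stdio.h>
    #include <stdint.h>

    /* Merge per-block unique lists: drop a block's first element when it equals
     * the last element already written (the "offset" of handshake_sync). */
    static unsigned int merge_blocks(int64_t *out, int64_t **block,
                                     const unsigned int *l_count, unsigned int nr_blocks) {
        unsigned int total = 0;
        for (unsigned int b = 0; b < nr_blocks; b++) {
            unsigned int skip = (total > 0 && block[b][0] == out[total - 1]) ? 1 : 0;
            for (unsigned int j = skip; j < l_count[b]; j++)
                out[total++] = block[b][j];
        }
        return total;
    }

    int main(void) {
        int64_t b0[] = {0, 2, 4}, b1[] = {4, 6, 8};       /* block 1 starts with block 0's last value */
        int64_t *blocks[] = {b0, b1};
        int64_t out[6];
        unsigned int l_count[] = {3, 3};
        unsigned int total = merge_blocks(out, blocks, l_count, 2);
        for (unsigned int i = 0; i < total; i++)
            printf("%lld ", (long long)out[i]);           /* 0 2 4 6 8 */
        printf("| total = %u\n", total);                  /* total = 5 */
        return 0;
    }
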
diff --git a/UNI/host/app.c b/UNI/host/app.c
new file mode 100644
index 0000000..1b91bba
--- /dev/null
+++ b/UNI/host/app.c
@@ -0,0 +1,277 @@
+/**
+* app.c
+* UNI Host Application Source File
+*
+*/
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <dpu.h>
+#include <dpu_log.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <assert.h>
+
+#include "../support/common.h"
+#include "../support/timer.h"
+#include "../support/params.h"
+
+// Define the DPU Binary path as DPU_BINARY here
+#ifndef DPU_BINARY
+#define DPU_BINARY "./bin/dpu_code"
+#endif
+
+#if ENERGY
+#include <dpu_probe.h>
+#endif
+
+// Pointer declaration
+static T* A;
+static T* C;
+static T* C2;
+
+// Create input arrays
+static void read_input(T* A, unsigned int nr_elements, unsigned int nr_elements_round) {
+ //srand(0);
+ printf("nr_elements\t%u\t", nr_elements);
+ for (unsigned int i = 0; i < nr_elements; i++) {
+ //A[i] = (T) (rand());
+ A[i] = i%2==0?i:i+1;
+ }
+ for (unsigned int i = nr_elements; i < nr_elements_round; i++) {
+ A[i] = A[nr_elements - 1];
+ }
+}
+
+// Compute output in the host
+static unsigned int unique_host(T* C, T* A, unsigned int nr_elements) {
+ unsigned int pos = 0;
+ C[pos] = A[pos];
+ pos++;
+ for(unsigned int i = 1; i < nr_elements; i++) {
+ if(A[i] != A[i-1]) {
+ C[pos] = A[i];
+ pos++;
+ }
+ }
+ return pos;
+}
+
+// Main of the Host Application
+int main(int argc, char **argv) {
+
+ struct Params p = input_params(argc, argv);
+
+ struct dpu_set_t dpu_set, dpu;
+ uint32_t nr_of_dpus;
+
+#if ENERGY
+ struct dpu_probe_t probe;
+ DPU_ASSERT(dpu_probe_init("energy_probe", &probe));
+#endif
+
+ // Allocate DPUs and load binary
+ DPU_ASSERT(dpu_alloc(NR_DPUS, NULL, &dpu_set));
+ DPU_ASSERT(dpu_load(dpu_set, DPU_BINARY, NULL));
+ DPU_ASSERT(dpu_get_nr_dpus(dpu_set, &nr_of_dpus));
+ printf("Allocated %d DPU(s)\n", nr_of_dpus);
+
+ unsigned int i = 0;
+ uint32_t accum = 0;
+ uint32_t total_count = 0;
+
+ const unsigned int input_size = p.exp == 0 ? p.input_size * nr_of_dpus : p.input_size; // Total input size (weak or strong scaling)
+ const unsigned int input_size_dpu_ = divceil(input_size, nr_of_dpus); // Input size per DPU (max.)
+ const unsigned int input_size_dpu_round =
+        (input_size_dpu_ % (NR_TASKLETS * REGS) != 0) ? roundup(input_size_dpu_, (NR_TASKLETS * REGS)) : input_size_dpu_; // Input size per DPU (max.), rounded up to a multiple of NR_TASKLETS * REGS
+
+ // Input/output allocation
+ A = malloc(input_size_dpu_round * nr_of_dpus * sizeof(T));
+ C = malloc(input_size_dpu_round * nr_of_dpus * sizeof(T));
+ C2 = malloc(input_size_dpu_round * nr_of_dpus * sizeof(T));
+ T *bufferA = A;
+ T *bufferC = C2;
+
+ // Create an input file with arbitrary data
+ read_input(A, input_size, input_size_dpu_round * nr_of_dpus);
+
+ // Timer declaration
+ Timer timer;
+
+ printf("NR_TASKLETS\t%d\tBL\t%d\n", NR_TASKLETS, BL);
+
+ // Loop over main kernel
+ for(int rep = 0; rep < p.n_warmup + p.n_reps; rep++) {
+
+ // Compute output on CPU (performance comparison and verification purposes)
+ if(rep >= p.n_warmup)
+ start(&timer, 0, rep - p.n_warmup);
+ total_count = unique_host(C, A, input_size);
+ if(rep >= p.n_warmup)
+ stop(&timer, 0);
+
+ printf("Load input data\n");
+ if(rep >= p.n_warmup)
+ start(&timer, 1, rep - p.n_warmup);
+ // Input arguments
+ const unsigned int input_size_dpu = input_size_dpu_round;
+ unsigned int kernel = 0;
+ dpu_arguments_t input_arguments = {input_size_dpu * sizeof(T), kernel};
+ // Copy input arrays
+ i = 0;
+ DPU_FOREACH(dpu_set, dpu, i) {
+ DPU_ASSERT(dpu_prepare_xfer(dpu, &input_arguments));
+ }
+ DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, "DPU_INPUT_ARGUMENTS", 0, sizeof(input_arguments), DPU_XFER_DEFAULT));
+ DPU_FOREACH(dpu_set, dpu, i) {
+ DPU_ASSERT(dpu_prepare_xfer(dpu, bufferA + input_size_dpu * i));
+ }
+ DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, input_size_dpu * sizeof(T), DPU_XFER_DEFAULT));
+ if(rep >= p.n_warmup)
+ stop(&timer, 1);
+
+ printf("Run program on DPU(s) \n");
+ // Run DPU kernel
+ if(rep >= p.n_warmup) {
+ start(&timer, 2, rep - p.n_warmup);
+ #if ENERGY
+ DPU_ASSERT(dpu_probe_start(&probe));
+ #endif
+ }
+ DPU_ASSERT(dpu_launch(dpu_set, DPU_SYNCHRONOUS));
+ if(rep >= p.n_warmup) {
+ stop(&timer, 2);
+ #if ENERGY
+ DPU_ASSERT(dpu_probe_stop(&probe));
+ #endif
+ }
+
+#if PRINT
+ {
+ unsigned int each_dpu = 0;
+ printf("Display DPU Logs\n");
+ DPU_FOREACH (dpu_set, dpu) {
+ printf("DPU#%d:\n", each_dpu);
+ DPU_ASSERT(dpulog_read_for_dpu(dpu.dpu, stdout));
+ each_dpu++;
+ }
+ }
+#endif
+
+ printf("Retrieve results\n");
+ dpu_results_t results[nr_of_dpus];
+ uint32_t* results_scan = malloc(nr_of_dpus * sizeof(uint32_t));
+ uint32_t* offset = calloc(nr_of_dpus, sizeof(uint32_t));
+ uint32_t* offset_scan = calloc(nr_of_dpus, sizeof(uint32_t));
+ i = 0;
+ accum = 0;
+
+ if(rep >= p.n_warmup)
+ start(&timer, 3, rep - p.n_warmup);
+ // PARALLEL RETRIEVE TRANSFER
+ dpu_results_t* results_retrieve[nr_of_dpus];
+
+ DPU_FOREACH(dpu_set, dpu, i) {
+ results_retrieve[i] = (dpu_results_t*)malloc(NR_TASKLETS * sizeof(dpu_results_t));
+ DPU_ASSERT(dpu_prepare_xfer(dpu, results_retrieve[i]));
+ }
+ DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_FROM_DPU, "DPU_RESULTS", 0, NR_TASKLETS * sizeof(dpu_results_t), DPU_XFER_DEFAULT));
+
+ DPU_FOREACH(dpu_set, dpu, i) {
+ // Retrieve tasklet timings
+ for (unsigned int each_tasklet = 0; each_tasklet < NR_TASKLETS; each_tasklet++) {
+ // First output element of this DPU
+ if(each_tasklet == 0){
+ results[i].first = results_retrieve[i][each_tasklet].first;
+ }
+ // Last output element of this DPU and count
+ if(each_tasklet == NR_TASKLETS - 1){
+ results[i].t_count = results_retrieve[i][each_tasklet].t_count;
+ results[i].last = results_retrieve[i][each_tasklet].last;
+ }
+ }
+ // Check if first(i) == last(i-1) -- offset
+ if(i != 0){
+ if(results[i].first == results[i - 1].last)
+ offset[i] = 1;
+ // Sequential scan - offset
+ offset_scan[i] += offset[i];
+ }
+ // Sequential scan
+ uint32_t temp = results[i].t_count - offset[i];
+ results_scan[i] = accum;
+ accum += temp;
+#if PRINT
+ printf("i=%d -- %u, %u, %u -- %u\n", i, results_scan[i], accum, temp, offset_scan[i]);
+#endif
+ free(results_retrieve[i]);
+ }
+ if(rep >= p.n_warmup)
+ stop(&timer, 3);
+
+ i = 0;
+ if(rep >= p.n_warmup)
+ start(&timer, 4, rep - p.n_warmup);
+ DPU_FOREACH (dpu_set, dpu) {
+ // Copy output array
+ DPU_ASSERT(dpu_copy_from(dpu, DPU_MRAM_HEAP_POINTER_NAME, input_size_dpu * sizeof(T), bufferC + results_scan[i] - offset_scan[i], results[i].t_count * sizeof(T)));
+
+ i++;
+ }
+ if(rep >= p.n_warmup)
+ stop(&timer, 4);
+
+ // Free memory
+ free(results_scan);
+ free(offset);
+ free(offset_scan);
+
+ }
+
+ // Print timing results
+ printf("CPU ");
+ print(&timer, 0, p.n_reps);
+ printf("CPU-DPU ");
+ print(&timer, 1, p.n_reps);
+ printf("DPU Kernel ");
+ print(&timer, 2, p.n_reps);
+ printf("Inter-DPU ");
+ print(&timer, 3, p.n_reps);
+ printf("DPU-CPU ");
+ print(&timer, 4, p.n_reps);
+
+#if ENERGY
+ double energy;
+ DPU_ASSERT(dpu_probe_get(&probe, DPU_ENERGY, DPU_AVERAGE, &energy));
+ printf("DPU Energy (J): %f\t", energy);
+#endif
+
+ // Check output
+ bool status = true;
+ if(accum != total_count) status = false;
+#if PRINT
+ printf("accum %u, total_count %u\n", accum, total_count);
+#endif
+ for (i = 0; i < accum; i++) {
+ if(C[i] != bufferC[i]){
+ status = false;
+#if PRINT
+ printf("%d: %lu -- %lu\n", i, C[i], bufferC[i]);
+#endif
+ }
+ }
+ if (status) {
+ printf("[" ANSI_COLOR_GREEN "OK" ANSI_COLOR_RESET "] Outputs are equal\n");
+ } else {
+ printf("[" ANSI_COLOR_RED "ERROR" ANSI_COLOR_RESET "] Outputs differ!\n");
+ }
+
+ // Deallocation
+ free(A);
+ free(C);
+ free(C2);
+ DPU_ASSERT(dpu_free(dpu_set));
+
+ return status ? 0 : -1;
+}
diff --git a/UNI/support/common.h b/UNI/support/common.h
new file mode 100755
index 0000000..0bf9853
--- /dev/null
+++ b/UNI/support/common.h
@@ -0,0 +1,46 @@
+#ifndef _COMMON_H_
+#define _COMMON_H_
+
+#include <stdint.h>
+
+// Data type
+#define T int64_t
+#define REGS (BLOCK_SIZE >> 3) // Elements per block (BLOCK_SIZE bytes / 8-byte elements)
+
+// Structures used by both the host and the dpu to communicate information
+typedef struct {
+ uint32_t size;
+ enum kernels {
+ kernel1 = 0,
+ nr_kernels = 1,
+ } kernel;
+} dpu_arguments_t;
+
+typedef struct {
+ uint32_t t_count;
+ T first;
+ T last;
+} dpu_results_t;
+
+// Transfer size between MRAM and WRAM
+#ifdef BL
+#define BLOCK_SIZE_LOG2 BL
+#define BLOCK_SIZE (1 << BLOCK_SIZE_LOG2)
+#else
+#define BLOCK_SIZE_LOG2 8
+#define BLOCK_SIZE (1 << BLOCK_SIZE_LOG2)
+#define BL BLOCK_SIZE_LOG2
+#endif
+
+typedef struct{unsigned int x; unsigned int y; unsigned int z;} uint3;
+
+#ifndef ENERGY
+#define ENERGY 0
+#endif
+#define PRINT 0
+
+#define ANSI_COLOR_RED "\x1b[31m"
+#define ANSI_COLOR_GREEN "\x1b[32m"
+#define ANSI_COLOR_RESET "\x1b[0m"
+
+#define divceil(n, m) (((n)-1) / (m) + 1)
+#define roundup(n, m) (((n) / (m)) * (m) + (m))
+#endif
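
divceil and roundup are used by the host to split the input across DPUs and to pad each DPU's share to a multiple of NR_TASKLETS * REGS elements. A small worked example with illustrative values (7 DPUs, 16 tasklets, BL=10, hence BLOCK_SIZE = 1024 bytes and REGS = 128 elements of T; the standalone program below only mirrors the arithmetic in host/app.c):

    #include <stdio.h>

    #define divceil(n, m) (((n)-1) / (m) + 1)
    #define roundup(n, m) (((n) / (m)) * (m) + (m))

    int main(void) {
        unsigned int input_size = 3932160, nr_of_dpus = 7;
        unsigned int chunk = 16 * 128;                           /* NR_TASKLETS * REGS */
        unsigned int per_dpu = divceil(input_size, nr_of_dpus);  /* 561738 */
        unsigned int padded = (per_dpu % chunk != 0) ? roundup(per_dpu, chunk)
                                                     : per_dpu;  /* 563200 */
        printf("%u elements per DPU, padded to %u\n", per_dpu, padded);
        return 0;
    }
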
diff --git a/UNI/support/params.h b/UNI/support/params.h
new file mode 100644
index 0000000..bb86211
--- /dev/null
+++ b/UNI/support/params.h
@@ -0,0 +1,56 @@
+#ifndef _PARAMS_H_
+#define _PARAMS_H_
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <assert.h>
+
+#include "common.h"
+
+typedef struct Params {
+ unsigned int input_size;
+ int n_warmup;
+ int n_reps;
+ int exp;
+}Params;
+
+static void usage() {
+ fprintf(stderr,
+ "\nUsage: ./program [options]"
+ "\n"
+ "\nGeneral options:"
+ "\n -h help"
+ "\n -w <W> # of untimed warmup iterations (default=1)"
+ "\n -e <E> # of timed repetition iterations (default=3)"
+ "\n -x <X> Weak (0) or strong (1) scaling (default=0)"
+ "\n"
+ "\nBenchmark-specific options:"
+ "\n -i <I> input size (default=3932160 elements)"
+ "\n");
+}
+
+struct Params input_params(int argc, char **argv) {
+ struct Params p;
+ p.input_size = 3932160;
+ p.n_warmup = 1;
+ p.n_reps = 3;
+ p.exp = 0;
+
+ int opt;
+ while((opt = getopt(argc, argv, "hi:w:e:x:")) >= 0) {
+ switch(opt) {
+ case 'h':
+ usage();
+ exit(0);
+ break;
+ case 'i': p.input_size = atoi(optarg); break;
+ case 'w': p.n_warmup = atoi(optarg); break;
+ case 'e': p.n_reps = atoi(optarg); break;
+ case 'x': p.exp = atoi(optarg); break;
+ default:
+ fprintf(stderr, "\nUnrecognized option!\n");
+ usage();
+ exit(0);
+ }
+ }
+ assert(NR_DPUS > 0 && "Invalid # of dpus!");
+
+ return p;
+}
+#endif
diff --git a/UNI/support/timer.h b/UNI/support/timer.h
new file mode 100755
index 0000000..b53d95f
--- /dev/null
+++ b/UNI/support/timer.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016 University of Cordoba and University of Illinois
+ * All rights reserved.
+ *
+ * Developed by: IMPACT Research Group
+ * University of Cordoba and University of Illinois
+ * http://impact.crhc.illinois.edu/
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * with the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * > Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimers.
+ * > Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimers in the
+ * documentation and/or other materials provided with the distribution.
+ * > Neither the names of IMPACT Research Group, University of Cordoba,
+ * University of Illinois nor the names of its contributors may be used
+ * to endorse or promote products derived from this Software without
+ * specific prior written permission.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
+ * THE SOFTWARE.
+ *
+ */
+
+#include <stdio.h>
+#include <sys/time.h>
+
+typedef struct Timer{
+
+ struct timeval startTime[7];
+ struct timeval stopTime[7];
+ double time[7];
+
+}Timer;
+
+void start(Timer *timer, int i, int rep) {
+ if(rep == 0) {
+ timer->time[i] = 0.0;
+ }
+ gettimeofday(&timer->startTime[i], NULL);
+}
+
+void stop(Timer *timer, int i) {
+ gettimeofday(&timer->stopTime[i], NULL);
+ timer->time[i] += (timer->stopTime[i].tv_sec - timer->startTime[i].tv_sec) * 1000000.0 +
+ (timer->stopTime[i].tv_usec - timer->startTime[i].tv_usec);
+}
+
+void print(Timer *timer, int i, int REP) { printf("Time (ms): %f\t", timer->time[i] / (1000 * REP)); }
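
A minimal usage sketch of this timer interface, mirroring how host/app.c drives it (the usleep call is only a placeholder for the work being timed):

    #include <stdio.h>
    #include <unistd.h>
    #include "timer.h"

    int main(void) {
        Timer timer;
        const int n_reps = 3;
        for (int rep = 0; rep < n_reps; rep++) {
            start(&timer, 0, rep);   /* slot 0; rep == 0 resets the accumulated time */
            usleep(1000);            /* placeholder for the timed work */
            stop(&timer, 0);         /* accumulates elapsed microseconds into slot 0 */
        }
        printf("Kernel ");
        print(&timer, 0, n_reps);    /* average time in ms over n_reps */
        printf("\n");
        return 0;
    }
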