adding work groups support to spawn tasks API

Blaise Tine 2024-05-06 01:25:13 -07:00
parent 0cabd24f08
commit 79f5824c74
8 changed files with 581 additions and 246 deletions
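
This change replaces the PoCL-specific vx_spawn_pocl_kernel() entry point with a general vx_spawn_tasks_ex() API: the caller launches num_groups work groups of group_size threads each, and the callback receives its local task id within the group together with its group id. A new vx_syncthreads(barrier_id) routine provides an intra-group barrier, mapped onto hardware warp barriers when a group spans multiple warps. A new sgemm2x regression test exercises the API with a tiled matrix multiplication that stages tiles in local memory.

A minimal caller sketch (illustrative only; my_kernel and the launch parameters are hypothetical, the vx_* calls are the API added here):

#include <stddef.h>
#include <vx_spawn.h>

// hypothetical kernel: one invocation per (group_id, local_task_id) pair
void my_kernel(int local_task_id, int group_id, void* arg) {
  // ... phase 1: produce data shared within the group ...
  vx_syncthreads(0); // wait for all threads of this group
  // ... phase 2: consume data produced by other group members ...
}

int main() {
  int num_groups = 16; // example values
  int group_size = 64; // must not exceed threads_per_core
  vx_spawn_tasks_ex(num_groups, group_size, my_kernel, NULL);
  return 0;
}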

vx_spawn.h

@@ -21,33 +21,18 @@
extern "C" {
#endif
typedef void (*vx_spawn_tasks_cb)(int task_id, void *arg);

typedef void (*vx_spawn_tasks_ex_cb)(int local_task_id, int group_id, void *arg);

typedef void (*vx_serial_cb)(void *arg);

void vx_spawn_tasks(int num_tasks, vx_spawn_tasks_cb callback, void * arg);

void vx_spawn_tasks_ex(int num_groups, int group_size, vx_spawn_tasks_ex_cb callback, void * arg);

void vx_syncthreads(int barrier_id);

void vx_serial(vx_serial_cb callback, void * arg);
#ifdef __cplusplus

vx_spawn.c

@@ -27,130 +27,120 @@ extern "C" {
typedef struct {
  vx_spawn_tasks_cb callback;
  void* arg;
  int all_tasks_offset;
  int remain_tasks_offset;
  int warp_batches;
  int remaining_warps;
} wspawn_tasks_args_t;

static void __attribute__ ((noinline)) process_all_tasks() {
  wspawn_tasks_args_t* targs = (wspawn_tasks_args_t*)csr_read(VX_CSR_MSCRATCH);

  int threads_per_warp = vx_num_threads();
  int warp_id = vx_warp_id();
  int thread_id = vx_thread_id();

  int start_warp = (warp_id * targs->warp_batches) + MIN(warp_id, targs->remaining_warps);
  int iterations = targs->warp_batches + (warp_id < targs->remaining_warps);

  int start_task_id = targs->all_tasks_offset + (start_warp * threads_per_warp) + thread_id;
  int end_task_id = start_task_id + iterations * threads_per_warp;

  vx_spawn_tasks_cb callback = targs->callback;
  void* arg = targs->arg;
  for (int task_id = start_task_id; task_id < end_task_id; task_id += threads_per_warp) {
    callback(task_id, arg);
  }
}

static void __attribute__ ((noinline)) process_remaining_tasks() {
  wspawn_tasks_args_t* targs = (wspawn_tasks_args_t*)csr_read(VX_CSR_MSCRATCH);

  int thread_id = vx_thread_id();
  int task_id = targs->remain_tasks_offset + thread_id;

  (targs->callback)(task_id, targs->arg);
}

static void __attribute__ ((noinline)) process_all_tasks_stub() {
  // activate all threads
  vx_tmc(-1);

  // process all tasks
  process_all_tasks();

  // disable warp
  vx_tmc_zero();
}

void vx_spawn_tasks(int num_tasks, vx_spawn_tasks_cb callback , void * arg) {
  // device specifications
  int num_cores = vx_num_cores();
  int warps_per_core = vx_num_warps();
  int threads_per_warp = vx_num_threads();
  int core_id = vx_core_id();

  // calculate necessary active cores
  int threads_per_core = warps_per_core * threads_per_warp;
  int needed_cores = (num_tasks + threads_per_core - 1) / threads_per_core;
  int active_cores = MIN(needed_cores, num_cores);

  // only active cores participate
  if (core_id >= active_cores)
    return;

  // number of tasks per core
  int tasks_per_core = num_tasks / active_cores;
  int remaining_tasks_per_core = num_tasks - tasks_per_core * active_cores;
  if (core_id < remaining_tasks_per_core)
    tasks_per_core++;

  // calculate number of warps to activate
  int active_warps = tasks_per_core / threads_per_warp;
  int remaining_tasks = tasks_per_core - active_warps * threads_per_warp;
  int warp_batches = 1, remaining_warps = 0;
  if (active_warps > warps_per_core) {
    warp_batches = active_warps / warps_per_core;
    remaining_warps = active_warps - warp_batches * warps_per_core;
  }

  // calculate offsets for task distribution
  int all_tasks_offset = core_id * tasks_per_core + MIN(core_id, remaining_tasks_per_core);
  int remain_tasks_offset = all_tasks_offset + (tasks_per_core - remaining_tasks);

  // prepare scheduler arguments
  wspawn_tasks_args_t wspawn_args = {
    callback,
    arg,
    all_tasks_offset,
    remain_tasks_offset,
    warp_batches,
    remaining_warps
  };
  csr_write(VX_CSR_MSCRATCH, &wspawn_args);

  if (active_warps >= 1) {
    // execute callback on other warps
    int num_total_warps = MIN(active_warps, warps_per_core);
    vx_wspawn(num_total_warps, process_all_tasks_stub);

    // activate all threads
    vx_tmc(-1);

    // process all tasks
    process_all_tasks();

    // back to single-threaded
    vx_tmc_one();
  }

  if (remaining_tasks != 0) {
    // activate remaining threads
    int tmask = (1 << remaining_tasks) - 1;
    vx_tmc(tmask);

    // process remaining tasks
    process_remaining_tasks();

    // back to single-threaded
    vx_tmc_one();
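
As a worked example of the task distribution above (illustrative numbers): with 8 warps per core and 4 threads per warp (threads_per_core = 32) and num_tasks = 100, needed_cores = ceil(100/32) = 4, so four cores activate with tasks_per_core = 25 each; then active_warps = 25/4 = 6 and remaining_tasks = 1, so each core first runs 24 tasks across six fully populated warps, then its one leftover task under a single-thread mask (tmask = 0b1).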
@@ -162,162 +152,158 @@ void vx_spawn_tasks(int num_tasks, vx_spawn_tasks_cb callback , void * arg) {
///////////////////////////////////////////////////////////////////////////////
typedef struct {
  vx_spawn_tasks_ex_cb callback;
  void* arg;
  int group_offset;
  int warp_batches;
  int remaining_warps;
  int warps_per_group;
  int groups_per_core;
  int remaining_mask;
  int barrier_enabled;
} wspawn_tasks_ex_args_t;

static void __attribute__ ((noinline)) process_all_tasks_ex() {
  wspawn_tasks_ex_args_t* targs = (wspawn_tasks_ex_args_t*)csr_read(VX_CSR_MSCRATCH);

  int warps_per_group = targs->warps_per_group;
  int groups_per_core = targs->groups_per_core;

  int threads_per_warp = vx_num_threads();
  int warp_id = vx_warp_id();
  int thread_id = vx_thread_id();

  int iterations = targs->warp_batches + (warp_id < targs->remaining_warps);

  int local_group_id = warp_id / warps_per_group;
  int group_warp_id = warp_id - local_group_id * warps_per_group;
  int local_task_id = group_warp_id * threads_per_warp + thread_id;

  int start_group = targs->group_offset + local_group_id;
  int end_group = start_group + iterations * groups_per_core;

  vx_spawn_tasks_ex_cb callback = targs->callback;
  void* arg = targs->arg;

  for (int group_id = start_group; group_id < end_group; group_id += groups_per_core) {
    //vx_printf("*** warp_id=%d, thread_id=%d, local_task_id=%d, group_id=%d\n", warp_id, thread_id, local_task_id, group_id);
    callback(local_task_id, group_id, arg);
  }
}

static void __attribute__ ((noinline)) process_all_tasks_ex_stub() {
  wspawn_tasks_ex_args_t* targs = (wspawn_tasks_ex_args_t*)csr_read(VX_CSR_MSCRATCH);

  int warps_per_group = targs->warps_per_group;
  int remaining_mask = targs->remaining_mask;

  int warp_id = vx_warp_id();
  int group_warp_id = warp_id % warps_per_group;
  int threads_mask = (group_warp_id == warps_per_group-1) ? remaining_mask : -1;
  //vx_printf("*** warp_id=%d, threads_mask=0x%x\n", warp_id, threads_mask);

  // activate threads
  vx_tmc(threads_mask);

  // process all tasks
  process_all_tasks_ex();

  // disable all warps except warp0
  vx_tmc(0 == warp_id);
}

void vx_syncthreads(int barrier_id) {
  wspawn_tasks_ex_args_t* targs = (wspawn_tasks_ex_args_t*)csr_read(VX_CSR_MSCRATCH);

  int barrier_enabled = targs->barrier_enabled;
  if (!barrier_enabled)
    return; // no need to synchronize

  int warps_per_group = targs->warps_per_group;
  int groups_per_core = targs->groups_per_core;
  int num_barriers = vx_num_barriers();
  int warp_id = vx_warp_id();
  int local_group_id = warp_id / warps_per_group;
  int id = barrier_id * groups_per_core + local_group_id;

  // check barrier resource
  if (id >= num_barriers) {
    vx_printf("error: out of barrier resource (%d:%d)\n", id+1, num_barriers);
    return;
  }

  //vx_printf("*** warp_id=%d, barrier_id=%d, id=%d\n", warp_id, barrier_id, id);
  vx_barrier(id, warps_per_group);
}

void vx_spawn_tasks_ex(int num_groups, int group_size, vx_spawn_tasks_ex_cb callback, void * arg) {
  // device specifications
  int num_cores = vx_num_cores();
  int warps_per_core = vx_num_warps();
  int threads_per_warp = vx_num_threads();
  int core_id = vx_core_id();

  // check group size
  int threads_per_core = warps_per_core * threads_per_warp;
  if (threads_per_core < group_size) {
    vx_printf("error: group_size > threads_per_core (%d)\n", threads_per_core);
    return;
  }

  int warps_per_group = group_size / threads_per_warp;
  int remaining_threads = group_size - warps_per_group * threads_per_warp;
  int remaining_mask = -1;
  if (remaining_threads != 0) {
    remaining_mask = (1 << remaining_threads) - 1;
    warps_per_group++;
  }

  // calculate necessary active cores
  int needed_warps = num_groups * warps_per_group;
  int needed_cores = (needed_warps + warps_per_core-1) / warps_per_core;
  int active_cores = MIN(needed_cores, num_cores);

  // only active cores participate
  if (core_id >= active_cores)
    return;

  int total_groups_per_core = num_groups / active_cores;
  int remaining_groups_per_core = num_groups - active_cores * total_groups_per_core;
  if (core_id < remaining_groups_per_core)
    total_groups_per_core++;

  // calculate number of warps to activate
  int groups_per_core = warps_per_core / warps_per_group;
  int total_warps_per_core = total_groups_per_core * warps_per_group;
  int warp_batches = 1, remaining_warps = 0;
  int active_warps = total_warps_per_core;
  if (active_warps > warps_per_core) {
    active_warps = groups_per_core * warps_per_group;
    warp_batches = total_warps_per_core / active_warps;
    remaining_warps = total_warps_per_core - warp_batches * active_warps;
  }

  // calculate offsets for group distribution
  int group_offset = core_id * total_groups_per_core + MIN(core_id, remaining_groups_per_core);

  // check if warp barriers are needed
  int barrier_enabled = (group_size > threads_per_warp);

  // prepare scheduler arguments
  wspawn_tasks_ex_args_t wspawn_args = {
    callback,
    arg,
    group_offset,
    warp_batches,
    remaining_warps,
    warps_per_group,
    groups_per_core,
    remaining_mask,
    barrier_enabled
  };
  csr_write(VX_CSR_MSCRATCH, &wspawn_args);
  //vx_printf("*** group_offset=%d, warp_batches=%d, remaining_warps=%d, warps_per_group=%d, groups_per_core=%d, remaining_mask=%d\n", group_offset, warp_batches, remaining_warps, warps_per_group, groups_per_core, remaining_mask);

  // execute callback on other warps
  vx_wspawn(active_warps, process_all_tasks_ex_stub);

  // execute callback on warp0
  process_all_tasks_ex_stub();

  // wait for spawned tasks to complete
  vx_wspawn(1, 0);
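
The group-to-warp mapping above can be checked with a small host-side calculation. A standalone sketch (illustrative values, not part of the commit):

#include <stdio.h>

// mirrors the scheduling math in vx_spawn_tasks_ex() for sample values
int main() {
  int threads_per_warp = 4, warps_per_core = 8, group_size = 6;
  int warps_per_group = group_size / threads_per_warp;                      // 1
  int remaining_threads = group_size - warps_per_group * threads_per_warp;  // 2
  int remaining_mask = -1;
  if (remaining_threads != 0) {
    remaining_mask = (1 << remaining_threads) - 1; // 0b0011: last warp runs 2 threads
    warps_per_group++;                             // 2 warps per group
  }
  int groups_per_core = warps_per_core / warps_per_group; // 4 concurrent groups
  int barrier_enabled = (group_size > threads_per_warp);  // 1: groups span warps
  printf("warps_per_group=%d mask=0x%x groups_per_core=%d barrier=%d\n",
         warps_per_group, remaining_mask, groups_per_core, barrier_enabled);
  return 0;
}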

tests/regression/sgemm2x/Makefile

@@ -0,0 +1,14 @@
ROOT_DIR := $(realpath ../../..)
include $(ROOT_DIR)/config.mk
PROJECT := sgemm2x
SRC_DIR := $(VORTEX_HOME)/tests/regression/$(PROJECT)
SRCS := $(SRC_DIR)/main.cpp
VX_SRCS := $(SRC_DIR)/kernel.cpp
OPTS ?= -n16
include ../common.mk

tests/regression/sgemm2x/common.h

@@ -0,0 +1,19 @@
#ifndef _COMMON_H_
#define _COMMON_H_

#include <stdint.h>

#ifndef TYPE
#define TYPE int
#endif
typedef struct {
uint32_t num_groups;
uint32_t group_size;
uint32_t size;
uint32_t tile_size;
uint64_t local_addr;
uint64_t A_addr;
uint64_t B_addr;
uint64_t C_addr;
} kernel_arg_t;
#endif

tests/regression/sgemm2x/kernel.cpp

@@ -0,0 +1,58 @@
#include <stdint.h>
#include <vx_intrinsics.h>
#include <vx_spawn.h>
#include <vx_print.h>
#include "common.h"
void sgemm_kernel(int local_task_id, int group_id, kernel_arg_t *arg) {
auto local_ptr = reinterpret_cast<TYPE*>(arg->local_addr);
auto A_ptr = reinterpret_cast<TYPE*>(arg->A_addr);
auto B_ptr = reinterpret_cast<TYPE*>(arg->B_addr);
auto C_ptr = reinterpret_cast<TYPE*>(arg->C_addr);
auto size = arg->size;
auto tile_size = arg->tile_size;
auto num_groups = arg->num_groups;
auto group_size = arg->group_size;
auto num_tiles = size / tile_size;
// Determine row and column indices of the current subtask
auto l_row = local_task_id / tile_size;
auto l_col = local_task_id % tile_size;
// Determine row and column indices of the current task
auto g_row = (group_id / num_tiles) * tile_size + l_row;
auto g_col = (group_id % num_tiles) * tile_size + l_col;
// Allocate local memory for the tile of matrix A & B
auto local_A = local_ptr + group_id * group_size;
auto local_B = local_A + num_groups * group_size;
TYPE sum(0);
// Loop over tiles
for (uint32_t k = 0; k < size; k += tile_size) {
// Load tile of matrix A & B to local memory
local_A[l_row * tile_size + l_col] = A_ptr[g_row * size + (k + l_col)];
local_B[l_row * tile_size + l_col] = B_ptr[(k + l_row) * size + g_col];
// Synchronize all threads in current group
vx_syncthreads(0);
// Compute partial sum for the local tile
for (uint32_t j = 0; j < tile_size; ++j) {
sum += local_A[l_row * tile_size + j] * local_B[j * tile_size + l_col];
}
// Synchronize all threads in current group
vx_syncthreads(1);
}
// Store the computed sum into the result matrix C
C_ptr[g_row * size + g_col] = sum;
}
int main() {
kernel_arg_t* arg = (kernel_arg_t*)csr_read(VX_CSR_MSCRATCH);
vx_spawn_tasks_ex(arg->num_groups, arg->group_size, (vx_spawn_tasks_ex_cb)sgemm_kernel, arg);
return 0;
}
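
Worked index mapping for the default configuration (-n16 with tile_size 4, so num_tiles = 4, group_size = 16, num_groups = 16): group_id 5 selects tile (row 5/4 = 1, col 5%4 = 1), and local_task_id 7 gives l_row = 7/4 = 1 and l_col = 7%4 = 3, hence g_row = 1*4+1 = 5 and g_col = 1*4+3 = 7, so that invocation computes C[5*16 + 7].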

tests/regression/sgemm2x/main.cpp

@@ -0,0 +1,275 @@
#include <iostream>
#include <unistd.h>
#include <string.h>
#include <vector>
#include <vortex.h>
#include "common.h"
#define FLOAT_ULP 6
#define RT_CHECK(_expr) \
do { \
int _ret = _expr; \
if (0 == _ret) \
break; \
printf("Error: '%s' returned %d!\n", #_expr, (int)_ret); \
cleanup(); \
exit(-1); \
} while (false)
///////////////////////////////////////////////////////////////////////////////
template <typename Type>
class Comparator {};
template <>
class Comparator<int> {
public:
static const char* type_str() {
return "integer";
}
static int generate() {
static int q(1);
return q++;
//return rand();
}
static bool compare(int a, int b, int index, int errors) {
if (a != b) {
if (errors < 100) {
printf("*** error: [%d] expected=%d, actual=%d\n", index, a, b);
}
return false;
}
return true;
}
};
template <>
class Comparator<float> {
private:
union Float_t { float f; int i; };
public:
static const char* type_str() {
return "float";
}
static float generate() {
return static_cast<float>(rand()) / RAND_MAX;
}
static bool compare(float a, float b, int index, int errors) {
union fi_t { float f; int32_t i; };
fi_t fa, fb;
fa.f = a;
fb.f = b;
auto d = std::abs(fa.i - fb.i);
if (d > FLOAT_ULP) {
if (errors < 100) {
printf("*** error: [%d] expected=%f, actual=%f\n", index, a, b);
}
return false;
}
return true;
}
};
static void matmul_cpu(TYPE* out, const TYPE* A, const TYPE* B, uint32_t width, uint32_t height) {
for (uint32_t row = 0; row < height; ++row) {
for (uint32_t col = 0; col < width; ++col) {
TYPE sum(0);
for (uint32_t e = 0; e < width; ++e) {
TYPE a = A[row * width + e];
TYPE b = B[e * width + col];
TYPE c = a * b;
sum += c;
//printf("out[%d][%d]=%d; a=%d, b=%d, c=%d\n", row, col, sum, a, b, c);
}
out[row * width + col] = sum;
}
}
}
const char* kernel_file = "kernel.vxbin";
uint32_t size = 16;
uint32_t tile_size = 4;
vx_device_h device = nullptr;
vx_buffer_h A_buffer = nullptr;
vx_buffer_h B_buffer = nullptr;
vx_buffer_h C_buffer = nullptr;
vx_buffer_h krnl_buffer = nullptr;
vx_buffer_h args_buffer = nullptr;
kernel_arg_t kernel_arg = {};
static void show_usage() {
std::cout << "Vortex Test." << std::endl;
std::cout << "Usage: [-k: kernel] [-n matrix_size] [-t:tile_size] [-h: help]" << std::endl;
}
static void parse_args(int argc, char **argv) {
int c;
while ((c = getopt(argc, argv, "n:t:k:h?")) != -1) {
switch (c) {
case 'n':
size = atoi(optarg);
break;
case 't':
tile_size = atoi(optarg);
break;
case 'k':
kernel_file = optarg;
break;
case 'h':
case '?': {
show_usage();
exit(0);
} break;
default:
show_usage();
exit(-1);
}
}
}
void cleanup() {
if (device) {
vx_mem_free(A_buffer);
vx_mem_free(B_buffer);
vx_mem_free(C_buffer);
vx_mem_free(krnl_buffer);
vx_mem_free(args_buffer);
vx_dev_close(device);
}
}
int main(int argc, char *argv[]) {
// parse command arguments
parse_args(argc, argv);
if ((size / tile_size) * tile_size != size) {
printf("Error: matrix size %d must be a multiple of tile size %d\n", size, tile_size);
return -1;
}
std::srand(50);
// open device connection
std::cout << "open device connection" << std::endl;
RT_CHECK(vx_dev_open(&device));
uint32_t size_sq = size * size;
uint32_t buf_size = size_sq * sizeof(TYPE);
uint32_t group_size = tile_size * tile_size;
uint32_t num_groups = (size * size) / group_size;
uint32_t local_mem = 2 * num_groups * group_size * sizeof(TYPE);
std::cout << "data type: " << Comparator<TYPE>::type_str() << std::endl;
std::cout << "matrix size: " << size << "x" << size << std::endl;
std::cout << "tile size: " << tile_size << "x" << tile_size << std::endl;
std::cout << "group size: " << group_size << std::endl;
std::cout << "number of groups: " << num_groups << std::endl;
std::cout << "local memory: " << local_mem << " bytes" << std::endl;
kernel_arg.num_groups = num_groups;
kernel_arg.group_size = group_size;
kernel_arg.size = size;
kernel_arg.tile_size = tile_size;
// check work group capacity
uint64_t num_warps, num_threads;
RT_CHECK(vx_dev_caps(device, VX_CAPS_NUM_WARPS, &num_warps));
RT_CHECK(vx_dev_caps(device, VX_CAPS_NUM_THREADS, &num_threads));
uint32_t threads_per_core = num_warps * num_threads;
RT_CHECK(threads_per_core < group_size);
// check local memory capacity
uint64_t max_local_mem;
RT_CHECK(vx_dev_caps(device, VX_CAPS_LOCAL_MEM_SIZE, &max_local_mem));
RT_CHECK(max_local_mem < local_mem);
// acquire local memory address
RT_CHECK(vx_dev_caps(device, VX_CAPS_LOCAL_MEM_ADDR, &kernel_arg.local_addr));
// allocate device memory
std::cout << "allocate device memory" << std::endl;
RT_CHECK(vx_mem_alloc(device, buf_size, VX_MEM_READ, &A_buffer));
RT_CHECK(vx_mem_address(A_buffer, &kernel_arg.A_addr));
RT_CHECK(vx_mem_alloc(device, buf_size, VX_MEM_READ, &B_buffer));
RT_CHECK(vx_mem_address(B_buffer, &kernel_arg.B_addr));
RT_CHECK(vx_mem_alloc(device, buf_size, VX_MEM_WRITE, &C_buffer));
RT_CHECK(vx_mem_address(C_buffer, &kernel_arg.C_addr));
std::cout << "local_addr=0x" << std::hex << kernel_arg.local_addr << std::endl;
std::cout << "A_addr=0x" << std::hex << kernel_arg.A_addr << std::endl;
std::cout << "B_addr=0x" << std::hex << kernel_arg.B_addr << std::endl;
std::cout << "C_addr=0x" << std::hex << kernel_arg.C_addr << std::endl;
// allocate host buffers
std::cout << "allocate host buffers" << std::endl;
std::vector<TYPE> h_A(size_sq);
std::vector<TYPE> h_B(size_sq);
std::vector<TYPE> h_C(size_sq);
// generate source data
for (uint32_t i = 0; i < size_sq; ++i) {
h_A[i] = Comparator<TYPE>::generate();
}
for (uint32_t i = 0; i < size_sq; ++i) {
h_B[i] = Comparator<TYPE>::generate();
}
// upload source buffer0
std::cout << "upload source buffer0" << std::endl;
RT_CHECK(vx_copy_to_dev(A_buffer, h_A.data(), 0, buf_size));
// upload source buffer1
std::cout << "upload source buffer1" << std::endl;
RT_CHECK(vx_copy_to_dev(B_buffer, h_B.data(), 0, buf_size));
// upload program
std::cout << "upload program" << std::endl;
RT_CHECK(vx_upload_kernel_file(device, kernel_file, &krnl_buffer));
// upload kernel argument
std::cout << "upload kernel argument" << std::endl;
RT_CHECK(vx_upload_bytes(device, &kernel_arg, sizeof(kernel_arg_t), &args_buffer));
// start device
std::cout << "start device" << std::endl;
RT_CHECK(vx_start(device, krnl_buffer, args_buffer));
// wait for completion
std::cout << "wait for completion" << std::endl;
RT_CHECK(vx_ready_wait(device, VX_MAX_TIMEOUT));
// download destination buffer
std::cout << "download destination buffer" << std::endl;
RT_CHECK(vx_copy_from_dev(h_C.data(), C_buffer, 0, buf_size));
// verify result
std::cout << "verify result" << std::endl;
int errors = 0;
{
std::vector<TYPE> h_ref(size_sq);
matmul_cpu(h_ref.data(), h_A.data(), h_B.data(), size, size);
for (uint32_t i = 0; i < h_ref.size(); ++i) {
if (!Comparator<TYPE>::compare(h_C[i], h_ref[i], i, errors)) {
++errors;
}
}
}
// cleanup
std::cout << "cleanup" << std::endl;
cleanup();
if (errors != 0) {
std::cout << "Found " << std::dec << errors << " errors!" << std::endl;
std::cout << "FAILED!" << std::endl;
return errors;
}
std::cout << "PASSED!" << std::endl;
return 0;
}

tests/regression/sgemm/kernel.cpp

@@ -22,12 +22,12 @@ void kernel_body(uint32_t task_id, kernel_arg_t* __UNIFORM__ arg) {
col = task_id % size;
}
TYPE sum(0);
for (int e = 0; e < size; ++e) {
sum += A[row * size + e] * B[e * size + col];
}
C[task_id] = sum;
}
int main() {

tests/regression/sgemm/main.cpp

@@ -140,13 +140,13 @@ int main(int argc, char *argv[]) {
std::cout << "open device connection" << std::endl;
RT_CHECK(vx_dev_open(&device));
uint32_t size_sq = size * size;
uint32_t buf_size = size_sq * sizeof(TYPE);
std::cout << "data type: " << Comparator<TYPE>::type_str() << std::endl;
std::cout << "matrix size: " << size << "x" << size << std::endl;
kernel_arg.num_tasks = size_sq;
kernel_arg.size = size;
kernel_arg.log2_size = log2(size);
@@ -159,19 +159,17 @@ int main(int argc, char *argv[]) {
RT_CHECK(vx_mem_alloc(device, buf_size, VX_MEM_WRITE, &C_buffer));
RT_CHECK(vx_mem_address(C_buffer, &kernel_arg.C_addr));
std::cout << "dev_argA=0x" << std::hex << kernel_arg.A_addr << std::endl;
std::cout << "dev_argB=0x" << std::hex << kernel_arg.B_addr << std::endl;
std::cout << "dev_argC=0x" << std::hex << kernel_arg.C_addr << std::endl;
std::cout << "A_addr=0x" << std::hex << kernel_arg.A_addr << std::endl;
std::cout << "B_addr=0x" << std::hex << kernel_arg.B_addr << std::endl;
std::cout << "C_addr=0x" << std::hex << kernel_arg.C_addr << std::endl;
// generate source data
std::vector<TYPE> h_A(size_sq);
std::vector<TYPE> h_B(size_sq);
std::vector<TYPE> h_C(size_sq);
for (uint32_t i = 0; i < size_sq; ++i) {
h_A[i] = Comparator<TYPE>::generate();
h_B[i] = Comparator<TYPE>::generate();
}
// upload matrix A buffer
@@ -216,7 +214,7 @@ int main(int argc, char *argv[]) {
std::cout << "verify result" << std::endl;
int errors = 0;
{
std::vector<TYPE> h_ref(size_sq);
matmul_cpu(h_ref.data(), h_A.data(), h_B.data(), size, size);
for (uint32_t i = 0; i < h_ref.size(); ++i) {