test program draft

Euna Kim 2019-11-23 03:36:34 -05:00
parent 0d763fd735
commit 715dc3089e
13 changed files with 6378 additions and 0 deletions

@@ -0,0 +1,41 @@
LIB_PATH = ../../runtime
COMP = /nethome/ekim79/riscv-gnu-toolchain/drops/bin/riscv32-unknown-elf-gcc
CC_FLAGS = -ffreestanding -O0 -Wl,--gc-sections -nostartfiles -nostdlib -nodefaultlibs -Wl,-Bstatic,-T,$(LIB_PATH)/mains/vortex_link.ld -march=rv32imv -mabi=ilp32
DMP = /nethome/ekim79/riscv-gnu-toolchain/drops/bin/riscv32-unknown-elf-objdump
CPY = /nethome/ekim79/riscv-gnu-toolchain/drops/bin/riscv32-unknown-elf-objcopy
# VX_STR = ../../startup/vx_start.s
NEWLIB = $(LIB_PATH)/newlib/newlib.c
VX_STR = $(LIB_PATH)/startup/vx_start.s
VX_INT = $(LIB_PATH)/intrinsics/vx_intrinsics.s
VX_IO = $(LIB_PATH)/io/vx_io.s $(LIB_PATH)/io/vx_io.c
VX_API = $(LIB_PATH)/vx_api/vx_api.c
VX_TEST = $(LIB_PATH)/tests/tests.c
VX_FIO = $(LIB_PATH)/fileio/fileio.s
VX_VEC1 = vx_vec_vvaddint32.s
VX_VEC2 = vx_vec_saxpy.s #float --> int
VX_VEC3 = vx_vec_sgemm_float.s #float --> int
VX_VEC4 = vx_vec_vsadd.s
VX_VEC5 = vx_vec_memcpy.s
LIBS = /nethome/ekim79/riscv-gnu-toolchain/drops/riscv32-unknown-elf/lib/libc.a /nethome/ekim79/riscv-gnu-toolchain/drops/riscv32-unknown-elf/lib/libstdc++.a -static-libgcc -lgcc
VX_MAIN = vx_vec_benchmark
all: HEX DUMP ELF

DUMP: ELF
	$(DMP) -D $(VX_MAIN).elf > $(VX_MAIN).dump

HEX: ELF
	$(CPY) -O ihex $(VX_MAIN).elf $(VX_MAIN).hex

# Build one kernel at a time: swap $(VX_VEC1) for $(VX_VEC2)..$(VX_VEC5) in the
# recipe below (the commented lines are the alternate kernels).
ELF:
	$(COMP) $(CC_FLAGS) $(VX_STR) $(VX_VEC1) $(VX_FIO) $(NEWLIB) $(VX_INT) $(VX_IO) $(VX_API) $(VX_TEST) $(VX_MAIN).c $(LIBS) -Iinclude -o $(VX_MAIN).elf
#	$(COMP) $(CC_FLAGS) $(VX_STR) $(VX_VEC2) $(VX_FIO) $(NEWLIB) $(VX_INT) $(VX_IO) $(VX_API) $(VX_TEST) $(VX_MAIN).c $(LIBS) -Iinclude -o $(VX_MAIN).elf
#	$(COMP) $(CC_FLAGS) $(VX_STR) $(VX_VEC3) $(VX_FIO) $(NEWLIB) $(VX_INT) $(VX_IO) $(VX_API) $(VX_TEST) $(VX_MAIN).c $(LIBS) -Iinclude -o $(VX_MAIN).elf
#	$(COMP) $(CC_FLAGS) $(VX_STR) $(VX_VEC4) $(VX_FIO) $(NEWLIB) $(VX_INT) $(VX_IO) $(VX_API) $(VX_TEST) $(VX_MAIN).c $(LIBS) -Iinclude -o $(VX_MAIN).elf
#	$(COMP) $(CC_FLAGS) $(VX_STR) $(VX_VEC5) $(VX_FIO) $(NEWLIB) $(VX_INT) $(VX_IO) $(VX_API) $(VX_TEST) $(VX_MAIN).c $(LIBS) -Iinclude -o $(VX_MAIN).elf

@@ -0,0 +1,9 @@
1. Add benchmarks under Vortex/benchmarks/:
1.1 bfs --> BLAS SpMV approach (see the sketch after this list)
1.2 kmeans // stage 2
1.3 saxpy --> sample
1.4 sfilter // stage 2
1.5 sgemm --> sample, modified (float --> int)
1.6 vecadd --> sample
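For the bfs item, the "BLAS SpMV approach" is the usual reformulation of one BFS level expansion as a sparse matrix-vector product over a boolean (OR-AND) semiring; a minimal C sketch of one frontier step, with hypothetical CSR names that are not part of this commit:

    /* One BFS frontier expansion as SpMV over the adjacency matrix (CSR layout).
     * Illustrative sketch of the "BLAS SpMV approach"; names are hypothetical. */
    void bfs_spmv_step(int nv, const int *row_ptr, const int *col_idx,
                       const int *frontier, int *next)
    {
        for (int v = 0; v < nv; ++v) {
            next[v] = 0;
            for (int e = row_ptr[v]; e < row_ptr[v + 1]; ++e)
                next[v] |= frontier[col_idx[e]];  /* OR-AND instead of +-* */
        }
    }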

@@ -0,0 +1,156 @@
#include <stdio.h>
#include <stdlib.h>
#include "../../runtime/intrinsics/vx_intrinsics.h"
#include "vx_vec_benchmark.h"
int main()
{
vx_tmc(1);
int n = 5;
int scalar = 10; // used by the vsadd and saxpy sections below
int *a = (int*)malloc(sizeof(int) * n); //{1, 1, 1, 1, 1};
int *b = (int*)malloc(sizeof(int) * n); //{1, 1, 1, 1, 1};
int *c = (int*)malloc(sizeof(int) * n); //{1, 1, 1, 1, 1};
for (int i = 0; i < n; ++i) { a[i] = 1; b[i] = 2; c[i] = 5; }
#if 1
//---------------------------------------------------------------
/* vvaddint32
* # vector-vector add routine of 32-bit integers
* # void vvaddint32(size_t n, const int*x, const int*y, int*z)
* # { for (size_t i=0; i<n; i++) { z[i]=x[i]+y[i]; } } */
printf("vvaddint...\na[%d]: ", n);
for(int i = 0; i < n; ++i) printf("%d ", a[i]);
printf("\nb[%d]: ", n);
for(int i = 0; i < n; ++i) printf("%d ", b[i]);
printf("\nc[%d] = a[%d] + b[%d]: ", n, n, n);
for(int i = 0; i < n; ++i) printf("%d ", c[i]);
vx_vec_vvaddint32(n, a, b, c);
for(int i = 0; i < n; ++i)
{
if(c[i] != (a[i]+b[i]))
{
printf("\n<vddint32> failed at <index: %d>! \n", i);
return 1;
}
}
printf("\nPASSED.......................... <vddint32> \n");
#endif
#if 0
//---------------------------------------------------------------
/* # vector-scalar add
# for (i=0; i<N; i++) { C[i] = A[i] + B; } // 32-bit ints */
for (int i = 0; i < n; ++i) { a[i] = 1; b[i] = 1; }
printf("vsadd...scalar:%d\na[%d]: ", scalar, n);
for(int i = 0; i < n; ++i) printf("%d \n", a[i]);
printf("\nb: %d", scalar);
vx_vec_vsadd(n, a, scalar);
for(int i = 0; i < n; ++i)
{
if(a[i] != (b[i] + scalar)) // vsadd adds the scalar; b[] holds a[]'s original values
{
printf("\n<vsadd> failed at <index: %d>! \n", i);
return 1;
}
}
printf("\nPASSED.......................... <vsadd> \n");
#endif
#if 0
//---------------------------------------------------------------
/* # memory copy
# void *memcpy(void* dest, const void* src, size_t n) */
for (int i = 0; i < n; ++i) { a[i] = 1; b[i] = 2;}
printf("memcpy\na[%d]: ", n);
for(int i = 0; i < n; ++i) printf("%d \n", a[i]);
printf("\nb[%d]: ", n);
for(int i = 0; i < n; ++i) printf("%d \n", b[i]);
vx_vec_memcpy(a, b, n);
for(int i = 0; i < n; ++i)
{
if(a[i] != b[i])
{
printf("\n<memcpy> failed at <index: %d>! \n", i);
return 1;
}
}
printf("\nPASSED.......................... <memcpy> \n");
//---------------------------------------------------------------
/* # void saxpy(size_t n, const float a, const float *x, float *y)
# ==> convert to int!!
# void saxpy(size_t n, const int a, const int *x, int *y)
# {
# size_t i;
# for (i=0; i<n; i++) y[i] = a * x[i] + y[i];
# } */
for (int i = 0; i < n; ++i) { a[i] = 1; b[i] = 2; c[i] = 2;}
printf("saxpy\na[%d]: ", n);
for(int i = 0; i < n; ++i) printf("%d \n", a[i]);
printf("\nb[%d]: ", n);
for(int i = 0; i < n; ++i) printf("%d \n", b[i]);
vx_vec_saxpy(n, scalar, a, b);
for(int i = 0; i < n; ++i)
{
if(b[i] != ((a[i] * scalar) + c[i]))
{
printf("\n<saxpy> failed at <index: %d>! \n", i);
return 1;
}
}
printf("\nPASSED.......................... <saxpy> \n");
//---------------------------------------------------------------
/* # void sgemm_nn(size_t n, size_t m, size_t k, const float*a, // m * k matrix
# size_t lda, const float*b, // k * n matrix
# size_t ldb, float*c, // m * n matrix
# size_t ldc)
# c += a*b (alpha=1, no transpose on input matrices)
# matrices stored in C row-major order */
int m = 8;
int k = 8;
n = 8; // reuse n from above; a second "int n" would not compile
int lda = 8; // leading dimensions must span a full row (k or n = 8)
int ldb = 8;
int ldc = 8;
int* a1 = (int*)malloc(sizeof(int) * m * k);
int* b1 = (int*)malloc(sizeof(int) * k * n);
int* c1 = (int*)malloc(sizeof(int) * m * n);
for(int i = 0; i < (m * k); ++i) a1[i] = 1;
for(int i = 0; i < (k * n); ++i) b1[i] = 1;
for(int i = 0; i < (m * n); ++i) c1[i] = 1;
printf("sgemm_nn\na[%d]: ", n);
for(int i = 0; i < n; ++i) printf("%d \n", a1[i]);
printf("\nb[%d]: ", n);
for(int i = 0; i < n; ++i) printf("%d \n", b1[i]);
vx_vec_sgemm_nn(n, m, k, a1, lda, b1, ldb, c1, ldc);
//for(int i = 0; i < n; ++i)
//{
// if(b[i] != ((a[i] * scalar) + c[i]))
// {
// printf("\n<sgemm_nn> failed at <index: %d>! \n", i);
// return;
// }
//}
printf("\nNOT TESTED.......................... <sgemm_nn> \n");
//---------------------------------------------------------------
#endif
vx_tmc(0);
return 0;
}
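The pass/fail checks above are repeated inline for each kernel; a small helper of the same shape could factor them out. A sketch with a hypothetical name, not part of this commit:

    /* Hypothetical helper: returns the index of the first mismatch against a
     * scalar reference result, or -1 if all elements agree. */
    static int first_mismatch(int n, const int *got, const int *want)
    {
        for (int i = 0; i < n; ++i)
            if (got[i] != want[i])
                return i;
        return -1;
    }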

@@ -0,0 +1,16 @@
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
void vx_vec_vvaddint32(int n, int* a, int* b, int *c);
// Prototypes for the kernels behind the #if 0 blocks in vx_vec_benchmark.c;
// uncomment each as its kernel is enabled in the build.
//void vx_vec_vsadd(int n, int* a, int scalar);
//void vx_vec_memcpy(int* a, int* b, int n);
//void vx_vec_saxpy(int n, int scalar, int* a, int* b);
//void vx_vec_sgemm_nn(int n, int m, int k, int* a1, int lda, int* b1, int ldb, int* c1, int ldc);
#ifdef __cplusplus
}
#endif

File diff suppressed because it is too large.

@@ -0,0 +1,17 @@
.type vx_vec_memcpy, @function
.global vx_vec_memcpy
# void *memcpy(void* dest, const void* src, size_t n)
# a0=dest, a1=src, a2=n
#
vx_vec_memcpy:
mv a3, a0 # Copy destination
loop:
vsetvli t0, a2, e8,m8 # Vectors of 8b; vl recomputed each strip so the tail is handled
vlb.v v0, (a1) # Load bytes
add a1, a1, t0 # Bump pointer
sub a2, a2, t0 # Decrement count
vsb.v v0, (a3) # Store bytes
add a3, a3, t0 # Bump pointer
bnez a2, loop # Any more?
ret # Return
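The strip-mining pattern above (vsetvli inside the loop) corresponds to the following C model, where VLMAX stands in for the hardware's maximum vector length. An illustrative sketch, not part of this commit:

    #include <stddef.h>
    #define VLMAX 64 /* stand-in for the hardware maximum vector length */

    /* Scalar model of the strip-mined vector memcpy above. */
    void *memcpy_model(void *dest, const void *src, size_t n)
    {
        unsigned char *d = dest;
        const unsigned char *s = src;
        while (n > 0) {
            size_t vl = n < VLMAX ? n : VLMAX; /* what vsetvli computes */
            for (size_t i = 0; i < vl; ++i)    /* vlb.v + vsb.v */
                d[i] = s[i];
            d += vl; s += vl;                  /* bump pointers */
            n -= vl;                           /* decrement count */
        }
        return dest;
    }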

@@ -0,0 +1,28 @@
.type vx_vec_saxpy, @function
.global vx_vec_saxpy
# void
# saxpy(size_t n, const float a, const float *x, float *y)
# {
# size_t i;
# for (i=0; i<n; i++)
# y[i] = a * x[i] + y[i];
# }
#
# register arguments:
# a0 n
# fa0 a
# a1 x
# a2 y
vx_vec_saxpy:
saxpy:
vsetvli a4, a0, e32, m8 # vl recomputed each strip so the tail is handled
vlw.v v0, (a1)
sub a0, a0, a4
slli a4, a4, 2 # Elements done -> bytes
add a1, a1, a4
vlw.v v8, (a2)
vfmacc.vf v8, fa0, v0 # y[i] = a * x[i] + y[i]
vsw.v v8, (a2)
add a2, a2, a4
bnez a0, saxpy
ret
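The Makefile marks this kernel "float --> int". Going by the commented prototype in vx_vec_benchmark.h and the check in the test program, the intended integer variant computes the following; a plain C sketch:

    /* Scalar sketch of the intended integer saxpy: matches the test's check
     * b[i] == a[i] * scalar + b_original[i]. */
    void saxpy_int(int n, int scalar, const int *a, int *b)
    {
        for (int i = 0; i < n; ++i)
            b[i] = scalar * a[i] + b[i];
    }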

@@ -0,0 +1,28 @@
.type vx_vec_saxpy_float, @function
.global vx_vec_saxpy_float
# void
# saxpy(size_t n, const float a, const float *x, float *y)
# {
# size_t i;
# for (i=0; i<n; i++)
# y[i] = a * x[i] + y[i];
# }
#
# register arguments:
# a0 n
# fa0 a
# a1 x
# a2 y
vx_vec_saxpy_float:
saxpy_float:
vsetvli a4, a0, e32, m8 # vl recomputed each strip so the tail is handled
vlw.v v0, (a1)
sub a0, a0, a4
slli a4, a4, 2 # Elements done -> bytes
add a1, a1, a4
vlw.v v8, (a2)
vfmacc.vf v8, fa0, v0 # y[i] = a * x[i] + y[i]
vsw.v v8, (a2)
add a2, a2, a4
bnez a0, saxpy_float
ret

@@ -0,0 +1,220 @@
.type vx_vec_sgemm_nn, @function
.global vx_vec_sgemm_nn
# RV64IDV system (note: the Makefile targets rv32imv, and the test program marks sgemm as NOT TESTED)
#
# void
# sgemm_nn(size_t n,
# size_t m,
# size_t k,
# const float*a, // m * k matrix
# size_t lda,
# const float*b, // k * n matrix
# size_t ldb,
# float*c, // m * n matrix
# size_t ldc)
#
# c += a*b (alpha=1, no transpose on input matrices)
# matrices stored in C row-major order
#define n a0
#define m a1
#define k a2
#define ap a3
#define astride a4
#define bp a5
#define bstride a6
#define cp a7
#define cstride t0
#define kt t1
#define nt t2
#define bnp t3
#define cnp t4
#define akp t5
#define bkp s0
#define nvl s1
#define ccp s2
#define amp s3
# Use args as additional temporaries
#define ft12 fa0
#define ft13 fa1
#define ft14 fa2
#define ft15 fa3
# This version holds a 16*VLMAX block of the C matrix in vector registers
# in the inner loop, but does no other cache or TLB tiling.
vx_vec_sgemm_nn:
#sgemm_nn:
# Concrete frame layout chosen here; the original left FRAMESIZE/OFFSET symbolic.
addi sp, sp, -32 # Frame for four 8-byte save slots (RV64)
sd s0, 0(sp)
sd s1, 8(sp)
sd s2, 16(sp)
sd s3, 24(sp) # amp lives in s3, so it must be saved as well
# Check for zero size matrices
beqz n, exit
beqz m, exit
beqz k, exit
# Convert element strides to byte strides.
ld cstride, 32(sp) # Get ldc, the 9th argument, from the caller's stack (just above our frame)
slli astride, astride, 2
slli bstride, bstride, 2
slli cstride, cstride, 2
slti t6, m, 16
bnez t6, end_rows
c_row_loop: # Loop across rows of C blocks
mv nt, n # Initialize n counter for next row of C blocks
mv bnp, bp # Initialize B n-loop pointer to start
mv cnp, cp # Initialize C n-loop pointer
c_col_loop: # Loop across one row of C blocks
vsetvli nvl, nt, e32 # 32-bit vectors, LMUL=1
mv akp, ap # reset pointer into A to beginning
mv bkp, bnp # step to next column in B matrix
# Initialize current C submatrix block from memory.
vlw.v v0, (cnp); add ccp, cnp, cstride;
vlw.v v1, (ccp); add ccp, ccp, cstride;
vlw.v v2, (ccp); add ccp, ccp, cstride;
vlw.v v3, (ccp); add ccp, ccp, cstride;
vlw.v v4, (ccp); add ccp, ccp, cstride;
vlw.v v5, (ccp); add ccp, ccp, cstride;
vlw.v v6, (ccp); add ccp, ccp, cstride;
vlw.v v7, (ccp); add ccp, ccp, cstride;
vlw.v v8, (ccp); add ccp, ccp, cstride;
vlw.v v9, (ccp); add ccp, ccp, cstride;
vlw.v v10, (ccp); add ccp, ccp, cstride;
vlw.v v11, (ccp); add ccp, ccp, cstride;
vlw.v v12, (ccp); add ccp, ccp, cstride;
vlw.v v13, (ccp); add ccp, ccp, cstride;
vlw.v v14, (ccp); add ccp, ccp, cstride;
vlw.v v15, (ccp)
mv kt, k # Initialize inner loop counter
# Inner loop scheduled assuming 4-clock occupancy of vfmacc instruction and single-issue pipeline
# Software pipeline loads
flw ft0, (akp); add amp, akp, astride;
flw ft1, (amp); add amp, amp, astride;
flw ft2, (amp); add amp, amp, astride;
flw ft3, (amp); add amp, amp, astride;
# Get vector from B matrix
vlw.v v16, (bkp)
# Loop on inner dimension for current C block
k_loop:
vfmacc.vf v0, ft0, v16
add bkp, bkp, bstride
flw ft4, (amp)
add amp, amp, astride
vfmacc.vf v1, ft1, v16
addi kt, kt, -1 # Decrement k counter
flw ft5, (amp)
add amp, amp, astride
vfmacc.vf v2, ft2, v16
flw ft6, (amp)
add amp, amp, astride
flw ft7, (amp)
vfmacc.vf v3, ft3, v16
add amp, amp, astride
flw ft8, (amp)
add amp, amp, astride
vfmacc.vf v4, ft4, v16
flw ft9, (amp)
add amp, amp, astride
vfmacc.vf v5, ft5, v16
flw ft10, (amp)
add amp, amp, astride
vfmacc.vf v6, ft6, v16
flw ft11, (amp)
add amp, amp, astride
vfmacc.vf v7, ft7, v16
flw ft12, (amp)
add amp, amp, astride
vfmacc.vf v8, ft8, v16
flw ft13, (amp)
add amp, amp, astride
vfmacc.vf v9, ft9, v16
flw ft14, (amp)
add amp, amp, astride
vfmacc.vf v10, ft10, v16
flw ft15, (amp)
add amp, amp, astride
addi akp, akp, 4 # Move to next column of a
vfmacc.vf v11, ft11, v16
beqz kt, 1f # Don't load past end of matrix
flw ft0, (akp)
add amp, akp, astride
1: vfmacc.vf v12, ft12, v16
beqz kt, 1f
flw ft1, (amp)
add amp, amp, astride
1: vfmacc.vf v13, ft13, v16
beqz kt, 1f
flw ft2, (amp)
add amp, amp, astride
1: vfmacc.vf v14, ft14, v16
beqz kt, 1f # Exit out of loop
flw ft3, (amp)
add amp, amp, astride
vfmacc.vf v15, ft15, v16
vlw.v v16, (bkp) # Get next vector from B matrix, overlap loads with jump stalls
j k_loop
1: vfmacc.vf v15, ft15, v16
# Save C matrix block back to memory
vsw.v v0, (cnp); add ccp, cnp, cstride;
vsw.v v1, (ccp); add ccp, ccp, cstride;
vsw.v v2, (ccp); add ccp, ccp, cstride;
vsw.v v3, (ccp); add ccp, ccp, cstride;
vsw.v v4, (ccp); add ccp, ccp, cstride;
vsw.v v5, (ccp); add ccp, ccp, cstride;
vsw.v v6, (ccp); add ccp, ccp, cstride;
vsw.v v7, (ccp); add ccp, ccp, cstride;
vsw.v v8, (ccp); add ccp, ccp, cstride;
vsw.v v9, (ccp); add ccp, ccp, cstride;
vsw.v v10, (ccp); add ccp, ccp, cstride;
vsw.v v11, (ccp); add ccp, ccp, cstride;
vsw.v v12, (ccp); add ccp, ccp, cstride;
vsw.v v13, (ccp); add ccp, ccp, cstride;
vsw.v v14, (ccp); add ccp, ccp, cstride;
vsw.v v15, (ccp)
# Following tail instructions should be scheduled earlier in free slots during C block save.
# Leaving here for clarity.
# Bump pointers for loop across blocks in one row
slli t6, nvl, 2
add cnp, cnp, t6 # Move C block pointer over
add bnp, bnp, t6 # Move B block pointer over
sub nt, nt, nvl # Decrement element count in n dimension
bnez nt, c_col_loop # Any more to do?
# Move to next set of rows
addi m, m, -16 # Did 16 rows above
slli t6, astride, 4 # Multiply astride by 16
add ap, ap, t6 # Move A matrix pointer down 16 rows
slli t6, cstride, 4 # Multiply cstride by 16
add cp, cp, t6 # Move C matrix pointer down 16 rows
slti t6, m, 16
beqz t6, c_row_loop
# Handle end of matrix with fewer than 16 rows.
# Can use smaller versions of above decreasing in powers-of-2 depending on code-size concerns.
end_rows:
# Not done.
exit:
ld s0, 0(sp)
ld s1, 8(sp)
ld s2, 16(sp)
ld s3, 24(sp)
addi sp, sp, 32
ret
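As a reference for what the block above computes, here is a plain scalar version of sgemm_nn with row-major storage and explicit leading dimensions. A sketch only; the float --> int conversion noted in the Makefile would swap float for int:

    #include <stddef.h>

    /* Scalar reference: c += a*b, row-major, alpha = 1, no transposes. */
    void sgemm_nn_ref(size_t n, size_t m, size_t k,
                      const float *a, size_t lda,  /* m x k, row stride lda */
                      const float *b, size_t ldb,  /* k x n, row stride ldb */
                      float *c, size_t ldc)        /* m x n, row stride ldc */
    {
        for (size_t i = 0; i < m; ++i)
            for (size_t j = 0; j < n; ++j)
                for (size_t p = 0; p < k; ++p)
                    c[i * ldc + j] += a[i * lda + p] * b[p * ldb + j];
    }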

@@ -0,0 +1,220 @@
.type vx_vec_sgemm_nn_float, @function
.global vx_vec_sgemm_nn_float
# RV64IDV system (note: the Makefile targets rv32imv, and the test program marks sgemm as NOT TESTED)
#
# void
# sgemm_nn(size_t n,
# size_t m,
# size_t k,
# const float*a, // m * k matrix
# size_t lda,
# const float*b, // k * n matrix
# size_t ldb,
# float*c, // m * n matrix
# size_t ldc)
#
# c += a*b (alpha=1, no transpose on input matrices)
# matrices stored in C row-major order
#define n a0
#define m a1
#define k a2
#define ap a3
#define astride a4
#define bp a5
#define bstride a6
#define cp a7
#define cstride t0
#define kt t1
#define nt t2
#define bnp t3
#define cnp t4
#define akp t5
#define bkp s0
#define nvl s1
#define ccp s2
#define amp s3
# Use args as additional temporaries
#define ft12 fa0
#define ft13 fa1
#define ft14 fa2
#define ft15 fa3
# This version holds a 16*VLMAX block of the C matrix in vector registers
# in the inner loop, but does no other cache or TLB tiling.
vx_vec_sgemm_nn_float:
#sgemm_nn:
# Concrete frame layout chosen here; the original left FRAMESIZE/OFFSET symbolic.
addi sp, sp, -32 # Frame for four 8-byte save slots (RV64)
sd s0, 0(sp)
sd s1, 8(sp)
sd s2, 16(sp)
sd s3, 24(sp) # amp lives in s3, so it must be saved as well
# Check for zero size matrices
beqz n, exit
beqz m, exit
beqz k, exit
# Convert element strides to byte strides.
ld cstride, 32(sp) # Get ldc, the 9th argument, from the caller's stack (just above our frame)
slli astride, astride, 2
slli bstride, bstride, 2
slli cstride, cstride, 2
slti t6, m, 16
bnez t6, end_rows
c_row_loop: # Loop across rows of C blocks
mv nt, n # Initialize n counter for next row of C blocks
mv bnp, bp # Initialize B n-loop pointer to start
mv cnp, cp # Initialize C n-loop pointer
c_col_loop: # Loop across one row of C blocks
vsetvli nvl, nt, e32 # 32-bit vectors, LMUL=1
mv akp, ap # reset pointer into A to beginning
mv bkp, bnp # step to next column in B matrix
# Initialize current C submatrix block from memory.
vlw.v v0, (cnp); add ccp, cnp, cstride;
vlw.v v1, (ccp); add ccp, ccp, cstride;
vlw.v v2, (ccp); add ccp, ccp, cstride;
vlw.v v3, (ccp); add ccp, ccp, cstride;
vlw.v v4, (ccp); add ccp, ccp, cstride;
vlw.v v5, (ccp); add ccp, ccp, cstride;
vlw.v v6, (ccp); add ccp, ccp, cstride;
vlw.v v7, (ccp); add ccp, ccp, cstride;
vlw.v v8, (ccp); add ccp, ccp, cstride;
vlw.v v9, (ccp); add ccp, ccp, cstride;
vlw.v v10, (ccp); add ccp, ccp, cstride;
vlw.v v11, (ccp); add ccp, ccp, cstride;
vlw.v v12, (ccp); add ccp, ccp, cstride;
vlw.v v13, (ccp); add ccp, ccp, cstride;
vlw.v v14, (ccp); add ccp, ccp, cstride;
vlw.v v15, (ccp)
mv kt, k # Initialize inner loop counter
# Inner loop scheduled assuming 4-clock occupancy of vfmacc instruction and single-issue pipeline
# Software pipeline loads
flw ft0, (akp); add amp, akp, astride;
flw ft1, (amp); add amp, amp, astride;
flw ft2, (amp); add amp, amp, astride;
flw ft3, (amp); add amp, amp, astride;
# Get vector from B matrix
vlw.v v16, (bkp)
# Loop on inner dimension for current C block
k_loop:
vfmacc.vf v0, ft0, v16
add bkp, bkp, bstride
flw ft4, (amp)
add amp, amp, astride
vfmacc.vf v1, ft1, v16
addi kt, kt, -1 # Decrement k counter
flw ft5, (amp)
add amp, amp, astride
vfmacc.vf v2, ft2, v16
flw ft6, (amp)
add amp, amp, astride
flw ft7, (amp)
vfmacc.vf v3, ft3, v16
add amp, amp, astride
flw ft8, (amp)
add amp, amp, astride
vfmacc.vf v4, ft4, v16
flw ft9, (amp)
add amp, amp, astride
vfmacc.vf v5, ft5, v16
flw ft10, (amp)
add amp, amp, astride
vfmacc.vf v6, ft6, v16
flw ft11, (amp)
add amp, amp, astride
vfmacc.vf v7, ft7, v16
flw ft12, (amp)
add amp, amp, astride
vfmacc.vf v8, ft8, v16
flw ft13, (amp)
add amp, amp, astride
vfmacc.vf v9, ft9, v16
flw ft14, (amp)
add amp, amp, astride
vfmacc.vf v10, ft10, v16
flw ft15, (amp)
add amp, amp, astride
addi akp, akp, 4 # Move to next column of a
vfmacc.vf v11, ft11, v16
beqz kt, 1f # Don't load past end of matrix
flw ft0, (akp)
add amp, akp, astride
1: vfmacc.vf v12, ft12, v16
beqz kt, 1f
flw ft1, (amp)
add amp, amp, astride
1: vfmacc.vf v13, ft13, v16
beqz kt, 1f
flw ft2, (amp)
add amp, amp, astride
1: vfmacc.vf v14, ft14, v16
beqz kt, 1f # Exit out of loop
flw ft3, (amp)
add amp, amp, astride
vfmacc.vf v15, ft15, v16
vlw.v v16, (bkp) # Get next vector from B matrix, overlap loads with jump stalls
j k_loop
1: vfmacc.vf v15, ft15, v16
# Save C matrix block back to memory
vsw.v v0, (cnp); add ccp, cnp, cstride;
vsw.v v1, (ccp); add ccp, ccp, cstride;
vsw.v v2, (ccp); add ccp, ccp, cstride;
vsw.v v3, (ccp); add ccp, ccp, cstride;
vsw.v v4, (ccp); add ccp, ccp, cstride;
vsw.v v5, (ccp); add ccp, ccp, cstride;
vsw.v v6, (ccp); add ccp, ccp, cstride;
vsw.v v7, (ccp); add ccp, ccp, cstride;
vsw.v v8, (ccp); add ccp, ccp, cstride;
vsw.v v9, (ccp); add ccp, ccp, cstride;
vsw.v v10, (ccp); add ccp, ccp, cstride;
vsw.v v11, (ccp); add ccp, ccp, cstride;
vsw.v v12, (ccp); add ccp, ccp, cstride;
vsw.v v13, (ccp); add ccp, ccp, cstride;
vsw.v v14, (ccp); add ccp, ccp, cstride;
vsw.v v15, (ccp)
# Following tail instructions should be scheduled earlier in free slots during C block save.
# Leaving here for clarity.
# Bump pointers for loop across blocks in one row
slli t6, nvl, 2
add cnp, cnp, t6 # Move C block pointer over
add bnp, bnp, t6 # Move B block pointer over
sub nt, nt, nvl # Decrement element count in n dimension
bnez nt, c_col_loop # Any more to do?
# Move to next set of rows
addi m, m, -16 # Did 16 rows above
slli t6, astride, 4 # Multiply astride by 16
add ap, ap, t6 # Move A matrix pointer down 16 rows
slli t6, cstride, 4 # Multiply cstride by 16
add cp, cp, t6 # Move C matrix pointer down 16 rows
slti t6, m, 16
beqz t6, c_row_loop
# Handle end of matrix with fewer than 16 rows.
# Can use smaller versions of above decreasing in powers-of-2 depending on code-size concerns.
end_rows:
# Not done.
exit:
ld s0, 0(sp)
ld s1, 8(sp)
ld s2, 16(sp)
ld s3, 24(sp)
addi sp, sp, 32
ret

@@ -0,0 +1,20 @@
.type vx_vec_vsadd, @function
.global vx_vec_vsadd
# vector-scalar add, in place (prototype in vx_vec_benchmark.h):
# void vx_vec_vsadd(int n, int* a, int scalar)
# for (i=0; i<n; i++) { a[i] = a[i] + scalar; } // 32-bit ints
#
# a0 = n, a1 = a, a2 = scalar
# (rewritten from the old vcfg/setvl draft into the vsetvli style used by the sibling kernels)
vx_vec_vsadd:
loop:
vsetvli t0, a0, e32 # Set vector length for 32-bit elements
vlw.v v0, (a1) # Load strip of a
vadd.vx v1, v0, a2 # Add the scalar to each element
vsw.v v1, (a1) # Store strip back in place
slli t1, t0, 2 # Multiply number done by 4 to get bytes
add a1, a1, t1 # Bump pointer
sub a0, a0, t0 # Subtract amount done
bnez a0, loop # Any more?
ret

@@ -0,0 +1,22 @@
.type vx_vec_vvaddint32, @function
.global vx_vec_vvaddint32
# vector-vector add routine of 32-bit integers
# void vvaddint32(size_t n, const int*x, const int*y, int*z)
# { for (size_t i=0; i<n; i++) { z[i]=x[i]+y[i]; } }
#
# a0 = n, a1 = x, a2 = y, a3 = z
# Non-vector instructions are indented
vx_vec_vvaddint32:
loop:
vsetvli t0, a0, e32 # Set vector length based on 32-bit vectors; recomputed each strip so the tail is handled
vlw.v v0, (a1) # Get first vector
sub a0, a0, t0 # Decrement number done
slli t0, t0, 2 # Multiply number done by 4 bytes
add a1, a1, t0 # Bump pointer
vlw.v v1, (a2) # Get second vector
add a2, a2, t0 # Bump pointer
vadd.vv v2, v0, v1 # Sum vectors
vsw.v v2, (a3) # Store result
add a3, a3, t0 # Bump pointer
bnez a0, loop # Loop back
ret # Finished
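Line for line, the strip-mined loop above behaves like this C model, with vl playing the role of t0. An illustrative sketch, with VLMAX standing in for the hardware limit:

    #include <stddef.h>
    #define VLMAX 64 /* stand-in for the hardware maximum vector length */

    /* C model of the strip-mined vvaddint32 loop above. */
    void vvaddint32_model(size_t n, const int *x, const int *y, int *z)
    {
        while (n > 0) {
            size_t vl = n < VLMAX ? n : VLMAX; /* vsetvli t0, a0, e32 */
            for (size_t i = 0; i < vl; ++i)    /* vlw.v / vadd.vv / vsw.v */
                z[i] = x[i] + y[i];
            x += vl; y += vl; z += vl;         /* pointer bumps (slli + add) */
            n -= vl;                           /* sub a0, a0, t0 */
        }
    }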

simX/test_benchmark.sh (executable file)

@@ -0,0 +1,6 @@
#!/bin/sh
echo start > results.txt
# echo ../kernel/vortex_test.hex
make
printf "Fasten your seatbelts ladies and gentlemen!!\n\n\n\n"
cd obj_dir && ./Vcache_simX -E -a rv32i --core ../../rvvector/benchmark_temp/vx_vec_benchmark.hex -s -b 1> emulator.debug