Rework performance counters such that VCS is happy
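The counter array is now indexed with the full CSR address constants instead of their [4:0] part-selects, the unused NR_EXTERNAL_COUNTERS parameter is dropped, and the SRAM-like read/write path rebuilds the full index as {RegOffset, addr_i} from a new RegOffset localparam (riscv::CSR_ML1_ICACHE_MISS>>5); the reset check also changes from ~rst_ni to !rst_ni. Below is a minimal, self-contained sketch of that index arithmetic, with made-up constant values standing in for the riscv package ones:

module regoffset_sketch;
    // hypothetical base address; in the actual module this role is played by riscv::CSR_ML1_ICACHE_MISS
    localparam logic [11:0] CsrCounterBase = 12'hCC0;
    // upper seven bits of the 12-bit CSR address, shared by the whole counter block
    localparam logic [6:0]  RegOffset = CsrCounterBase >> 5;

    logic [4:0] addr_i;

    initial begin
        for (int unsigned i = 0; i < 4; i++) begin
            addr_i = 5'(i);
            // {RegOffset, addr_i} is 7 + 5 = 12 bits, i.e. a full CSR address again,
            // so no [4:0] part-select of the package constants is needed anywhere
            $display("addr_i = %0d -> counter index = 0x%03h", addr_i, {RegOffset, addr_i});
        end
    end
endmodule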

Michael Schaffner, 2019-04-25 16:34:35 +02:00 (committed by Florian Zaruba)
parent 63021b9d4f
commit b8ce24c3ab


@@ -14,117 +14,118 @@
 import ariane_pkg::*;

-module perf_counters #(
-    int unsigned NR_EXTERNAL_COUNTERS = 1
-)(
+module perf_counters (
     input  logic                                    clk_i,
     input  logic                                    rst_ni,
     input  logic                                    debug_mode_i, // debug mode
     // SRAM like interface
     input  logic [4:0]                              addr_i,   // read/write address (up to 29 aux counters possible in riscv encoding.h)
     input  logic                                    we_i,     // write enable
     input  logic [63:0]                             data_i,   // data to write
     output logic [63:0]                             data_o,   // data to read
     // from commit stage
     input  scoreboard_entry_t [NR_COMMIT_PORTS-1:0] commit_instr_i, // the instruction we want to commit
     input  logic [NR_COMMIT_PORTS-1:0]              commit_ack_i,   // acknowledge that we are indeed committing
     // from L1 caches
     input  logic                                    l1_icache_miss_i,
     input  logic                                    l1_dcache_miss_i,
     // from MMU
     input  logic                                    itlb_miss_i,
     input  logic                                    dtlb_miss_i,
     // from issue stage
     input  logic                                    sb_full_i,
     // from frontend
     input  logic                                    if_empty_i,
     // from PC Gen
     input  exception_t                              ex_i,
     input  logic                                    eret_i,
     input  branchpredict_t                          resolved_branch_i
 );

-    logic [riscv::CSR_MIF_EMPTY[4:0] : riscv::CSR_ML1_ICACHE_MISS[4:0]][63:0] perf_counter_d, perf_counter_q;
+    localparam logic [6:0] RegOffset = riscv::CSR_ML1_ICACHE_MISS>>5;
+
+    logic [riscv::CSR_MIF_EMPTY : riscv::CSR_ML1_ICACHE_MISS][63:0] perf_counter_d, perf_counter_q;

     always_comb begin : perf_counters
         perf_counter_d = perf_counter_q;
         data_o = 'b0;
         // don't increment counters in debug mode
         if (!debug_mode_i) begin
             // ------------------------------
             // Update Performance Counters
             // ------------------------------
             if (l1_icache_miss_i)
-                perf_counter_d[riscv::CSR_ML1_ICACHE_MISS[4:0]] = perf_counter_q[riscv::CSR_ML1_ICACHE_MISS[4:0]] + 1'b1;
+                perf_counter_d[riscv::CSR_ML1_ICACHE_MISS] = perf_counter_q[riscv::CSR_ML1_ICACHE_MISS] + 1'b1;
             if (l1_dcache_miss_i)
-                perf_counter_d[riscv::CSR_ML1_DCACHE_MISS[4:0]] = perf_counter_q[riscv::CSR_ML1_DCACHE_MISS[4:0]] + 1'b1;
+                perf_counter_d[riscv::CSR_ML1_DCACHE_MISS] = perf_counter_q[riscv::CSR_ML1_DCACHE_MISS] + 1'b1;
             if (itlb_miss_i)
-                perf_counter_d[riscv::CSR_MITLB_MISS[4:0]] = perf_counter_q[riscv::CSR_MITLB_MISS[4:0]] + 1'b1;
+                perf_counter_d[riscv::CSR_MITLB_MISS] = perf_counter_q[riscv::CSR_MITLB_MISS] + 1'b1;
             if (dtlb_miss_i)
-                perf_counter_d[riscv::CSR_MDTLB_MISS[4:0]] = perf_counter_q[riscv::CSR_MDTLB_MISS[4:0]] + 1'b1;
+                perf_counter_d[riscv::CSR_MDTLB_MISS] = perf_counter_q[riscv::CSR_MDTLB_MISS] + 1'b1;
             // instruction related perf counters
             for (int unsigned i = 0; i < NR_COMMIT_PORTS-1; i++) begin
                 if (commit_ack_i[i]) begin
                     if (commit_instr_i[i].fu == LOAD)
-                        perf_counter_d[riscv::CSR_MLOAD[4:0]] = perf_counter_q[riscv::CSR_MLOAD[4:0]] + 1'b1;
+                        perf_counter_d[riscv::CSR_MLOAD] = perf_counter_q[riscv::CSR_MLOAD] + 1'b1;
                     if (commit_instr_i[i].fu == STORE)
-                        perf_counter_d[riscv::CSR_MSTORE[4:0]] = perf_counter_q[riscv::CSR_MSTORE[4:0]] + 1'b1;
+                        perf_counter_d[riscv::CSR_MSTORE] = perf_counter_q[riscv::CSR_MSTORE] + 1'b1;
                     if (commit_instr_i[i].fu == CTRL_FLOW)
-                        perf_counter_d[riscv::CSR_MBRANCH_JUMP[4:0]] = perf_counter_q[riscv::CSR_MBRANCH_JUMP[4:0]] + 1'b1;
+                        perf_counter_d[riscv::CSR_MBRANCH_JUMP] = perf_counter_q[riscv::CSR_MBRANCH_JUMP] + 1'b1;
                     // The standard software calling convention uses register x1 to hold the return address on a call
                     // the unconditional jump is decoded as ADD op
                     if (commit_instr_i[i].fu == CTRL_FLOW && commit_instr_i[i].op == '0
                         && (commit_instr_i[i].rd == 'd1 || commit_instr_i[i].rd == 'd1))
-                        perf_counter_d[riscv::CSR_MCALL[4:0]] = perf_counter_q[riscv::CSR_MCALL[4:0]] + 1'b1;
+                        perf_counter_d[riscv::CSR_MCALL] = perf_counter_q[riscv::CSR_MCALL] + 1'b1;
                     // Return from call
                     if (commit_instr_i[i].op == JALR && (commit_instr_i[i].rd == 'd1 || commit_instr_i[i].rd == 'd1))
-                        perf_counter_d[riscv::CSR_MRET[4:0]] = perf_counter_q[riscv::CSR_MRET[4:0]] + 1'b1;
+                        perf_counter_d[riscv::CSR_MRET] = perf_counter_q[riscv::CSR_MRET] + 1'b1;
                 end
             end
             if (ex_i.valid)
-                perf_counter_d[riscv::CSR_MEXCEPTION[4:0]] = perf_counter_q[riscv::CSR_MEXCEPTION[4:0]] + 1'b1;
+                perf_counter_d[riscv::CSR_MEXCEPTION] = perf_counter_q[riscv::CSR_MEXCEPTION] + 1'b1;
             if (eret_i)
-                perf_counter_d[riscv::CSR_MEXCEPTION_RET[4:0]] = perf_counter_q[riscv::CSR_MEXCEPTION_RET[4:0]] + 1'b1;
+                perf_counter_d[riscv::CSR_MEXCEPTION_RET] = perf_counter_q[riscv::CSR_MEXCEPTION_RET] + 1'b1;
             if (resolved_branch_i.valid && resolved_branch_i.is_mispredict)
-                perf_counter_d[riscv::CSR_MMIS_PREDICT[4:0]] = perf_counter_q[riscv::CSR_MMIS_PREDICT[4:0]] + 1'b1;
+                perf_counter_d[riscv::CSR_MMIS_PREDICT] = perf_counter_q[riscv::CSR_MMIS_PREDICT] + 1'b1;
             if (sb_full_i) begin
-                perf_counter_d[riscv::CSR_MSB_FULL[4:0]] = perf_counter_q[riscv::CSR_MSB_FULL[4:0]] + 1'b1;
+                perf_counter_d[riscv::CSR_MSB_FULL] = perf_counter_q[riscv::CSR_MSB_FULL] + 1'b1;
             end
             if (if_empty_i) begin
-                perf_counter_d[riscv::CSR_MIF_EMPTY[4:0]] = perf_counter_q[riscv::CSR_MIF_EMPTY[4:0]] + 1'b1;
+                perf_counter_d[riscv::CSR_MIF_EMPTY] = perf_counter_q[riscv::CSR_MIF_EMPTY] + 1'b1;
             end
         end
         // write after read
-        data_o = perf_counter_q[addr_i];
+        data_o = perf_counter_q[{RegOffset,addr_i}];
         if (we_i) begin
-            perf_counter_d[addr_i] = data_i;
+            perf_counter_d[{RegOffset,addr_i}] = data_i;
         end
     end

     // ----------------
     // Registers
     // ----------------
     always_ff @(posedge clk_i or negedge rst_ni) begin
-        if (~rst_ni) begin
+        if (!rst_ni) begin
             perf_counter_q <= '0;
         end else begin
             perf_counter_q <= perf_counter_d;
         end
     end
 endmodule
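
For context, a hypothetical testbench fragment (not part of this commit) sketching how the SRAM-like interface can be exercised: addr_i carries only the low five bits of a counter's CSR address and the module prepends RegOffset internally. It assumes ariane_pkg and riscv_pkg are compiled alongside perf_counters and that the unused event and struct inputs can simply be tied to zero.

module perf_counters_tb;
    timeunit 1ns;
    timeprecision 1ps;

    import ariane_pkg::*;

    logic        clk   = 1'b0;
    logic        rst_n = 1'b0;
    logic [4:0]  addr;
    logic [63:0] rdata;
    logic        icache_miss = 1'b0;

    perf_counters i_perf_counters (
        .clk_i             ( clk         ),
        .rst_ni            ( rst_n       ),
        .debug_mode_i      ( 1'b0        ),
        .addr_i            ( addr        ),
        .we_i              ( 1'b0        ),
        .data_i            ( '0          ),
        .data_o            ( rdata       ),
        .commit_instr_i    ( '0          ),
        .commit_ack_i      ( '0          ),
        .l1_icache_miss_i  ( icache_miss ),
        .l1_dcache_miss_i  ( 1'b0        ),
        .itlb_miss_i       ( 1'b0        ),
        .dtlb_miss_i       ( 1'b0        ),
        .sb_full_i         ( 1'b0        ),
        .if_empty_i        ( 1'b0        ),
        .ex_i              ( '0          ),
        .eret_i            ( 1'b0        ),
        .resolved_branch_i ( '0          )
    );

    always #5 clk = ~clk;

    initial begin
        // select the I$ miss counter; the size cast keeps the low five CSR address bits
        addr = 5'(riscv::CSR_ML1_ICACHE_MISS);
        repeat (2) @(negedge clk);
        rst_n = 1'b1;
        // register three instruction cache misses
        icache_miss = 1'b1;
        repeat (3) @(negedge clk);
        icache_miss = 1'b0;
        @(negedge clk);
        $display("icache miss counter = %0d", rdata); // should print 3
        $finish;
    end
endmodule

Inputs are driven on the falling clock edge so they are stable when the counter register samples them on the rising edge.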