pcie: Add PCIe AXI lite master (minimal) module and testbench

Signed-off-by: Alex Forencich <alex@alexforencich.com>
This commit is contained in:
Alex Forencich
2025-04-29 22:26:19 -07:00
parent 093373a2b3
commit df87e87e2d
5 changed files with 1218 additions and 0 deletions

View File

@@ -0,0 +1,676 @@
// SPDX-License-Identifier: CERN-OHL-S-2.0
/*
Copyright (c) 2021-2025 FPGA Ninja, LLC
Authors:
- Alex Forencich
*/
`resetall
`timescale 1ns / 1ps
`default_nettype none
/*
* PCIe AXI Lite Master (minimal version, supports only aligned, 1 DW operations)
*/
// Bridges single-DW PCIe memory read/write request TLPs to an AXI4-Lite
// master. Non-memory or multi-DW requests are answered with UR/CA
// completions (or dropped, for posted traffic) and flagged on the status
// outputs. A small distributed-RAM FIFO preserves completion ordering
// relative to the AXI responses.
module taxi_pcie_axil_master_minimal #
(
// Force 64 bit address
parameter logic TLP_FORCE_64_BIT_ADDR = 1'b0
)
(
input wire logic clk,
input wire logic rst,
/*
* TLP input (request)
*/
taxi_pcie_tlp_if.snk rx_req_tlp,
/*
* TLP output (completion)
*/
taxi_pcie_tlp_if.src tx_cpl_tlp,
/*
* AXI Lite Master output
*/
taxi_axil_if.wr_mst m_axil_wr,
taxi_axil_if.rd_mst m_axil_rd,
/*
* Configuration
*/
input wire logic [7:0] bus_num,
/*
* Status
*/
output wire logic stat_err_cor,
output wire logic stat_err_uncor
);
// extract parameters
localparam TLP_SEGS = rx_req_tlp.SEGS;
localparam TLP_SEG_DATA_W = rx_req_tlp.SEG_DATA_W;
localparam TLP_SEG_EMPTY_W = rx_req_tlp.SEG_EMPTY_W;
localparam TLP_DATA_W = TLP_SEGS*TLP_SEG_DATA_W;
localparam TLP_HDR_W = rx_req_tlp.HDR_W;
localparam FUNC_NUM_W = rx_req_tlp.FUNC_NUM_W;
localparam AXIL_DATA_W = m_axil_wr.DATA_W;
localparam AXIL_ADDR_W = m_axil_wr.ADDR_W;
localparam AXIL_STRB_W = m_axil_wr.STRB_W;
localparam TLP_DATA_W_B = TLP_DATA_W/8;
localparam TLP_DATA_W_DW = TLP_DATA_W/32;
// 10-bit tag: 8-bit base tag plus T8/T9 extended tag bits
localparam TAG_W = 10;
// response FIFO depth is 2**RESP_FIFO_ADDR_W entries
localparam RESP_FIFO_ADDR_W = 5;
// check configuration
if (TLP_SEGS != 1)
    $fatal(0, "Error: TLP segment count must be 1 (instance %m)");
if (TLP_HDR_W != 128)
    $fatal(0, "Error: TLP segment header width must be 128 (instance %m)");
if ((2**TLP_SEG_EMPTY_W)*32*TLP_SEGS != TLP_DATA_W)
    $fatal(0, "Error: PCIe interface requires dword (32-bit) granularity (instance %m)");
if (AXIL_DATA_W != 32)
    $fatal(0, "Error: AXI lite interface width must be 32 (instance %m)");
if (AXIL_STRB_W * 8 != AXIL_DATA_W)
    $fatal(0, "Error: AXI lite interface requires byte (8-bit) granularity (instance %m)");
// TLP fmt field encodings (header DW count / data presence)
localparam [2:0]
TLP_FMT_3DW = 3'b000,
TLP_FMT_4DW = 3'b001,
TLP_FMT_3DW_DATA = 3'b010,
TLP_FMT_4DW_DATA = 3'b011,
TLP_FMT_PREFIX = 3'b100;
// completion status field encodings
localparam [2:0]
CPL_STATUS_SC = 3'b000, // successful completion
CPL_STATUS_UR = 3'b001, // unsupported request
CPL_STATUS_CRS = 3'b010, // configuration request retry status
CPL_STATUS_CA = 3'b100; // completer abort
// request-side state machine: parse incoming TLPs, issue AXI operations
localparam [0:0]
REQ_STATE_IDLE = 1'd0,
REQ_STATE_WAIT_END = 1'd1;
logic [0:0] req_state_reg = REQ_STATE_IDLE, req_state_next;
// response-side state machine: collect AXI responses, emit completions
localparam [1:0]
RESP_STATE_IDLE = 2'd0,
RESP_STATE_READ = 2'd1,
RESP_STATE_WRITE = 2'd2,
RESP_STATE_CPL = 2'd3;
logic [1:0] resp_state_reg = RESP_STATE_IDLE, resp_state_next;
// decoded request TLP header fields (combinational)
logic [2:0] rx_req_tlp_hdr_fmt;
logic [4:0] rx_req_tlp_hdr_type;
logic [2:0] rx_req_tlp_hdr_tc;
logic rx_req_tlp_hdr_ln;
logic rx_req_tlp_hdr_th;
logic rx_req_tlp_hdr_td;
logic rx_req_tlp_hdr_ep;
logic [2:0] rx_req_tlp_hdr_attr;
logic [1:0] rx_req_tlp_hdr_at;
logic [10:0] rx_req_tlp_hdr_length;
logic [15:0] rx_req_tlp_hdr_requester_id;
logic [TAG_W-1:0] rx_req_tlp_hdr_tag;
logic [3:0] rx_req_tlp_hdr_last_be;
logic [3:0] rx_req_tlp_hdr_first_be;
logic [63:0] rx_req_tlp_hdr_addr;
logic [1:0] rx_req_tlp_hdr_ph;
logic [127:0] cpl_tlp_hdr;
// response metadata FIFO: one entry per accepted request, read out in
// order so completions match request order. Attributes request
// distributed RAM (Xilinx ram_style) / MLAB (Intel ramstyle) inference.
logic [RESP_FIFO_ADDR_W+1-1:0] resp_fifo_wr_ptr_reg = 0;
logic [RESP_FIFO_ADDR_W+1-1:0] resp_fifo_rd_ptr_reg = 0, resp_fifo_rd_ptr_next;
(* ram_style = "distributed", ramstyle = "no_rw_check, mlab" *)
logic resp_fifo_op_read[2**RESP_FIFO_ADDR_W];
(* ram_style = "distributed", ramstyle = "no_rw_check, mlab" *)
logic resp_fifo_op_write[2**RESP_FIFO_ADDR_W];
(* ram_style = "distributed", ramstyle = "no_rw_check, mlab" *)
logic [2:0] resp_fifo_cpl_status[2**RESP_FIFO_ADDR_W];
(* ram_style = "distributed", ramstyle = "no_rw_check, mlab" *)
logic [2:0] resp_fifo_byte_count[2**RESP_FIFO_ADDR_W];
(* ram_style = "distributed", ramstyle = "no_rw_check, mlab" *)
logic [6:0] resp_fifo_lower_addr[2**RESP_FIFO_ADDR_W];
(* ram_style = "distributed", ramstyle = "no_rw_check, mlab" *)
logic [15:0] resp_fifo_requester_id[2**RESP_FIFO_ADDR_W];
(* ram_style = "distributed", ramstyle = "no_rw_check, mlab" *)
logic [FUNC_NUM_W-1:0] resp_fifo_func_num[2**RESP_FIFO_ADDR_W];
(* ram_style = "distributed", ramstyle = "no_rw_check, mlab" *)
logic [TAG_W-1:0] resp_fifo_tag[2**RESP_FIFO_ADDR_W];
(* ram_style = "distributed", ramstyle = "no_rw_check, mlab" *)
logic [2:0] resp_fifo_tc[2**RESP_FIFO_ADDR_W];
(* ram_style = "distributed", ramstyle = "no_rw_check, mlab" *)
logic [2:0] resp_fifo_attr[2**RESP_FIFO_ADDR_W];
// FIFO write-side staging signals
logic resp_fifo_wr_op_read;
logic resp_fifo_wr_op_write;
logic [2:0] resp_fifo_wr_cpl_status;
logic [2:0] resp_fifo_wr_byte_count;
logic [6:0] resp_fifo_wr_lower_addr;
logic [15:0] resp_fifo_wr_requester_id;
logic [FUNC_NUM_W-1:0] resp_fifo_wr_func_num;
logic [TAG_W-1:0] resp_fifo_wr_tag;
logic [2:0] resp_fifo_wr_tc;
logic [2:0] resp_fifo_wr_attr;
logic resp_fifo_we;
// half-full flag used for backpressure on the request side
logic resp_fifo_half_full_reg = 1'b0;
// FIFO read-side registers (first-word-fall-through style)
logic resp_fifo_rd_op_read_reg = 1'b0, resp_fifo_rd_op_read_next;
logic resp_fifo_rd_op_write_reg = 1'b0, resp_fifo_rd_op_write_next;
logic [2:0] resp_fifo_rd_cpl_status_reg = CPL_STATUS_SC, resp_fifo_rd_cpl_status_next;
logic [2:0] resp_fifo_rd_byte_count_reg = '0, resp_fifo_rd_byte_count_next;
logic [6:0] resp_fifo_rd_lower_addr_reg = '0, resp_fifo_rd_lower_addr_next;
logic [15:0] resp_fifo_rd_requester_id_reg = '0, resp_fifo_rd_requester_id_next;
logic [FUNC_NUM_W-1:0] resp_fifo_rd_func_num_reg = '0, resp_fifo_rd_func_num_next;
logic [TAG_W-1:0] resp_fifo_rd_tag_reg = '0, resp_fifo_rd_tag_next;
logic [2:0] resp_fifo_rd_tc_reg = '0, resp_fifo_rd_tc_next;
logic [2:0] resp_fifo_rd_attr_reg = '0, resp_fifo_rd_attr_next;
logic resp_fifo_rd_valid_reg = 1'b0, resp_fifo_rd_valid_next;
// interface output registers
logic rx_req_tlp_ready_reg = 1'b0, rx_req_tlp_ready_next;
logic [TLP_DATA_W-1:0] tx_cpl_tlp_data_reg = '0, tx_cpl_tlp_data_next;
logic [TLP_SEGS-1:0][TLP_SEG_EMPTY_W-1:0] tx_cpl_tlp_empty_reg = '0, tx_cpl_tlp_empty_next;
logic [TLP_SEGS-1:0][TLP_HDR_W-1:0] tx_cpl_tlp_hdr_reg = '0, tx_cpl_tlp_hdr_next;
logic [TLP_SEGS-1:0] tx_cpl_tlp_valid_reg = 0, tx_cpl_tlp_valid_next;
// AW and AR share one address register; only one op is in flight at a time
logic [AXIL_ADDR_W-1:0] m_axil_addr_reg = '0, m_axil_addr_next;
logic m_axil_awvalid_reg = 1'b0, m_axil_awvalid_next;
logic [AXIL_DATA_W-1:0] m_axil_wdata_reg = '0, m_axil_wdata_next;
logic [AXIL_STRB_W-1:0] m_axil_wstrb_reg = '0, m_axil_wstrb_next;
logic m_axil_wvalid_reg = 1'b0, m_axil_wvalid_next;
logic m_axil_bready_reg = 1'b0, m_axil_bready_next;
logic m_axil_arvalid_reg = 1'b0, m_axil_arvalid_next;
logic m_axil_rready_reg = 1'b0, m_axil_rready_next;
logic stat_err_cor_reg = 1'b0, stat_err_cor_next;
logic stat_err_uncor_reg = 1'b0, stat_err_uncor_next;
// output assignments; completion TLP is always a single full segment
assign rx_req_tlp.ready = rx_req_tlp_ready_reg;
assign tx_cpl_tlp.data = tx_cpl_tlp_data_reg;
assign tx_cpl_tlp.empty = tx_cpl_tlp_empty_reg;
assign tx_cpl_tlp.hdr = tx_cpl_tlp_hdr_reg;
assign tx_cpl_tlp.seq = '0;
assign tx_cpl_tlp.bar_id = '0;
assign tx_cpl_tlp.func_num = '0;
assign tx_cpl_tlp.error = '0;
assign tx_cpl_tlp.valid = tx_cpl_tlp_valid_reg;
assign tx_cpl_tlp.sop = 1'b1;
assign tx_cpl_tlp.eop = 1'b1;
assign m_axil_wr.awaddr = m_axil_addr_reg;
assign m_axil_wr.awprot = 3'b010;
assign m_axil_wr.awuser = '0;
assign m_axil_wr.awvalid = m_axil_awvalid_reg;
assign m_axil_wr.wdata = m_axil_wdata_reg;
assign m_axil_wr.wstrb = m_axil_wstrb_reg;
assign m_axil_wr.wuser = '0;
assign m_axil_wr.wvalid = m_axil_wvalid_reg;
assign m_axil_wr.bready = m_axil_bready_reg;
assign m_axil_rd.araddr = m_axil_addr_reg;
assign m_axil_rd.arprot = 3'b010;
assign m_axil_rd.aruser = '0;
assign m_axil_rd.arvalid = m_axil_arvalid_reg;
assign m_axil_rd.rready = m_axil_rready_reg;
assign stat_err_cor = stat_err_cor_reg;
assign stat_err_uncor = stat_err_uncor_reg;
// request-side logic: decode TLP headers, launch AXI-lite operations,
// and push completion metadata into the response FIFO
always_comb begin
req_state_next = REQ_STATE_IDLE;
rx_req_tlp_ready_next = 1'b0;
m_axil_addr_next = m_axil_addr_reg;
// valid signals self-clear on handshake
m_axil_awvalid_next = m_axil_awvalid_reg && !m_axil_wr.awready;
m_axil_wdata_next = m_axil_wdata_reg;
m_axil_wstrb_next = m_axil_wstrb_reg;
m_axil_wvalid_next = m_axil_wvalid_reg && !m_axil_wr.wready;
m_axil_arvalid_next = m_axil_arvalid_reg && !m_axil_rd.arready;
// status outputs are single-cycle pulses
stat_err_cor_next = 1'b0;
stat_err_uncor_next = 1'b0;
// TLP header parsing
// DW 0
rx_req_tlp_hdr_fmt = rx_req_tlp.hdr[0][127:125]; // fmt
rx_req_tlp_hdr_type = rx_req_tlp.hdr[0][124:120]; // type
rx_req_tlp_hdr_tag[9] = rx_req_tlp.hdr[0][119]; // T9
rx_req_tlp_hdr_tc = rx_req_tlp.hdr[0][118:116]; // TC
rx_req_tlp_hdr_tag[8] = rx_req_tlp.hdr[0][115]; // T8
rx_req_tlp_hdr_attr[2] = rx_req_tlp.hdr[0][114]; // attr
rx_req_tlp_hdr_ln = rx_req_tlp.hdr[0][113]; // LN
rx_req_tlp_hdr_th = rx_req_tlp.hdr[0][112]; // TH
rx_req_tlp_hdr_td = rx_req_tlp.hdr[0][111]; // TD
rx_req_tlp_hdr_ep = rx_req_tlp.hdr[0][110]; // EP
rx_req_tlp_hdr_attr[1:0] = rx_req_tlp.hdr[0][109:108]; // attr
rx_req_tlp_hdr_at = rx_req_tlp.hdr[0][107:106]; // AT
// raw 10-bit length of 0 encodes 1024 DW; MSB of the 11-bit value flags it
rx_req_tlp_hdr_length = {rx_req_tlp.hdr[0][105:96] == 0, rx_req_tlp.hdr[0][105:96]}; // length
// DW 1
rx_req_tlp_hdr_requester_id = rx_req_tlp.hdr[0][95:80]; // requester ID
rx_req_tlp_hdr_tag[7:0] = rx_req_tlp.hdr[0][79:72]; // tag
rx_req_tlp_hdr_last_be = rx_req_tlp.hdr[0][71:68]; // last BE
rx_req_tlp_hdr_first_be = rx_req_tlp.hdr[0][67:64]; // first BE
if (rx_req_tlp_hdr_fmt[0] || TLP_FORCE_64_BIT_ADDR) begin
// 4 DW (64-bit address)
// DW 2+3
rx_req_tlp_hdr_addr = {rx_req_tlp.hdr[0][63:2], 2'b00}; // addr
rx_req_tlp_hdr_ph = rx_req_tlp.hdr[0][1:0]; // PH
end else begin
// 3 DW (32-bit address)
// DW 2
rx_req_tlp_hdr_addr = {32'd0, rx_req_tlp.hdr[0][63:34], 2'b00}; // addr
rx_req_tlp_hdr_ph = rx_req_tlp.hdr[0][33:32]; // PH
end
// FIFO write defaults
resp_fifo_wr_op_read = 1'b0;
resp_fifo_wr_op_write = 1'b0;
resp_fifo_wr_cpl_status = CPL_STATUS_SC;
resp_fifo_wr_byte_count = '0;
resp_fifo_wr_lower_addr = '0;
resp_fifo_wr_requester_id = rx_req_tlp_hdr_requester_id;
resp_fifo_wr_func_num = rx_req_tlp.func_num[0];
resp_fifo_wr_tag = rx_req_tlp_hdr_tag;
resp_fifo_wr_tc = rx_req_tlp_hdr_tc;
resp_fifo_wr_attr = rx_req_tlp_hdr_attr;
resp_fifo_we = 1'b0;
case (req_state_reg)
REQ_STATE_IDLE: begin
// idle state; wait for request
// only accept when any in-flight AXI op has been taken and the
// response FIFO has headroom
rx_req_tlp_ready_next = (!m_axil_awvalid_reg || m_axil_wr.awready)
&& (!m_axil_arvalid_reg || m_axil_rd.arready)
&& (!m_axil_wvalid_reg || m_axil_wr.wready)
&& !resp_fifo_half_full_reg;
if (rx_req_tlp.ready && rx_req_tlp.valid[0] && rx_req_tlp.sop[0]) begin
m_axil_addr_next = rx_req_tlp_hdr_addr;
m_axil_wdata_next = rx_req_tlp.data[0][31:0];
m_axil_wstrb_next = rx_req_tlp_hdr_first_be;
if (!rx_req_tlp_hdr_fmt[1] && rx_req_tlp_hdr_type == 5'b00000) begin
// read request
if (rx_req_tlp_hdr_length == 11'd1) begin
// length OK
// perform read
m_axil_arvalid_next = 1'b1;
// finish read and return completion
resp_fifo_wr_op_read = 1'b1;
resp_fifo_wr_op_write = 1'b0;
resp_fifo_wr_cpl_status = CPL_STATUS_SC;
// byte count from first BE (single DW), per PCIe spec rules
casez (rx_req_tlp_hdr_first_be)
4'b0000: resp_fifo_wr_byte_count = 3'd1;
4'b0001: resp_fifo_wr_byte_count = 3'd1;
4'b0010: resp_fifo_wr_byte_count = 3'd1;
4'b0100: resp_fifo_wr_byte_count = 3'd1;
4'b1000: resp_fifo_wr_byte_count = 3'd1;
4'b0011: resp_fifo_wr_byte_count = 3'd2;
4'b0110: resp_fifo_wr_byte_count = 3'd2;
4'b1100: resp_fifo_wr_byte_count = 3'd2;
4'b01z1: resp_fifo_wr_byte_count = 3'd3;
4'b1z10: resp_fifo_wr_byte_count = 3'd3;
4'b1zz1: resp_fifo_wr_byte_count = 3'd4;
endcase
// lower address: low bits set to offset of first enabled byte
casez (rx_req_tlp_hdr_first_be)
4'b0000: resp_fifo_wr_lower_addr = {rx_req_tlp_hdr_addr[6:2], 2'b00};
4'bzzz1: resp_fifo_wr_lower_addr = {rx_req_tlp_hdr_addr[6:2], 2'b00};
4'bzz10: resp_fifo_wr_lower_addr = {rx_req_tlp_hdr_addr[6:2], 2'b01};
4'bz100: resp_fifo_wr_lower_addr = {rx_req_tlp_hdr_addr[6:2], 2'b10};
4'b1000: resp_fifo_wr_lower_addr = {rx_req_tlp_hdr_addr[6:2], 2'b11};
endcase
rx_req_tlp_ready_next = 1'b0;
end else begin
// bad length
// report correctable error
stat_err_cor_next = 1'b1;
// return CA completion
resp_fifo_wr_op_read = 1'b0;
resp_fifo_wr_op_write = 1'b0;
resp_fifo_wr_cpl_status = CPL_STATUS_CA;
resp_fifo_wr_byte_count = '0;
resp_fifo_wr_lower_addr = '0;
end
resp_fifo_wr_requester_id = rx_req_tlp_hdr_requester_id;
resp_fifo_wr_func_num = rx_req_tlp.func_num[0];
resp_fifo_wr_tag = rx_req_tlp_hdr_tag;
resp_fifo_wr_tc = rx_req_tlp_hdr_tc;
resp_fifo_wr_attr = rx_req_tlp_hdr_attr;
resp_fifo_we = 1'b1;
if (rx_req_tlp.eop[0]) begin
req_state_next = REQ_STATE_IDLE;
end else begin
rx_req_tlp_ready_next = 1'b1;
req_state_next = REQ_STATE_WAIT_END;
end
end else if (rx_req_tlp_hdr_fmt[1] && rx_req_tlp_hdr_type == 5'b00000) begin
// write request
if (rx_req_tlp_hdr_length == 11'd1) begin
// length OK
// perform write
m_axil_awvalid_next = 1'b1;
m_axil_wvalid_next = 1'b1;
// entry in FIFO for proper response ordering
resp_fifo_wr_op_read = 1'b0;
resp_fifo_wr_op_write = 1'b1;
resp_fifo_we = 1'b1;
rx_req_tlp_ready_next = 1'b0;
end else begin
// bad length
// report uncorrectable error
stat_err_uncor_next = 1'b1;
end
// NOTE(review): uses the whole eop vector here but eop[0] above;
// equivalent only because TLP_SEGS == 1 is enforced — confirm
if (rx_req_tlp.eop) begin
req_state_next = REQ_STATE_IDLE;
end else begin
rx_req_tlp_ready_next = 1'b1;
req_state_next = REQ_STATE_WAIT_END;
end
end else begin
// other request
if (rx_req_tlp_hdr_fmt[0] && ((rx_req_tlp_hdr_type & 5'b11000) == 5'b10000)) begin
// message - posted, no completion
// report uncorrectable error
stat_err_uncor_next = 1'b1;
end else if (!rx_req_tlp_hdr_fmt[0] && (rx_req_tlp_hdr_type == 5'b01010 || rx_req_tlp_hdr_type == 5'b01011)) begin
// completion TLP
// unexpected completion, advisory non-fatal error
// report correctable error
stat_err_cor_next = 1'b1;
end else begin
// other non-posted request, send UR completion
// report correctable error
stat_err_cor_next = 1'b1;
// UR completion
resp_fifo_wr_op_read = 1'b0;
resp_fifo_wr_op_write = 1'b0;
resp_fifo_wr_cpl_status = CPL_STATUS_UR;
resp_fifo_wr_byte_count = '0;
resp_fifo_wr_lower_addr = '0;
resp_fifo_wr_requester_id = rx_req_tlp_hdr_requester_id;
resp_fifo_wr_func_num = rx_req_tlp.func_num[0];
resp_fifo_wr_tag = rx_req_tlp_hdr_tag;
resp_fifo_wr_tc = rx_req_tlp_hdr_tc;
resp_fifo_wr_attr = rx_req_tlp_hdr_attr;
resp_fifo_we = 1'b1;
end
if (rx_req_tlp.eop) begin
req_state_next = REQ_STATE_IDLE;
end else begin
rx_req_tlp_ready_next = 1'b1;
req_state_next = REQ_STATE_WAIT_END;
end
end
end else begin
req_state_next = REQ_STATE_IDLE;
end
end
REQ_STATE_WAIT_END: begin
// wait end state, wait for end of TLP
// discard remaining beats of a multi-beat TLP
rx_req_tlp_ready_next = 1'b1;
if (rx_req_tlp.ready && rx_req_tlp.valid[0]) begin
if (rx_req_tlp.eop) begin
rx_req_tlp_ready_next = (!m_axil_awvalid_reg || m_axil_wr.awready)
&& (!m_axil_arvalid_reg || m_axil_rd.arready)
&& (!m_axil_wvalid_reg || m_axil_wr.wready)
&& !resp_fifo_half_full_reg;
req_state_next = REQ_STATE_IDLE;
end else begin
req_state_next = REQ_STATE_WAIT_END;
end
end else begin
req_state_next = REQ_STATE_WAIT_END;
end
end
endcase
end
// response-side logic: pop FIFO entries in order, wait for the matching
// AXI response (if any), and assemble/emit the completion TLP
always_comb begin
resp_state_next = RESP_STATE_IDLE;
resp_fifo_rd_ptr_next = resp_fifo_rd_ptr_reg;
resp_fifo_rd_op_read_next = resp_fifo_rd_op_read_reg;
resp_fifo_rd_op_write_next = resp_fifo_rd_op_write_reg;
resp_fifo_rd_cpl_status_next = resp_fifo_rd_cpl_status_reg;
resp_fifo_rd_byte_count_next = resp_fifo_rd_byte_count_reg;
resp_fifo_rd_lower_addr_next = resp_fifo_rd_lower_addr_reg;
resp_fifo_rd_requester_id_next = resp_fifo_rd_requester_id_reg;
resp_fifo_rd_func_num_next = resp_fifo_rd_func_num_reg;
resp_fifo_rd_tag_next = resp_fifo_rd_tag_reg;
resp_fifo_rd_tc_next = resp_fifo_rd_tc_reg;
resp_fifo_rd_attr_next = resp_fifo_rd_attr_reg;
resp_fifo_rd_valid_next = resp_fifo_rd_valid_reg;
tx_cpl_tlp_data_next = tx_cpl_tlp_data_reg;
tx_cpl_tlp_empty_next = tx_cpl_tlp_empty_reg;
tx_cpl_tlp_hdr_next = tx_cpl_tlp_hdr_reg;
// valid self-clears on handshake
tx_cpl_tlp_valid_next = tx_cpl_tlp_valid_reg && !tx_cpl_tlp.ready;
m_axil_bready_next = 1'b0;
m_axil_rready_next = 1'b0;
// TLP header
// completion header assembled from the current FIFO entry
// DW 0
cpl_tlp_hdr[127:125] = resp_fifo_rd_op_read_reg ? TLP_FMT_3DW_DATA : TLP_FMT_3DW; // fmt
cpl_tlp_hdr[124:120] = 5'b01010; // type
cpl_tlp_hdr[119] = resp_fifo_rd_tag_reg[9]; // T9
cpl_tlp_hdr[118:116] = resp_fifo_rd_tc_reg; // TC
cpl_tlp_hdr[115] = resp_fifo_rd_tag_reg[8]; // T8
cpl_tlp_hdr[114] = resp_fifo_rd_attr_reg[2]; // attr
cpl_tlp_hdr[113] = 1'b0; // LN
cpl_tlp_hdr[112] = 1'b0; // TH
cpl_tlp_hdr[111] = 1'b0; // TD
cpl_tlp_hdr[110] = 1'b0; // EP
cpl_tlp_hdr[109:108] = resp_fifo_rd_attr_reg[1:0]; // attr
cpl_tlp_hdr[107:106] = 2'b00; // AT
cpl_tlp_hdr[105:96] = resp_fifo_rd_op_read_reg ? 1 : 0; // length
// DW 1
cpl_tlp_hdr[95:88] = bus_num; // completer ID (bus number)
cpl_tlp_hdr[87:80] = 8'(resp_fifo_rd_func_num_reg); // completer ID (function number)
cpl_tlp_hdr[79:77] = resp_fifo_rd_cpl_status_reg; // completion status
cpl_tlp_hdr[76] = 1'b0; // BCM
cpl_tlp_hdr[75:64] = 12'(resp_fifo_rd_byte_count_reg); // byte count
// DW 2
cpl_tlp_hdr[63:48] = resp_fifo_rd_requester_id_reg; // requester ID
cpl_tlp_hdr[47:40] = resp_fifo_rd_tag_reg[7:0]; // tag
cpl_tlp_hdr[39] = 1'b0;
cpl_tlp_hdr[38:32] = resp_fifo_rd_lower_addr_reg; // lower address
cpl_tlp_hdr[31:0] = '0;
case (resp_state_reg)
RESP_STATE_IDLE: begin
// idle state - wait for operation
if (resp_fifo_rd_valid_reg) begin
if (resp_fifo_rd_op_read_reg) begin
// only accept read data once the completion output can take it
m_axil_rready_next = !tx_cpl_tlp_valid_reg || tx_cpl_tlp.ready;
resp_state_next = RESP_STATE_READ;
end else if (resp_fifo_rd_op_write_reg) begin
m_axil_bready_next = 1'b1;
resp_state_next = RESP_STATE_WRITE;
end else begin
// no AXI op in flight; emit completion directly (UR/CA)
resp_state_next = RESP_STATE_CPL;
end
end else begin
resp_state_next = RESP_STATE_IDLE;
end
end
RESP_STATE_READ: begin
// read state - wait for read data and generate completion
m_axil_rready_next = !tx_cpl_tlp_valid_reg || tx_cpl_tlp.ready;
if (m_axil_rd.rready && m_axil_rd.rvalid) begin
m_axil_rready_next = 1'b0;
tx_cpl_tlp_hdr_next = cpl_tlp_hdr;
tx_cpl_tlp_data_next = TLP_DATA_W'(m_axil_rd.rdata);
// one DW of payload; remaining DWs of the segment are empty
tx_cpl_tlp_empty_next = '0;
tx_cpl_tlp_empty_next[0] = TLP_SEG_EMPTY_W'(TLP_DATA_W_DW-1);
tx_cpl_tlp_valid_next = 1'b1;
resp_fifo_rd_valid_next = 1'b0;
resp_state_next = RESP_STATE_IDLE;
end else begin
resp_state_next = RESP_STATE_READ;
end
end
RESP_STATE_WRITE: begin
// write state - wait for write response
// memory writes are posted: the AXI B response is consumed
// without generating a completion TLP
m_axil_bready_next = 1'b1;
if (m_axil_wr.bready && m_axil_wr.bvalid) begin
m_axil_bready_next = 1'b0;
resp_fifo_rd_valid_next = 1'b0;
resp_state_next = RESP_STATE_IDLE;
end else begin
resp_state_next = RESP_STATE_WRITE;
end
end
RESP_STATE_CPL: begin
// completion state - generate completion
if (!tx_cpl_tlp_valid_reg || tx_cpl_tlp.ready) begin
tx_cpl_tlp_hdr_next = cpl_tlp_hdr;
tx_cpl_tlp_data_next = '0;
tx_cpl_tlp_empty_next = '0;
tx_cpl_tlp_empty_next[0] = TLP_SEG_EMPTY_W'(TLP_DATA_W_DW-1);
tx_cpl_tlp_valid_next = 1'b1;
resp_fifo_rd_valid_next = 1'b0;
resp_state_next = RESP_STATE_IDLE;
end else begin
resp_state_next = RESP_STATE_CPL;
end
end
endcase
// FIFO read: refill the read-side registers whenever the current
// entry has been consumed and the FIFO is non-empty
if (!resp_fifo_rd_valid_next) begin
resp_fifo_rd_op_read_next = resp_fifo_op_read[resp_fifo_rd_ptr_reg[RESP_FIFO_ADDR_W-1:0]];
resp_fifo_rd_op_write_next = resp_fifo_op_write[resp_fifo_rd_ptr_reg[RESP_FIFO_ADDR_W-1:0]];
resp_fifo_rd_cpl_status_next = resp_fifo_cpl_status[resp_fifo_rd_ptr_reg[RESP_FIFO_ADDR_W-1:0]];
resp_fifo_rd_byte_count_next = resp_fifo_byte_count[resp_fifo_rd_ptr_reg[RESP_FIFO_ADDR_W-1:0]];
resp_fifo_rd_lower_addr_next = resp_fifo_lower_addr[resp_fifo_rd_ptr_reg[RESP_FIFO_ADDR_W-1:0]];
resp_fifo_rd_requester_id_next = resp_fifo_requester_id[resp_fifo_rd_ptr_reg[RESP_FIFO_ADDR_W-1:0]];
resp_fifo_rd_func_num_next = resp_fifo_func_num[resp_fifo_rd_ptr_reg[RESP_FIFO_ADDR_W-1:0]];
resp_fifo_rd_tag_next = resp_fifo_tag[resp_fifo_rd_ptr_reg[RESP_FIFO_ADDR_W-1:0]];
resp_fifo_rd_tc_next = resp_fifo_tc[resp_fifo_rd_ptr_reg[RESP_FIFO_ADDR_W-1:0]];
resp_fifo_rd_attr_next = resp_fifo_attr[resp_fifo_rd_ptr_reg[RESP_FIFO_ADDR_W-1:0]];
if (resp_fifo_rd_ptr_reg != resp_fifo_wr_ptr_reg) begin
resp_fifo_rd_ptr_next = resp_fifo_rd_ptr_reg + 1;
resp_fifo_rd_valid_next = 1'b1;
end
end
end
// sequential logic: register updates, FIFO write, synchronous reset
always_ff @(posedge clk) begin
req_state_reg <= req_state_next;
resp_state_reg <= resp_state_next;
rx_req_tlp_ready_reg <= rx_req_tlp_ready_next;
tx_cpl_tlp_data_reg <= tx_cpl_tlp_data_next;
tx_cpl_tlp_empty_reg <= tx_cpl_tlp_empty_next;
tx_cpl_tlp_hdr_reg <= tx_cpl_tlp_hdr_next;
tx_cpl_tlp_valid_reg <= tx_cpl_tlp_valid_next;
m_axil_addr_reg <= m_axil_addr_next;
m_axil_awvalid_reg <= m_axil_awvalid_next;
m_axil_wdata_reg <= m_axil_wdata_next;
m_axil_wstrb_reg <= m_axil_wstrb_next;
m_axil_wvalid_reg <= m_axil_wvalid_next;
m_axil_bready_reg <= m_axil_bready_next;
m_axil_arvalid_reg <= m_axil_arvalid_next;
m_axil_rready_reg <= m_axil_rready_next;
stat_err_cor_reg <= stat_err_cor_next;
stat_err_uncor_reg <= stat_err_uncor_next;
if (resp_fifo_we) begin
resp_fifo_op_read[resp_fifo_wr_ptr_reg[RESP_FIFO_ADDR_W-1:0]] <= resp_fifo_wr_op_read;
resp_fifo_op_write[resp_fifo_wr_ptr_reg[RESP_FIFO_ADDR_W-1:0]] <= resp_fifo_wr_op_write;
resp_fifo_cpl_status[resp_fifo_wr_ptr_reg[RESP_FIFO_ADDR_W-1:0]] <= resp_fifo_wr_cpl_status;
resp_fifo_byte_count[resp_fifo_wr_ptr_reg[RESP_FIFO_ADDR_W-1:0]] <= resp_fifo_wr_byte_count;
resp_fifo_lower_addr[resp_fifo_wr_ptr_reg[RESP_FIFO_ADDR_W-1:0]] <= resp_fifo_wr_lower_addr;
resp_fifo_requester_id[resp_fifo_wr_ptr_reg[RESP_FIFO_ADDR_W-1:0]] <= resp_fifo_wr_requester_id;
resp_fifo_func_num[resp_fifo_wr_ptr_reg[RESP_FIFO_ADDR_W-1:0]] <= resp_fifo_wr_func_num;
resp_fifo_tag[resp_fifo_wr_ptr_reg[RESP_FIFO_ADDR_W-1:0]] <= resp_fifo_wr_tag;
resp_fifo_tc[resp_fifo_wr_ptr_reg[RESP_FIFO_ADDR_W-1:0]] <= resp_fifo_wr_tc;
resp_fifo_attr[resp_fifo_wr_ptr_reg[RESP_FIFO_ADDR_W-1:0]] <= resp_fifo_wr_attr;
resp_fifo_wr_ptr_reg <= resp_fifo_wr_ptr_reg + 1;
end
resp_fifo_rd_ptr_reg <= resp_fifo_rd_ptr_next;
resp_fifo_rd_op_read_reg <= resp_fifo_rd_op_read_next;
resp_fifo_rd_op_write_reg <= resp_fifo_rd_op_write_next;
resp_fifo_rd_cpl_status_reg <= resp_fifo_rd_cpl_status_next;
resp_fifo_rd_byte_count_reg <= resp_fifo_rd_byte_count_next;
resp_fifo_rd_lower_addr_reg <= resp_fifo_rd_lower_addr_next;
resp_fifo_rd_requester_id_reg <= resp_fifo_rd_requester_id_next;
resp_fifo_rd_func_num_reg <= resp_fifo_rd_func_num_next;
resp_fifo_rd_tag_reg <= resp_fifo_rd_tag_next;
resp_fifo_rd_tc_reg <= resp_fifo_rd_tc_next;
resp_fifo_rd_attr_reg <= resp_fifo_rd_attr_next;
resp_fifo_rd_valid_reg <= resp_fifo_rd_valid_next;
// occupancy from pointer difference (extra MSB handles wrap)
resp_fifo_half_full_reg <= $unsigned(resp_fifo_wr_ptr_reg - resp_fifo_rd_ptr_reg) >= 2**(RESP_FIFO_ADDR_W-1);
if (rst) begin
req_state_reg <= REQ_STATE_IDLE;
resp_state_reg <= RESP_STATE_IDLE;
rx_req_tlp_ready_reg <= 1'b0;
tx_cpl_tlp_valid_reg <= 1'b0;
m_axil_awvalid_reg <= 1'b0;
m_axil_wvalid_reg <= 1'b0;
m_axil_bready_reg <= 1'b0;
m_axil_arvalid_reg <= 1'b0;
m_axil_rready_reg <= 1'b0;
stat_err_cor_reg <= 1'b0;
stat_err_uncor_reg <= 1'b0;
resp_fifo_wr_ptr_reg <= 0;
resp_fifo_rd_ptr_reg <= 0;
resp_fifo_rd_valid_reg <= 1'b0;
end
end
endmodule
`resetall

View File

@@ -0,0 +1,53 @@
# SPDX-License-Identifier: CERN-OHL-S-2.0
#
# Copyright (c) 2021-2025 FPGA Ninja, LLC
#
# Authors:
# - Alex Forencich
# Simulator selection and timing configuration
TOPLEVEL_LANG = verilog
SIM ?= verilator
WAVES ?= 0
COCOTB_HDL_TIMEUNIT = 1ns
COCOTB_HDL_TIMEPRECISION = 1ps
# DUT name; the SV wrapper test_<DUT>.sv is the simulation toplevel
DUT = taxi_pcie_axil_master_minimal
COCOTB_TEST_MODULES = test_$(DUT)
COCOTB_TOPLEVEL = test_$(DUT)
# legacy cocotb variable names kept for older Makefile.sim versions
MODULE = $(COCOTB_TEST_MODULES)
TOPLEVEL = $(COCOTB_TOPLEVEL)
VERILOG_SOURCES += $(COCOTB_TOPLEVEL).sv
VERILOG_SOURCES += ../../../rtl/pcie/$(DUT).sv
VERILOG_SOURCES += ../../../rtl/pcie/taxi_pcie_tlp_if.sv
VERILOG_SOURCES += ../../../rtl/axi/taxi_axil_if.sv
# handle file list files
# recursively expand .f file lists (paths resolved relative to the .f file)
process_f_file = $(call process_f_files,$(addprefix $(dir $1),$(shell cat $1)))
process_f_files = $(foreach f,$1,$(if $(filter %.f,$f),$(call process_f_file,$f),$f))
# deduplicate by basename, keeping the last occurrence of each file name
uniq_base = $(if $1,$(call uniq_base,$(foreach f,$1,$(if $(filter-out $(notdir $(lastword $1)),$(notdir $f)),$f,))) $(lastword $1))
VERILOG_SOURCES := $(call uniq_base,$(call process_f_files,$(VERILOG_SOURCES)))
# module parameters
# exported as PARAM_* so the python testbench can read them from the env
export PARAM_TLP_SEG_DATA_W := 64
export PARAM_TLP_HDR_W := 128
export PARAM_TLP_SEGS := 1
export PARAM_AXIL_DATA_W := 32
export PARAM_AXIL_ADDR_W := 64
export PARAM_TLP_FORCE_64_BIT_ADDR := 0
# pass parameters to the simulator (flag syntax differs per tool)
ifeq ($(SIM), icarus)
PLUSARGS += -fst
COMPILE_ARGS += $(foreach v,$(filter PARAM_%,$(.VARIABLES)),-P $(COCOTB_TOPLEVEL).$(subst PARAM_,,$(v))=$($(v)))
else ifeq ($(SIM), verilator)
COMPILE_ARGS += $(foreach v,$(filter PARAM_%,$(.VARIABLES)),-G$(subst PARAM_,,$(v))=$($(v)))
ifeq ($(WAVES), 1)
COMPILE_ARGS += --trace-fst
VERILATOR_TRACE = 1
endif
endif
include $(shell cocotb-config --makefiles)/Makefile.sim

View File

@@ -0,0 +1 @@
../pcie_if.py

View File

@@ -0,0 +1,395 @@
#!/usr/bin/env python
# SPDX-License-Identifier: CERN-OHL-S-2.0
"""
Copyright (c) 2021-2025 FPGA Ninja, LLC
Authors:
- Alex Forencich
"""
import itertools
import logging
import os
import re
import sys
from contextlib import contextmanager
import cocotb_test.simulator
import pytest
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, Timer
from cocotb.regression import TestFactory
from cocotbext.pcie.core import RootComplex
from cocotbext.axi import AxiLiteBus, AxiLiteRam
try:
from pcie_if import PcieIfDevice, PcieIfRxBus, PcieIfTxBus
except ImportError:
# attempt import from current directory
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
try:
from pcie_if import PcieIfDevice, PcieIfRxBus, PcieIfTxBus
finally:
del sys.path[0]
@contextmanager
def assert_raises(exc_type, pattern=None):
    """Context manager asserting that the enclosed block raises exc_type.

    If pattern is given, the exception message must match it via re.match.
    Raises AssertionError when no exception of the expected type occurs;
    other exception types propagate unchanged.
    """
    caught = False
    try:
        yield
    except exc_type as exc:
        caught = True
        if pattern:
            assert re.match(pattern, str(exc)), \
                "Correct exception type caught, but message did not match pattern"
    if not caught:
        raise AssertionError("{} was not raised".format(exc_type.__name__))
class TB(object):
    """Cocotb testbench harness: PCIe root complex, DUT-side TLP device,
    and an AXI-lite RAM behind the DUT's master port.

    Attributes consumed by the test functions: rc, dev, axil_ram,
    stat_err_cor_asserted, stat_err_uncor_asserted.
    """

    def __init__(self, dut):
        self.dut = dut

        self.log = logging.getLogger("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # 4 ns period -> 250 MHz clock
        cocotb.start_soon(Clock(dut.clk, 4, units="ns").start())

        # PCIe
        self.rc = RootComplex()

        # TLP-interface device model wrapping the DUT's TLP ports
        self.dev = PcieIfDevice(
            clk=dut.clk,
            rst=dut.rst,
            rx_req_tlp_bus=PcieIfRxBus.from_entity(dut.rx_req_tlp),
            tx_cpl_tlp_bus=PcieIfTxBus.from_entity(dut.tx_cpl_tlp)
        )

        self.dev.log.setLevel(logging.DEBUG)

        # BAR0: 16 MB memory BAR; BAR1: 16 kB IO BAR (used for bad-op tests)
        self.dev.functions[0].configure_bar(0, 16*1024*1024)
        self.dev.functions[0].configure_bar(1, 16*1024, io=True)

        self.rc.make_port().connect(self.dev)

        # AXI
        self.axil_ram = AxiLiteRam(AxiLiteBus.from_entity(dut.m_axil), dut.clk, dut.rst, size=2**16)

        dut.bus_num.setimmediatevalue(0)

        # monitor error outputs
        # sticky flags set by the monitor coroutines below
        self.stat_err_cor_asserted = False
        self.stat_err_uncor_asserted = False
        cocotb.start_soon(self._run_monitor_stat_err_cor())
        cocotb.start_soon(self._run_monitor_stat_err_uncor())

    def set_idle_generator(self, generator=None):
        # pause TLP request source and AXI response channels
        if generator:
            self.dev.rx_req_tlp_source.set_pause_generator(generator())
            self.axil_ram.write_if.b_channel.set_pause_generator(generator())
            self.axil_ram.read_if.r_channel.set_pause_generator(generator())

    def set_backpressure_generator(self, generator=None):
        # pause TLP completion sink and AXI request channels
        if generator:
            self.dev.tx_cpl_tlp_sink.set_pause_generator(generator())
            self.axil_ram.write_if.aw_channel.set_pause_generator(generator())
            self.axil_ram.write_if.w_channel.set_pause_generator(generator())
            self.axil_ram.read_if.ar_channel.set_pause_generator(generator())

    async def _run_monitor_stat_err_cor(self):
        # latch any pulse on the correctable-error status output
        while True:
            await RisingEdge(self.dut.stat_err_cor)
            self.log.info("stat_err_cor (correctable error) was asserted")
            self.stat_err_cor_asserted = True

    async def _run_monitor_stat_err_uncor(self):
        # latch any pulse on the uncorrectable-error status output
        while True:
            await RisingEdge(self.dut.stat_err_uncor)
            self.log.info("stat_err_uncor (uncorrectable error) was asserted")
            self.stat_err_uncor_asserted = True

    async def cycle_reset(self):
        # apply a two-cycle synchronous reset pulse
        self.dut.rst.setimmediatevalue(0)
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
        self.dut.rst.value = 1
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
        self.dut.rst.value = 0
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
async def run_test_write(dut, idle_inserter=None, backpressure_inserter=None):
    """Sweep 0-4 byte writes at every DW-aligned offset through BAR0 and
    verify the bytes land in the AXI-lite RAM without disturbing neighbors."""

    tb = TB(dut)

    tb.set_idle_generator(idle_inserter)
    tb.set_backpressure_generator(backpressure_inserter)

    await tb.cycle_reset()

    await tb.rc.enumerate()

    dev = tb.rc.find_device(tb.dev.functions[0].pcie_id)
    await dev.enable_device()

    dev_bar0 = dev.bar_window[0]

    # completer ID bus number must match the enumerated bus
    tb.dut.bus_num.value = tb.dev.functions[0].pcie_id.bus

    for length in range(0, 5):
        for pcie_offset in range(4-length+1):
            tb.log.info("length %d, pcie_offset %d", length, pcie_offset)
            pcie_addr = pcie_offset+0x1000
            test_data = bytearray([x % 256 for x in range(length)])

            # fill surrounding RAM with a known pattern to detect overwrites
            tb.axil_ram.write(pcie_addr-128, b'\x55'*(len(test_data)+256))

            await dev_bar0.write(pcie_addr, test_data)
            # wait for write to complete
            val = await dev_bar0.read(pcie_addr, len(test_data), timeout=1000, timeout_unit='ns')

            tb.log.debug("%s", tb.axil_ram.hexdump_str((pcie_addr & ~0xf)-16, (((pcie_addr & 0xf)+length-1) & ~0xf)+48))

            # exactly the written bytes changed; guard bytes intact
            assert tb.axil_ram.read(pcie_addr-1, len(test_data)+2) == b'\x55'+test_data+b'\x55'

    assert not tb.stat_err_cor_asserted
    assert not tb.stat_err_uncor_asserted

    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
async def run_test_read(dut, idle_inserter=None, backpressure_inserter=None):
    """Sweep 0-4 byte reads at every DW-aligned offset through BAR0 and
    verify the returned data matches the AXI-lite RAM contents."""

    tb = TB(dut)

    tb.set_idle_generator(idle_inserter)
    tb.set_backpressure_generator(backpressure_inserter)

    await tb.cycle_reset()

    await tb.rc.enumerate()

    dev = tb.rc.find_device(tb.dev.functions[0].pcie_id)
    await dev.enable_device()

    dev_bar0 = dev.bar_window[0]

    # completer ID bus number must match the enumerated bus
    tb.dut.bus_num.value = tb.dev.functions[0].pcie_id.bus

    for length in range(0, 5):
        for pcie_offset in range(4-length+1):
            tb.log.info("length %d, pcie_offset %d", length, pcie_offset)
            pcie_addr = pcie_offset+0x1000
            test_data = bytearray([x % 256 for x in range(length)])

            # background pattern plus the expected payload
            tb.axil_ram.write(pcie_addr-128, b'\x55'*(len(test_data)+256))
            tb.axil_ram.write(pcie_addr, test_data)

            tb.log.debug("%s", tb.axil_ram.hexdump_str((pcie_addr & ~0xf)-16, (((pcie_addr & 0xf)+length-1) & ~0xf)+48))

            val = await dev_bar0.read(pcie_addr, len(test_data), timeout=1000, timeout_unit='ns')

            tb.log.debug("read data: %s", val)

            assert val == test_data

    assert not tb.stat_err_cor_asserted
    assert not tb.stat_err_uncor_asserted

    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
async def run_test_bad_ops(dut, idle_inserter=None, backpressure_inserter=None):
    """Exercise unsupported operations (IO accesses via BAR1 and multi-DW
    memory accesses) and check error completions and status flags."""

    tb = TB(dut)

    tb.set_idle_generator(idle_inserter)
    tb.set_backpressure_generator(backpressure_inserter)

    await tb.cycle_reset()

    await tb.rc.enumerate()

    dev = tb.rc.find_device(tb.dev.functions[0].pcie_id)
    await dev.enable_device()

    dev_bar0 = dev.bar_window[0]
    dev_bar1 = dev.bar_window[1]

    # completer ID bus number must match the enumerated bus
    tb.dut.bus_num.value = tb.dev.functions[0].pcie_id.bus

    tb.log.info("Test IO write")
    # IO requests are unsupported: expect UR completion + correctable error
    length = 4
    pcie_addr = 0x1000
    test_data = bytearray([x % 256 for x in range(length)])

    tb.axil_ram.write(pcie_addr-128, b'\x55'*(len(test_data)+256))

    with assert_raises(Exception, "Unsuccessful completion"):
        await dev_bar1.write(pcie_addr, test_data, timeout=1000, timeout_unit='ns')

    await Timer(100, 'ns')

    tb.log.debug("%s", tb.axil_ram.hexdump_str((pcie_addr & ~0xf)-16, (((pcie_addr & 0xf)+length-1) & ~0xf)+48, prefix="AXI "))

    # RAM untouched by the rejected write
    assert tb.axil_ram.read(pcie_addr-1, len(test_data)+2) == b'\x55'*(len(test_data)+2)

    assert tb.stat_err_cor_asserted
    assert not tb.stat_err_uncor_asserted

    tb.stat_err_cor_asserted = False
    tb.stat_err_uncor_asserted = False

    tb.log.info("Test IO read")
    # IO read: same UR/correctable-error expectation
    length = 4
    pcie_addr = 0x1000
    test_data = bytearray([x % 256 for x in range(length)])

    tb.axil_ram.write(pcie_addr-128, b'\x55'*(len(test_data)+256))
    tb.axil_ram.write(pcie_addr, test_data)

    tb.log.debug("%s", tb.axil_ram.hexdump_str((pcie_addr & ~0xf)-16, (((pcie_addr & 0xf)+length-1) & ~0xf)+48, prefix="AXI "))

    with assert_raises(Exception, "Unsuccessful completion"):
        val = await dev_bar1.read(pcie_addr, len(test_data), timeout=1000, timeout_unit='ns')

    assert tb.stat_err_cor_asserted
    assert not tb.stat_err_uncor_asserted

    tb.stat_err_cor_asserted = False
    tb.stat_err_uncor_asserted = False

    tb.log.info("Test bad write")
    # multi-DW memory write: posted, silently dropped -> uncorrectable error
    length = 32
    pcie_addr = 0x1000
    test_data = bytearray([x % 256 for x in range(length)])

    tb.axil_ram.write(pcie_addr-128, b'\x55'*(len(test_data)+256))

    await dev_bar0.write(pcie_addr, test_data)
    await Timer(100, 'ns')

    tb.log.debug("%s", tb.axil_ram.hexdump_str((pcie_addr & ~0xf)-16, (((pcie_addr & 0xf)+length-1) & ~0xf)+48, prefix="AXI "))

    # RAM untouched by the dropped write
    assert tb.axil_ram.read(pcie_addr-1, len(test_data)+2) == b'\x55'*(len(test_data)+2)

    assert not tb.stat_err_cor_asserted
    assert tb.stat_err_uncor_asserted

    tb.stat_err_cor_asserted = False
    tb.stat_err_uncor_asserted = False

    tb.log.info("Test bad read")
    # multi-DW memory read: expect CA completion + correctable error
    length = 32
    pcie_addr = 0x1000
    test_data = bytearray([x % 256 for x in range(length)])

    tb.axil_ram.write(pcie_addr-128, b'\x55'*(len(test_data)+256))
    tb.axil_ram.write(pcie_addr, test_data)

    tb.log.debug("%s", tb.axil_ram.hexdump_str((pcie_addr & ~0xf)-16, (((pcie_addr & 0xf)+length-1) & ~0xf)+48, prefix="AXI "))

    with assert_raises(Exception, "Unsuccessful completion"):
        val = await dev_bar0.read(pcie_addr, len(test_data), timeout=1000, timeout_unit='ns')

    assert tb.stat_err_cor_asserted
    assert not tb.stat_err_uncor_asserted

    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
def cycle_pause():
    # Pause pattern for idle/backpressure insertion: stall for three
    # cycles, then permit one transfer, repeating forever.
    pattern = (1, 1, 1, 0)
    return itertools.cycle(pattern)
# Register the cocotb test variants only when this module is loaded by a
# running simulator (cocotb.SIM_NAME is falsy during plain pytest import).
if cocotb.SIM_NAME:

    for test in [
                run_test_write,
                run_test_read,
                run_test_bad_ops
            ]:
        # Generate each test with and without randomized idle cycles on
        # the source side and backpressure on the sink side.
        factory = TestFactory(test)
        factory.add_option("idle_inserter", [None, cycle_pause])
        factory.add_option("backpressure_inserter", [None, cycle_pause])
        factory.generate_tests()
# cocotb-test

# Directory holding this testbench module, and the repository RTL root
# (three levels up); both are used to resolve simulator source paths.
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', '..', 'rtl'))
def process_f_files(files):
    """Expand '.f' file lists and deduplicate sources by basename.

    Any entry whose name ends in '.f' (case-insensitive) is read as a
    whitespace-separated list of additional sources, resolved relative to
    the '.f' file's directory and expanded recursively.  When several
    entries share a basename, the one seen last wins; insertion order of
    basenames is otherwise preserved.
    """
    by_name = {}
    for path in files:
        if path[-2:].lower() == '.f':
            # Read the list file and recurse on its entries.
            with open(path, 'r') as fp:
                entries = fp.read().split()
            base_dir = os.path.dirname(path)
            resolved = [os.path.join(base_dir, e) for e in entries]
            for src in process_f_files(resolved):
                by_name[os.path.basename(src)] = src
        else:
            by_name[os.path.basename(path)] = path
    return list(by_name.values())
@pytest.mark.parametrize("axil_data_w", [32])
@pytest.mark.parametrize("pcie_data_w", [64, 128, 256, 512])
def test_taxi_pcie_axil_master_minimal(request, pcie_data_w, axil_data_w):
    """Build and run the Verilator simulation for one width combination.

    Parametrized over the PCIe TLP segment data width; the AXI lite data
    width is fixed at 32 bits (the DUT's only supported value).
    """
    dut = "taxi_pcie_axil_master_minimal"
    module = os.path.splitext(os.path.basename(__file__))[0]
    toplevel = module

    # Testbench wrapper plus DUT and interface definitions, with any
    # '.f' list files expanded and duplicates removed.
    verilog_sources = process_f_files([
        os.path.join(tests_dir, f"{toplevel}.sv"),
        os.path.join(rtl_dir, "pcie", f"{dut}.sv"),
        os.path.join(rtl_dir, "pcie", "taxi_pcie_tlp_if.sv"),
        os.path.join(rtl_dir, "axi", "taxi_axil_if.sv"),
    ])

    parameters = {
        'TLP_SEG_DATA_W': pcie_data_w,
        'TLP_HDR_W': 128,
        'TLP_SEGS': 1,
        'AXIL_DATA_W': axil_data_w,
        'AXIL_ADDR_W': 64,
        'TLP_FORCE_64_BIT_ADDR': 0,
    }

    # Expose the parameters to the cocotb test process as PARAM_* vars.
    extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}

    # Per-test build directory; strip brackets from the parametrized name
    # so it is filesystem-safe.
    sim_build = os.path.join(
        tests_dir, "sim_build",
        request.node.name.replace('[', '-').replace(']', ''))

    cocotb_test.simulator.run(
        simulator="verilator",
        python_search=[tests_dir],
        verilog_sources=verilog_sources,
        toplevel=toplevel,
        module=module,
        parameters=parameters,
        sim_build=sim_build,
        extra_env=extra_env,
    )

View File

@@ -0,0 +1,93 @@
// SPDX-License-Identifier: CERN-OHL-S-2.0
/*
Copyright (c) 2025 FPGA Ninja, LLC
Authors:
- Alex Forencich
*/
`resetall
`timescale 1ns / 1ps
`default_nettype none
/*
* PCIe AXI Lite Master (minimal) testbench
*/
// Testbench wrapper: instantiates the DUT with its TLP and AXI lite
// interfaces so the cocotb test can drive them by hierarchical name.
module test_taxi_pcie_axil_master_minimal #
(
    /* verilator lint_off WIDTHTRUNC */
    parameter TLP_SEG_DATA_W = 64,        // TLP segment data width (bits)
    parameter TLP_HDR_W = 128,            // TLP header width; DUT requires 128
    parameter TLP_SEGS = 1,               // TLP segment count; DUT requires 1
    parameter AXIL_DATA_W = 32,           // AXI lite data width; DUT requires 32
    parameter AXIL_ADDR_W = 64,           // AXI lite address width
    parameter logic TLP_FORCE_64_BIT_ADDR = 1'b0  // always emit 64-bit address TLP headers
    /* verilator lint_on WIDTHTRUNC */
)
();

// clock and reset, driven from the cocotb test
logic clk;
logic rst;

// TLP interfaces: request sink and completion source of the DUT
taxi_pcie_tlp_if #(
    .SEGS(TLP_SEGS),
    .SEG_DATA_W(TLP_SEG_DATA_W),
    .HDR_W(TLP_HDR_W),
    .FUNC_NUM_W(8)
) rx_req_tlp(), tx_cpl_tlp();

// single AXI lite interface, shared by the DUT's write and read
// master modports (connected to both below)
taxi_axil_if #(
    .DATA_W(AXIL_DATA_W),
    .ADDR_W(AXIL_ADDR_W),
    .AWUSER_EN(1'b0),
    .WUSER_EN(1'b0),
    .BUSER_EN(1'b0),
    .ARUSER_EN(1'b0),
    .RUSER_EN(1'b0)
) m_axil();

// configuration input and error status outputs of the DUT
logic [7:0] bus_num;
logic stat_err_cor;
logic stat_err_uncor;

taxi_pcie_axil_master_minimal #(
    .TLP_FORCE_64_BIT_ADDR(TLP_FORCE_64_BIT_ADDR)
)
uut (
    .clk(clk),
    .rst(rst),

    /*
     * TLP input (request)
     */
    .rx_req_tlp(rx_req_tlp),

    /*
     * TLP output (completion)
     */
    .tx_cpl_tlp(tx_cpl_tlp),

    /*
     * AXI Lite Master output (same interface instance on both modports)
     */
    .m_axil_wr(m_axil),
    .m_axil_rd(m_axil),

    /*
     * Configuration
     */
    .bus_num(bus_num),

    /*
     * Status
     */
    .stat_err_cor(stat_err_cor),
    .stat_err_uncor(stat_err_uncor)
);

endmodule
`resetall