#define DRV_NAME "qtnfmac_pearl_pcie"
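+/* Boot Data Area (BDA): control block shared with the EP (card) firmware
+ * over PCIe. It carries the EP/RC handshake state, DMA mask, MSI address,
+ * board identification and the two shared-memory IPC regions used for
+ * control messaging.
+ */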
+struct qtnf_pearl_bda {
+ __le16 bda_len;
+ __le16 bda_version;
+ __le32 bda_pci_endian;
+ __le32 bda_ep_state;
+ __le32 bda_rc_state;
+ __le32 bda_dma_mask;
+ __le32 bda_msi_addr;
+ __le32 bda_flashsz;
+ u8 bda_boardname[PCIE_BDA_NAMELEN];
+ __le32 bda_rc_msi_enabled;
+ u8 bda_hhbm_list[PCIE_HHBM_MAX_SIZE];
+ __le32 bda_dsbw_start_index;
+ __le32 bda_dsbw_end_index;
+ __le32 bda_dsbw_total_bytes;
+ __le32 bda_rc_tx_bd_base;
+ __le32 bda_rc_tx_bd_num;
+ u8 bda_pcie_mac[QTN_ENET_ADDR_LENGTH];
+ struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096); /* host TX */
+ struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096); /* host RX */
+} __packed;
+
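+/* Hardware TX buffer descriptor; the DMA address and descriptor info are
+ * split into low/high 32-bit words to support 64-bit DMA addressing.
+ */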
+struct qtnf_pearl_tx_bd {
+ __le32 addr;
+ __le32 addr_h;
+ __le32 info;
+ __le32 info_h;
+} __packed;
+
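+/* Hardware RX buffer descriptor; besides the buffer address and info words
+ * it carries a next-descriptor pointer (next_ptr/next_ptr_h).
+ */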
+struct qtnf_pearl_rx_bd {
+ __le32 addr;
+ __le32 addr_h;
+ __le32 info;
+ __le32 info_h;
+ __le32 next_ptr;
+ __le32 next_ptr_h;
+} __packed;
+
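+/* Per-block header prepended to each chunk of the firmware image that the
+ * host pushes to the EP during firmware download.
+ */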
+struct qtnf_pearl_fw_hdr {
+ u8 boardflg[8];
+ __le32 fwsize;
+ __le32 seqnum;
+ __le32 type;
+ __le32 pktlen;
+ __le32 crc;
+} __packed;
+
struct qtnf_pcie_pearl_state {
struct pci_dev *pdev;
struct qtnf_shm_ipc shm_ipc_ep_in;
struct qtnf_shm_ipc shm_ipc_ep_out;
- struct qtnf_pcie_bda __iomem *bda;
+ struct qtnf_pearl_bda __iomem *bda;
void __iomem *pcie_reg_base;
u16 tx_bd_num;
struct sk_buff **tx_skb;
struct sk_buff **rx_skb;
- struct qtnf_tx_bd *tx_bd_vbase;
+ struct qtnf_pearl_tx_bd *tx_bd_vbase;
dma_addr_t tx_bd_pbase;
- struct qtnf_rx_bd *rx_bd_vbase;
+ struct qtnf_pearl_rx_bd *rx_bd_vbase;
dma_addr_t rx_bd_pbase;
dma_addr_t bd_table_paddr;
void *vaddr;
int len;
- len = priv->tx_bd_num * sizeof(struct qtnf_tx_bd) +
- priv->rx_bd_num * sizeof(struct qtnf_rx_bd);
+ len = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd) +
+ priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd);
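+ /* TX and RX descriptor rings share one coherent DMA allocation:
+ * TX descriptors first, RX descriptors immediately after.
+ */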
vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
if (!vaddr)
/* rx bd */
- vaddr = ((struct qtnf_tx_bd *)vaddr) + priv->tx_bd_num;
- paddr += priv->tx_bd_num * sizeof(struct qtnf_tx_bd);
+ vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num;
+ paddr += priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
priv->rx_bd_vbase = vaddr;
priv->rx_bd_pbase = paddr;
#endif
writel(QTN_HOST_LO32(paddr),
PCIE_HDP_TX_HOST_Q_BASE_L(priv->pcie_reg_base));
- writel(priv->rx_bd_num | (sizeof(struct qtnf_rx_bd)) << 16,
+ writel(priv->rx_bd_num | (sizeof(struct qtnf_pearl_rx_bd)) << 16,
PCIE_HDP_TX_HOST_Q_SZ_CTRL(priv->pcie_reg_base));
pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
static int skb2rbd_attach(struct qtnf_pcie_pearl_state *priv, u16 index)
{
- struct qtnf_rx_bd *rxbd;
+ struct qtnf_pearl_rx_bd *rxbd;
struct sk_buff *skb;
dma_addr_t paddr;
int ret = 0;
memset(priv->rx_bd_vbase, 0x0,
- priv->rx_bd_num * sizeof(struct qtnf_rx_bd));
+ priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd));
for (i = 0; i < priv->rx_bd_num; i++) {
ret = skb2rbd_attach(priv, i);
/* all rx/tx activity should have ceased before calling this function */
static void qtnf_free_xfer_buffers(struct qtnf_pcie_pearl_state *priv)
{
- struct qtnf_tx_bd *txbd;
- struct qtnf_rx_bd *rxbd;
+ struct qtnf_pearl_tx_bd *txbd;
+ struct qtnf_pearl_rx_bd *rxbd;
struct sk_buff *skb;
dma_addr_t paddr;
int i;
return -EINVAL;
}
- val = priv->tx_bd_num * sizeof(struct qtnf_tx_bd);
+ val = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
if (val > PCIE_HHBM_MAX_SIZE) {
pr_err("tx_bd_size_param %u is too large\n",
priv->tx_bd_num);
static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_pearl_state *priv)
{
- struct qtnf_tx_bd *txbd;
+ struct qtnf_pearl_tx_bd *txbd;
struct sk_buff *skb;
unsigned long flags;
dma_addr_t paddr;
{
struct qtnf_pcie_pearl_state *priv = (void *)get_bus_priv(bus);
dma_addr_t txbd_paddr, skb_paddr;
- struct qtnf_tx_bd *txbd;
+ struct qtnf_pearl_tx_bd *txbd;
unsigned long flags;
int len, i;
u32 info;
dma_wmb();
/* write new TX descriptor to PCIE_RX_FIFO on EP */
- txbd_paddr = priv->tx_bd_pbase + i * sizeof(struct qtnf_tx_bd);
+ txbd_paddr = priv->tx_bd_pbase + i * sizeof(struct qtnf_pearl_tx_bd);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
writel(QTN_HOST_HI32(txbd_paddr),
static int qtnf_rx_data_ready(struct qtnf_pcie_pearl_state *priv)
{
u16 index = priv->rx_bd_r_index;
- struct qtnf_rx_bd *rxbd;
+ struct qtnf_pearl_rx_bd *rxbd;
u32 descw;
rxbd = &priv->rx_bd_vbase[index];
struct net_device *ndev = NULL;
struct sk_buff *skb = NULL;
int processed = 0;
- struct qtnf_rx_bd *rxbd;
+ struct qtnf_pearl_rx_bd *rxbd;
dma_addr_t skb_paddr;
int consume;
u32 descw;
struct pci_dev *pdev = priv->pdev;
struct qtnf_bus *bus = pci_get_drvdata(pdev);
- struct qtnf_pcie_fw_hdr *hdr;
+ struct qtnf_pearl_fw_hdr *hdr;
u8 *pdata;
int hds = sizeof(*hdr);
skb->len = QTN_PCIE_FW_BUFSZ;
skb->dev = NULL;
- hdr = (struct qtnf_pcie_fw_hdr *)skb->data;
+ hdr = (struct qtnf_pearl_fw_hdr *)skb->data;
memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG));
hdr->fwsize = cpu_to_le32(size);
hdr->seqnum = cpu_to_le32(blk);
static int
qtnf_ep_fw_load(struct qtnf_pcie_pearl_state *priv, const u8 *fw, u32 fw_size)
{
- int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pcie_fw_hdr);
+ int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pearl_fw_hdr);
int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 1 : 0);
const u8 *pblk = fw;
int threshold = 0;