bnxt_en: Add new flags to setup new page table PTE bits on newer devices.
Author: Michael Chan <michael.chan@broadcom.com>
Sun, 14 Oct 2018 11:02:42 +0000 (07:02 -0400)
Committer: David S. Miller <davem@davemloft.net>
Tue, 16 Oct 2018 05:44:31 +0000 (22:44 -0700)
Newer chips require the PTU_PTE_VALID bit to be set for every page
table entry for context memory and rings.  Additional bits are also
required for page table entries for all rings.  Add a flags field to
bnxt_ring_mem_info struct to specify these additional bits to be used
when setting up the page tables as needed.

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h

index 602dc0975eeba737a446108560555ee7142c3ebe..f0da558561f124e6a4e438d4d1dea31675df64d8 100644 (file)
@@ -2230,8 +2230,11 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 {
        struct pci_dev *pdev = bp->pdev;
+       u64 valid_bit = 0;
        int i;
 
+       if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
+               valid_bit = PTU_PTE_VALID;
        if (rmem->nr_pages > 1) {
                rmem->pg_tbl = dma_alloc_coherent(&pdev->dev,
                                                  rmem->nr_pages * 8,
@@ -2242,6 +2245,8 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
        }
 
        for (i = 0; i < rmem->nr_pages; i++) {
+               u64 extra_bits = valid_bit;
+
                rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
                                                     rmem->page_size,
                                                     &rmem->dma_arr[i],
@@ -2249,8 +2254,16 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
                if (!rmem->pg_arr[i])
                        return -ENOMEM;
 
-               if (rmem->nr_pages > 1)
-                       rmem->pg_tbl[i] = cpu_to_le64(rmem->dma_arr[i]);
+               if (rmem->nr_pages > 1) {
+                       if (i == rmem->nr_pages - 2 &&
+                           (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+                               extra_bits |= PTU_PTE_NEXT_TO_LAST;
+                       else if (i == rmem->nr_pages - 1 &&
+                                (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+                               extra_bits |= PTU_PTE_LAST;
+                       rmem->pg_tbl[i] =
+                               cpu_to_le64(rmem->dma_arr[i] | extra_bits);
+               }
        }
 
        if (rmem->vmem_size) {
index 2e4b62147cf5d6ff45313a055083ca129252ca84..5792e5c181707a76520c71792556c838cf082ed7 100644 (file)
@@ -580,6 +580,10 @@ struct bnxt_sw_rx_agg_bd {
 struct bnxt_ring_mem_info {
        int                     nr_pages;
        int                     page_size;
+       u32                     flags;
+#define BNXT_RMEM_VALID_PTE_FLAG       1
+#define BNXT_RMEM_RING_PTE_FLAG                2
+
        void                    **pg_arr;
        dma_addr_t              *dma_arr;
 
@@ -1109,6 +1113,10 @@ struct bnxt_vf_rep {
        struct bnxt_vf_rep_stats        tx_stats;
 };
 
+#define PTU_PTE_VALID             0x1UL
+#define PTU_PTE_LAST              0x2UL
+#define PTU_PTE_NEXT_TO_LAST      0x4UL
+
 struct bnxt {
        void __iomem            *bar0;
        void __iomem            *bar1;