octeontx2-af: Register for CGX lmac events
authorLinu Cherian <lcherian@marvell.com>
Wed, 10 Oct 2018 12:44:34 +0000 (18:14 +0530)
committerDavid S. Miller <davem@davemloft.net>
Wed, 10 Oct 2018 17:06:02 +0000 (10:06 -0700)
Added support in the RVU AF driver to register for
CGX LMAC link status change events from firmware
and to manage them. The processing part will be added
in followup patches.

- Introduced an event queue for posting events from CGX LMACs.
  The queueing mechanism ensures that events can be posted
  and the firmware can be acked immediately; hence event
  reception and processing are decoupled.
- Events get added to the queue by the notification callback.
  The notification callback is expected to be atomic, since it
  is called from interrupt context.
- Events are dequeued and processed in a worker thread.

Signed-off-by: Linu Cherian <lcherian@marvell.com>
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c

index e9021a8138896deac3db8101d199ff174c41dc42..2033f42c322679722783ba2c0e04a9e27df23b17 100644 (file)
@@ -1564,10 +1564,11 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        err = rvu_register_interrupts(rvu);
        if (err)
-               goto err_mbox;
+               goto err_cgx;
 
        return 0;
-
+err_cgx:
+       rvu_cgx_wq_destroy(rvu);
 err_mbox:
        rvu_mbox_destroy(rvu);
 err_hwsetup:
@@ -1589,6 +1590,7 @@ static void rvu_remove(struct pci_dev *pdev)
        struct rvu *rvu = pci_get_drvdata(pdev);
 
        rvu_unregister_interrupts(rvu);
+       rvu_cgx_wq_destroy(rvu);
        rvu_mbox_destroy(rvu);
        rvu_reset_all_blocks(rvu);
        rvu_free_hw_resources(rvu);
index 385f597c1fe3b42782a893d8c2a03f6138f89e25..d169fa9eb45ebe43954193767182da3352419f80 100644 (file)
@@ -110,6 +110,10 @@ struct rvu {
                                                  * every cgx lmac port
                                                  */
        void                    **cgx_idmap; /* cgx id to cgx data map table */
+       struct                  work_struct cgx_evh_work;
+       struct                  workqueue_struct *cgx_evh_wq;
+       spinlock_t              cgx_evq_lock; /* cgx event queue lock */
+       struct list_head        cgx_evq_head; /* cgx event queue head */
 };
 
 static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -150,4 +154,5 @@ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
 
 /* CGX APIs */
 int rvu_cgx_probe(struct rvu *rvu);
+void rvu_cgx_wq_destroy(struct rvu *rvu);
 #endif /* RVU_H */
index bf8150727e24b63bde1cabf0b630c7374e7bfeda..5ecc22308b229a5baa16c779ba7922f017550158 100644 (file)
 #include "rvu.h"
 #include "cgx.h"
 
+struct cgx_evq_entry {
+       struct list_head evq_node;
+       struct cgx_link_event link_event;
+};
+
 static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
 {
        return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
@@ -72,9 +77,95 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
        return 0;
 }
 
+/* This is called from interrupt context and is expected to be atomic */
+static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
+{
+       struct cgx_evq_entry *qentry;
+       struct rvu *rvu = data;
+
+       /* post event to the event queue */
+       qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+       if (!qentry)
+               return -ENOMEM;
+       qentry->link_event = *event;
+       spin_lock(&rvu->cgx_evq_lock);
+       list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+       spin_unlock(&rvu->cgx_evq_lock);
+
+       /* start worker to process the events */
+       queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+       return 0;
+}
+
+static void cgx_evhandler_task(struct work_struct *work)
+{
+       struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
+       struct cgx_evq_entry *qentry;
+       struct cgx_link_event *event;
+       unsigned long flags;
+
+       do {
+               /* Dequeue an event */
+               spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+               qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
+                                                 struct cgx_evq_entry,
+                                                 evq_node);
+               if (qentry)
+                       list_del(&qentry->evq_node);
+               spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+               if (!qentry)
+                       break; /* nothing more to process */
+
+               event = &qentry->link_event;
+
+               /* Do nothing for now */
+               kfree(qentry);
+       } while (1);
+}
+
+static void cgx_lmac_event_handler_init(struct rvu *rvu)
+{
+       struct cgx_event_cb cb;
+       int cgx, lmac, err;
+       void *cgxd;
+
+       spin_lock_init(&rvu->cgx_evq_lock);
+       INIT_LIST_HEAD(&rvu->cgx_evq_head);
+       INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
+       rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
+       if (!rvu->cgx_evh_wq) {
+               dev_err(rvu->dev, "alloc workqueue failed");
+               return;
+       }
+
+       cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
+       cb.data = rvu;
+
+       for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+               cgxd = rvu_cgx_pdata(cgx, rvu);
+               for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
+                       err = cgx_lmac_evh_register(&cb, cgxd, lmac);
+                       if (err)
+                               dev_err(rvu->dev,
+                                       "%d:%d handler register failed\n",
+                                       cgx, lmac);
+               }
+       }
+}
+
+void rvu_cgx_wq_destroy(struct rvu *rvu)
+{
+       if (rvu->cgx_evh_wq) {
+               flush_workqueue(rvu->cgx_evh_wq);
+               destroy_workqueue(rvu->cgx_evh_wq);
+               rvu->cgx_evh_wq = NULL;
+       }
+}
+
 int rvu_cgx_probe(struct rvu *rvu)
 {
-       int i;
+       int i, err;
 
        /* find available cgx ports */
        rvu->cgx_cnt = cgx_get_cgx_cnt();
@@ -93,5 +184,11 @@ int rvu_cgx_probe(struct rvu *rvu)
                rvu->cgx_idmap[i] = cgx_get_pdata(i);
 
        /* Map CGX LMAC interfaces to RVU PFs */
-       return rvu_map_cgx_lmac_pf(rvu);
+       err = rvu_map_cgx_lmac_pf(rvu);
+       if (err)
+               return err;
+
+       /* Register for CGX events */
+       cgx_lmac_event_handler_init(rvu);
+       return 0;
 }