From 06beefd6d54c9491c93f03dfdd6174fd29dd3ad8 Mon Sep 17 00:00:00 2001
From: Koen Vandeputte <koen.vandeputte@ncentric.com>
Date: Tue, 7 Aug 2018 11:18:08 +0200
Subject: [PATCH] cns3xxx: ethernet: clean up code

Signed-off-by: Koen Vandeputte <koen.vandeputte@ncentric.com>
---
 .../drivers/net/ethernet/cavium/cns3xxx_eth.c | 35 ++++++++++++-------
 1 file changed, 22 insertions(+), 13 deletions(-)

diff --git a/target/linux/cns3xxx/files/drivers/net/ethernet/cavium/cns3xxx_eth.c b/target/linux/cns3xxx/files/drivers/net/ethernet/cavium/cns3xxx_eth.c
index 776a06402f..9397a9e7ab 100644
--- a/target/linux/cns3xxx/files/drivers/net/ethernet/cavium/cns3xxx_eth.c
+++ b/target/linux/cns3xxx/files/drivers/net/ethernet/cavium/cns3xxx_eth.c
@@ -325,6 +325,7 @@ static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
 	} else {
 		temp = MDIO_READ_COMMAND;
 	}
+
 	temp |= ((location & 0x1f) << MDIO_REG_OFFSET);
 	temp |= (phy_id & 0x1f);
 
@@ -337,8 +338,7 @@ static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
 	}
 
 	if (cycles == 5000) {
-		printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name,
-		       phy_id);
+		printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name, phy_id);
 		return -1;
 	}
 
@@ -363,8 +363,7 @@ static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location)
 	return ret;
 }
 
-static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location,
-			     u16 val)
+static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location, u16 val)
 {
 	unsigned long flags;
 	int ret;
@@ -392,6 +391,7 @@ static int cns3xxx_mdio_register(void __iomem *base)
 
 	if ((err = mdiobus_register(mdio_bus)))
 		mdiobus_free(mdio_bus);
+
 	return err;
 }
 
@@ -537,14 +537,13 @@ static void cns3xxx_alloc_rx_buf(struct sw *sw, int received)
 		/* put the new buffer on RX-free queue */
 		rx_ring->buff_tab[i] = buf;
 		rx_ring->phys_tab[i] = phys;
+
 		if (i == RX_DESCS - 1) {
+			desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | RX_SEGMENT_MRU | END_OF_RING;
 			i = 0;
-			desc->config0 = END_OF_RING | FIRST_SEGMENT |
-					LAST_SEGMENT | RX_SEGMENT_MRU;
 			desc = &(rx_ring)->desc[i];
 		} else {
-			desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
-					RX_SEGMENT_MRU;
+			desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | RX_SEGMENT_MRU;
 			i++;
 			desc++;
 		}
@@ -566,6 +565,7 @@ static void eth_check_num_used(struct _tx_ring *tx_ring)
 		return;
 
 	tx_ring->stopped = stop;
+
 	for (i = 0; i < 4; i++) {
 		struct port *port = switch_port_tab[i];
 		struct net_device *dev;
@@ -574,6 +574,7 @@ static void eth_check_num_used(struct _tx_ring *tx_ring)
 			continue;
 
 		dev = port->netdev;
+
 		if (stop)
 			netif_stop_queue(dev);
 		else
@@ -592,6 +593,7 @@ static void eth_complete_tx(struct sw *sw)
 
 	index = tx_ring->free_index;
 	desc = &(tx_ring)->desc[index];
+
 	for (i = 0; i < num_used; i++) {
 		if (desc->cown) {
 			skb = tx_ring->buff_tab[index];
@@ -610,6 +612,7 @@ static void eth_complete_tx(struct sw *sw)
 			break;
 		}
 	}
+
 	tx_ring->free_index = index;
 	tx_ring->num_used -= i;
 	eth_check_num_used(tx_ring);
@@ -633,8 +636,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
 			break;
 
 		/* process received frame */
-		dma_unmap_single(sw->dev, rx_ring->phys_tab[i],
-				 RX_SEGMENT_MRU, DMA_FROM_DEVICE);
+		dma_unmap_single(sw->dev, rx_ring->phys_tab[i], RX_SEGMENT_MRU, DMA_FROM_DEVICE);
 
 		skb = build_skb(rx_ring->buff_tab[i], RX_SEGMENT_ALLOC_SIZE);
 		if (!skb)
@@ -741,8 +743,10 @@ static void eth_set_desc(struct sw *sw, struct _tx_ring *tx_ring, int index,
 	tx_ring->phys_tab[index] = phys;
 
 	config0 |= len;
+
 	if (index == TX_DESCS - 1)
 		config0 |= END_OF_RING;
+
 	if (index == index_last)
 		config0 |= LAST_SEGMENT;
 
@@ -772,6 +776,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	eth_schedule_poll(sw);
 	spin_lock_bh(&tx_lock);
+
 	if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) {
 		spin_unlock_bh(&tx_lock);
 		return NETDEV_TX_BUSY;
@@ -875,7 +880,6 @@ static int init_rings(struct sw *sw)
 	__raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
 	__raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
 	__raw_writel(CLR_FS_STATE | QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
-
 	__raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
 
 	rx_ring->desc = dmam_alloc_coherent(sw->dev, RX_POOL_ALLOC_SIZE,
@@ -885,6 +889,7 @@ static int init_rings(struct sw *sw)
 
 	/* Setup RX buffers */
 	memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE);
+
 	for (i = 0; i < RX_DESCS; i++) {
 		struct rx_desc *desc = &(rx_ring)->desc[i];
 		void *buf;
@@ -894,13 +899,16 @@ static int init_rings(struct sw *sw)
 			return -ENOMEM;
 
 		desc->sdl = RX_SEGMENT_MRU;
+
 		if (i == (RX_DESCS - 1))
 			desc->eor = 1;
+
 		desc->fsd = 1;
 		desc->lsd = 1;
 
 		desc->sdp = dma_map_single(sw->dev, buf + SKB_HEAD_ALIGN,
 					   RX_SEGMENT_MRU, DMA_FROM_DEVICE);
+
 		if (dma_mapping_error(sw->dev, desc->sdp))
 			return -EIO;
 
@@ -918,12 +926,14 @@ static int init_rings(struct sw *sw)
 
 	/* Setup TX buffers */
 	memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE);
+
 	for (i = 0; i < TX_DESCS; i++) {
 		struct tx_desc *desc = &(tx_ring)->desc[i];
 		tx_ring->buff_tab[i] = 0;
 
 		if (i == (TX_DESCS - 1))
 			desc->eor = 1;
+
 		desc->cown = 1;
 	}
 	__raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0);
@@ -944,8 +954,7 @@ static void destroy_rings(struct sw *sw)
 		if (!buf)
 			continue;
 
-		dma_unmap_single(sw->dev, desc->sdp, RX_SEGMENT_MRU,
-				 DMA_FROM_DEVICE);
+		dma_unmap_single(sw->dev, desc->sdp, RX_SEGMENT_MRU, DMA_FROM_DEVICE);
 		skb_free_frag(buf);
 	}
 
-- 
2.30.2