From 8a578111e343350ff8fa75fc3630d4bba5475cae Mon Sep 17 00:00:00 2001
From: Lennert Buytenhek
Date: Sun, 1 Jun 2008 18:09:35 +0200
Subject: [PATCH] mv643xx_eth: split out rx queue state

Split all RX queue related state into 'struct rx_queue', in
preparation for multiple RX queue support.

Signed-off-by: Lennert Buytenhek
Acked-by: Dale Farnsworth
---
 drivers/net/mv643xx_eth.c | 441 +++++++++++++++++++-------------------
 1 file changed, 225 insertions(+), 216 deletions(-)

diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 84e6c41becd..5bd4b38e953 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -73,13 +73,7 @@ static char mv643xx_eth_driver_version[] = "1.0";
 #define MAX_DESCS_PER_SKB	1
 #endif
-#define ETH_VLAN_HLEN		4
-#define ETH_FCS_LEN		4
-#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */
-#define ETH_WRAPPER_LEN		(ETH_HW_IP_ALIGN + ETH_HLEN + \
-					ETH_VLAN_HLEN + ETH_FCS_LEN)
-#define ETH_RX_SKB_SIZE		(dev->mtu + ETH_WRAPPER_LEN + \
-					dma_get_cache_alignment())
+#define ETH_HW_IP_ALIGN		2

 /*
  * Registers shared between all ports.
  */
@@ -288,22 +282,32 @@ struct mib_counters {
 	u32 late_collision;
 };

+struct rx_queue {
+	int rx_ring_size;
+
+	int rx_desc_count;
+	int rx_curr_desc;
+	int rx_used_desc;
+
+	struct rx_desc *rx_desc_area;
+	dma_addr_t rx_desc_dma;
+	int rx_desc_area_size;
+	struct sk_buff **rx_skb;
+
+	struct timer_list rx_oom;
+};
+
 struct mv643xx_eth_private {
 	struct mv643xx_eth_shared_private *shared;
 	int port_num;			/* User Ethernet port number */

 	struct mv643xx_eth_shared_private *shared_smi;

-	u32 rx_sram_addr;		/* Base address of rx sram area */
-	u32 rx_sram_size;		/* Size of rx sram area */
 	u32 tx_sram_addr;		/* Base address of tx sram area */
 	u32 tx_sram_size;		/* Size of tx sram area */

 	/* Tx/Rx rings managment indexes fields. For driver use */
-	/* Next available and first returning Rx resource */
-	int rx_curr_desc, rx_used_desc;
-
 	/* Next available and first returning Tx resource */
 	int tx_curr_desc, tx_used_desc;
@@ -311,11 +315,6 @@ struct mv643xx_eth_private {
 	u32 tx_clean_threshold;
 #endif

-	struct rx_desc *rx_desc_area;
-	dma_addr_t rx_desc_dma;
-	int rx_desc_area_size;
-	struct sk_buff **rx_skb;
-
 	struct tx_desc *tx_desc_area;
 	dma_addr_t tx_desc_dma;
 	int tx_desc_area_size;
@@ -324,27 +323,25 @@ struct mv643xx_eth_private {
 	struct work_struct tx_timeout_task;

 	struct net_device *dev;
-	struct napi_struct napi;
 	struct mib_counters mib_counters;
 	spinlock_t lock;

 	/* Size of Tx Ring per queue */
 	int tx_ring_size;
 	/* Number of tx descriptors in use */
 	int tx_desc_count;

-	/* Size of Rx Ring per queue */
-	int rx_ring_size;
-	/* Number of rx descriptors in use */
-	int rx_desc_count;
-
-	/*
-	 * Used in case RX Ring is empty, which can be caused when
-	 * system does not have resources (skb's)
-	 */
-	struct timer_list timeout;

 	u32 rx_int_coal;
 	u32 tx_int_coal;

 	struct mii_if_info mii;
+
+	/*
+	 * RX state.
+	 */
+	int default_rx_ring_size;
+	unsigned long rx_desc_sram_addr;
+	int rx_desc_sram_size;
+	struct napi_struct napi;
+	struct rx_queue rxq[1];
 };
@@ -361,30 +358,25 @@ static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
 /* rxq/txq helper functions *************************************************/
-static void mv643xx_eth_port_enable_rx(struct mv643xx_eth_private *mp,
-					unsigned int queues)
+static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
 {
-	wrl(mp, RXQ_COMMAND(mp->port_num), queues);
+	return container_of(rxq, struct mv643xx_eth_private, rxq[0]);
 }

-static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_eth_private *mp)
+static void rxq_enable(struct rx_queue *rxq)
 {
-	unsigned int port_num = mp->port_num;
-	u32 queues;
-
-	/* Stop Rx port activity. Check port Rx activity. */
-	queues = rdl(mp, RXQ_COMMAND(port_num)) & 0xFF;
-	if (queues) {
-		/* Issue stop command for active queues only */
-		wrl(mp, RXQ_COMMAND(port_num), (queues << 8));
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	wrl(mp, RXQ_COMMAND(mp->port_num), 1);
+}

-		/* Wait for all Rx activity to terminate. */
-		/* Check port cause register that all Rx queues are stopped */
-		while (rdl(mp, RXQ_COMMAND(port_num)) & 0xFF)
-			udelay(10);
-	}
+static void rxq_disable(struct rx_queue *rxq)
+{
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	u8 mask = 1;

-	return queues;
+	wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8);
+	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask)
+		udelay(10);
 }

 static void mv643xx_eth_port_enable_tx(struct mv643xx_eth_private *mp,
@@ -421,19 +413,29 @@ static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_eth_private *mp)
 /* rx ***********************************************************************/
 static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev);

-static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
+static void rxq_refill(struct rx_queue *rxq)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 	unsigned long flags;

 	spin_lock_irqsave(&mp->lock, flags);

-	while (mp->rx_desc_count < mp->rx_ring_size) {
+	while (rxq->rx_desc_count < rxq->rx_ring_size) {
+		int skb_size;
 		struct sk_buff *skb;
 		int unaligned;
 		int rx;

-		skb = dev_alloc_skb(ETH_RX_SKB_SIZE + dma_get_cache_alignment());
+		/*
+		 * Reserve 2+14 bytes for an ethernet header (the
+		 * hardware automatically prepends 2 bytes of dummy
+		 * data to each received packet), 4 bytes for a VLAN
+		 * header, and 4 bytes for the trailing FCS -- 24
+		 * bytes total.
+		 */
+		skb_size = mp->dev->mtu + 24;
+
+		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
 		if (skb == NULL)
 			break;

@@ -441,44 +443,43 @@ static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
 		if (unaligned)
 			skb_reserve(skb, dma_get_cache_alignment() - unaligned);

-		mp->rx_desc_count++;
-		rx = mp->rx_used_desc;
-		mp->rx_used_desc = (rx + 1) % mp->rx_ring_size;
+		rxq->rx_desc_count++;
+		rx = rxq->rx_used_desc;
+		rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size;

-		mp->rx_desc_area[rx].buf_ptr = dma_map_single(NULL,
-							skb->data,
-							ETH_RX_SKB_SIZE,
-							DMA_FROM_DEVICE);
-		mp->rx_desc_area[rx].buf_size = ETH_RX_SKB_SIZE;
-		mp->rx_skb[rx] = skb;
+		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
+						skb_size, DMA_FROM_DEVICE);
+		rxq->rx_desc_area[rx].buf_size = skb_size;
+		rxq->rx_skb[rx] = skb;
 		wmb();
-		mp->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
+		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
 						RX_ENABLE_INTERRUPT;
 		wmb();

 		skb_reserve(skb, ETH_HW_IP_ALIGN);
 	}

-	if (mp->rx_desc_count == 0) {
-		mp->timeout.expires = jiffies + (HZ / 10);
-		add_timer(&mp->timeout);
+	if (rxq->rx_desc_count == 0) {
+		rxq->rx_oom.expires = jiffies + (HZ / 10);
+		add_timer(&rxq->rx_oom);
 	}

 	spin_unlock_irqrestore(&mp->lock, flags);
 }

-static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
+static inline void rxq_refill_timer_wrapper(unsigned long data)
 {
-	mv643xx_eth_rx_refill_descs((struct net_device *)data);
+	rxq_refill((struct rx_queue *)data);
 }

-static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
+static int rxq_process(struct rx_queue *rxq, int budget)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
-	unsigned int received_packets = 0;
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	struct net_device_stats *stats = &mp->dev->stats;
+	int rx;

-	while (budget-- > 0) {
+	rx = 0;
+	while (rx < budget) {
 		struct sk_buff *skb;
 		volatile struct rx_desc *rx_desc;
 		unsigned int cmd_sts;
@@ -486,7 +487,7 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)

 		spin_lock_irqsave(&mp->lock, flags);

-		rx_desc = &mp->rx_desc_area[mp->rx_curr_desc];
+		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
 		cmd_sts = rx_desc->cmd_sts;

 		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
@@ -495,17 +496,17 @@
 		}
 		rmb();

-		skb = mp->rx_skb[mp->rx_curr_desc];
-		mp->rx_skb[mp->rx_curr_desc] = NULL;
+		skb = rxq->rx_skb[rxq->rx_curr_desc];
+		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

-		mp->rx_curr_desc = (mp->rx_curr_desc + 1) % mp->rx_ring_size;
+		rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size;

 		spin_unlock_irqrestore(&mp->lock, flags);

 		dma_unmap_single(NULL, rx_desc->buf_ptr + ETH_HW_IP_ALIGN,
-				ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);
-		mp->rx_desc_count--;
-		received_packets++;
+				mp->dev->mtu + 24, DMA_FROM_DEVICE);
+		rxq->rx_desc_count--;
+		rx++;

 		/*
 		 * Update statistics.
@@ -528,7 +529,7 @@
 			printk(KERN_ERR
 				"%s: Received packet spread "
 				"on multiple descriptors\n",
-				dev->name);
+				mp->dev->name);
 		}
 		if (cmd_sts & ERROR_SUMMARY)
 			stats->rx_errors++;
@@ -546,48 +547,45 @@
 				skb->csum = htons(
 					(cmd_sts & 0x0007fff8) >> 3);
 			}
-			skb->protocol = eth_type_trans(skb, dev);
+			skb->protocol = eth_type_trans(skb, mp->dev);
 #ifdef MV643XX_ETH_NAPI
 			netif_receive_skb(skb);
 #else
 			netif_rx(skb);
 #endif
 		}
-		dev->last_rx = jiffies;
+		mp->dev->last_rx = jiffies;
 	}
-	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */
+	rxq_refill(rxq);

-	return received_packets;
+	return rx;
 }

 #ifdef MV643XX_ETH_NAPI
 static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 {
-	struct mv643xx_eth_private *mp = container_of(napi, struct mv643xx_eth_private, napi);
-	struct net_device *dev = mp->dev;
-	unsigned int port_num = mp->port_num;
-	int work_done;
+	struct mv643xx_eth_private *mp;
+	int rx;
+
+	mp = container_of(napi, struct mv643xx_eth_private, napi);

 #ifdef MV643XX_ETH_TX_FAST_REFILL
 	if (++mp->tx_clean_threshold > 5) {
-		mv643xx_eth_free_completed_tx_descs(dev);
+		mv643xx_eth_free_completed_tx_descs(mp->dev);
 		mp->tx_clean_threshold = 0;
 	}
 #endif

-	work_done = 0;
-	if ((rdl(mp, RXQ_CURRENT_DESC_PTR(port_num)))
-	    != (u32) mp->rx_used_desc)
-		work_done = mv643xx_eth_receive_queue(dev, budget);
+	rx = rxq_process(mp->rxq, budget);

-	if (work_done < budget) {
-		netif_rx_complete(dev, napi);
-		wrl(mp, INT_CAUSE(port_num), 0);
-		wrl(mp, INT_CAUSE_EXT(port_num), 0);
-		wrl(mp, INT_MASK(port_num), INT_RX | INT_EXT);
+	if (rx < budget) {
+		netif_rx_complete(mp->dev, napi);
+		wrl(mp, INT_CAUSE(mp->port_num), 0);
+		wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
+		wrl(mp, INT_MASK(mp->port_num), INT_RX | INT_EXT);
 	}

-	return work_done;
+	return rx;
 }
 #endif

@@ -1252,53 +1250,102 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
 /* rx/tx queue initialisation ***********************************************/
-static void ether_init_rx_desc_ring(struct mv643xx_eth_private *mp)
+static int rxq_init(struct mv643xx_eth_private *mp)
 {
-	volatile struct rx_desc *p_rx_desc;
-	int rx_desc_num = mp->rx_ring_size;
+	struct rx_queue *rxq = mp->rxq;
+	struct rx_desc *rx_desc;
+	int size;
 	int i;

-	/* initialize the next_desc_ptr links in the Rx descriptors ring */
-	p_rx_desc = (struct rx_desc *)mp->rx_desc_area;
-	for (i = 0; i < rx_desc_num; i++) {
-		p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
-			((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
+	rxq->rx_ring_size = mp->default_rx_ring_size;
+
+	rxq->rx_desc_count = 0;
+	rxq->rx_curr_desc = 0;
+	rxq->rx_used_desc = 0;
+
+	size = rxq->rx_ring_size * sizeof(struct rx_desc);
+
+	if (size <= mp->rx_desc_sram_size) {
+		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
+						mp->rx_desc_sram_size);
+		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
+	} else {
+		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
+							&rxq->rx_desc_dma,
+							GFP_KERNEL);
+	}
+
+	if (rxq->rx_desc_area == NULL) {
+		dev_printk(KERN_ERR, &mp->dev->dev,
+			   "can't allocate rx ring (%d bytes)\n", size);
+		goto out;
+	}
+	memset(rxq->rx_desc_area, 0, size);
+
+	rxq->rx_desc_area_size = size;
+	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
+								GFP_KERNEL);
+	if (rxq->rx_skb == NULL) {
+		dev_printk(KERN_ERR, &mp->dev->dev,
+			   "can't allocate rx skb ring\n");
+		goto out_free;
+	}
+
+	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
+	for (i = 0; i < rxq->rx_ring_size; i++) {
+		int nexti = (i + 1) % rxq->rx_ring_size;
+		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
+					nexti * sizeof(struct rx_desc);
 	}

-	/* Save Rx desc pointer to driver struct. */
-	mp->rx_curr_desc = 0;
-	mp->rx_used_desc = 0;
+	init_timer(&rxq->rx_oom);
+	rxq->rx_oom.data = (unsigned long)rxq;
+	rxq->rx_oom.function = rxq_refill_timer_wrapper;
+
+	return 0;
+
+
+out_free:
+	if (size <= mp->rx_desc_sram_size)
+		iounmap(rxq->rx_desc_area);
+	else
+		dma_free_coherent(NULL, size,
+				  rxq->rx_desc_area,
+				  rxq->rx_desc_dma);

-	mp->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
+out:
+	return -ENOMEM;
 }

-static void mv643xx_eth_free_rx_rings(struct net_device *dev)
+static void rxq_deinit(struct rx_queue *rxq)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	int curr;
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	int i;
+
+	rxq_disable(rxq);

-	/* Stop RX Queues */
-	mv643xx_eth_port_disable_rx(mp);
+	del_timer_sync(&rxq->rx_oom);

-	/* Free preallocated skb's on RX rings */
-	for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) {
-		if (mp->rx_skb[curr]) {
-			dev_kfree_skb(mp->rx_skb[curr]);
-			mp->rx_desc_count--;
+	for (i = 0; i < rxq->rx_ring_size; i++) {
+		if (rxq->rx_skb[i]) {
+			dev_kfree_skb(rxq->rx_skb[i]);
+			rxq->rx_desc_count--;
 		}
 	}

-	if (mp->rx_desc_count)
-		printk(KERN_ERR
-			"%s: Error in freeing Rx Ring. %d skb's still"
-			" stuck in RX Ring - ignoring them\n", dev->name,
-			mp->rx_desc_count);
-	/* Free RX ring */
-	if (mp->rx_sram_size)
-		iounmap(mp->rx_desc_area);
+	if (rxq->rx_desc_count) {
+		dev_printk(KERN_ERR, &mp->dev->dev,
+			   "error freeing rx ring -- %d skbs stuck\n",
+			   rxq->rx_desc_count);
+	}
+
+	if (rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
+		iounmap(rxq->rx_desc_area);
 	else
-		dma_free_coherent(NULL, mp->rx_desc_area_size,
-				mp->rx_desc_area, mp->rx_desc_dma);
+		dma_free_coherent(NULL, rxq->rx_desc_area_size,
+				  rxq->rx_desc_area, rxq->rx_desc_dma);
+
+	kfree(rxq->rx_skb);
 }

 static void ether_init_tx_desc_ring(struct mv643xx_eth_private *mp)
@@ -1510,7 +1557,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 	}
 #else
 	if (int_cause & INT_RX)
-		mv643xx_eth_receive_queue(dev, INT_MAX);
+		rxq_process(mp->rxq, INT_MAX);
 #endif
 	if (int_cause_ext & INT_EXT_TX)
 		mv643xx_eth_free_completed_tx_descs(dev);
@@ -1544,20 +1591,30 @@ static void phy_reset(struct mv643xx_eth_private *mp)
 static void port_start(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	unsigned int port_num = mp->port_num;
-	int tx_curr_desc, rx_curr_desc;
 	u32 pscr;
 	struct ethtool_cmd ethtool_cmd;
+	int i;

-	/* Assignment of Tx CTRP of given queue */
-	tx_curr_desc = mp->tx_curr_desc;
-	wrl(mp, TXQ_CURRENT_DESC_PTR(port_num),
-		(u32)((struct tx_desc *)mp->tx_desc_dma + tx_curr_desc));
+	/*
+	 * Configure basic link parameters.
+	 */
+	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
+	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
+	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
+	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
+		DISABLE_AUTO_NEG_SPEED_GMII |
+		DISABLE_AUTO_NEG_FOR_DUPLEX |
+		DO_NOT_FORCE_LINK_FAIL |
+		SERIAL_PORT_CONTROL_RESERVED;
+	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
+	pscr |= SERIAL_PORT_ENABLE;
+	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);

-	/* Assignment of Rx CRDP of given queue */
-	rx_curr_desc = mp->rx_curr_desc;
-	wrl(mp, RXQ_CURRENT_DESC_PTR(port_num),
-		(u32)((struct rx_desc *)mp->rx_desc_dma + rx_curr_desc));
+	wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
+
+	mv643xx_eth_get_settings(dev, &ethtool_cmd);
+	phy_reset(mp);
+	mv643xx_eth_set_settings(dev, &ethtool_cmd);

 	/* Add the assigned Ethernet address to the port's address table */
 	uc_addr_set(mp, dev->dev_addr);
@@ -1566,42 +1623,34 @@ static void port_start(struct net_device *dev)
 	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
 	 * frames to RX queue #0.
 	 */
-	wrl(mp, PORT_CONFIG(port_num), 0x00000000);
+	wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000);

 	/*
 	 * Treat BPDUs as normal multicasts, and disable partition mode.
 	 */
-	wrl(mp, PORT_CONFIG_EXT(port_num), 0x00000000);
-
-	pscr = rdl(mp, PORT_SERIAL_CONTROL(port_num));
-
-	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
-	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);
+	wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);

-	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
-		DISABLE_AUTO_NEG_SPEED_GMII |
-		DISABLE_AUTO_NEG_FOR_DUPLEX |
-		DO_NOT_FORCE_LINK_FAIL |
-		SERIAL_PORT_CONTROL_RESERVED;
+	/*
+	 * Enable the receive queue.
+	 */
+	for (i = 0; i < 1; i++) {
+		struct rx_queue *rxq = mp->rxq;
+		int off = RXQ_CURRENT_DESC_PTR(mp->port_num);
+		u32 addr;

-	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);
+		addr = (u32)rxq->rx_desc_dma;
+		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
+		wrl(mp, off, addr);

-	pscr |= SERIAL_PORT_ENABLE;
-	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);
+		rxq_enable(rxq);
+	}

-	/* Assign port SDMA configuration */
-	wrl(mp, SDMA_CONFIG(port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
-	/* Enable port Rx. */
-	mv643xx_eth_port_enable_rx(mp, 1);
+	wrl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num),
+		(u32)((struct tx_desc *)mp->tx_desc_dma + mp->tx_curr_desc));

 	/* Disable port bandwidth limits by clearing MTU register */
-	wrl(mp, TX_BW_MTU(port_num), 0);
-
-	/* save phy settings across reset */
-	mv643xx_eth_get_settings(dev, &ethtool_cmd);
-	phy_reset(mp);
-	mv643xx_eth_set_settings(dev, &ethtool_cmd);
+	wrl(mp, TX_BW_MTU(mp->port_num), 0);
 }

 #ifdef MV643XX_ETH_COAL
@@ -1661,18 +1710,11 @@ static int mv643xx_eth_open(struct net_device *dev)
 	port_init(mp);

-	memset(&mp->timeout, 0, sizeof(struct timer_list));
-	mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper;
-	mp->timeout.data = (unsigned long)dev;
-
-	/* Allocate RX and TX skb rings */
-	mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
-								GFP_KERNEL);
-	if (!mp->rx_skb) {
-		printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
-		err = -ENOMEM;
+	err = rxq_init(mp);
+	if (err)
 		goto out_free_irq;
-	}
+	rxq_refill(mp->rxq);
+
 	mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
 								GFP_KERNEL);
 	if (!mp->tx_skb) {
@@ -1706,39 +1748,6 @@ static int mv643xx_eth_open(struct net_device *dev)
 	ether_init_tx_desc_ring(mp);

-	/* Allocate RX ring */
-	mp->rx_desc_count = 0;
-	size = mp->rx_ring_size * sizeof(struct rx_desc);
-	mp->rx_desc_area_size = size;
-
-	if (mp->rx_sram_size) {
-		mp->rx_desc_area = ioremap(mp->rx_sram_addr,
-					mp->rx_sram_size);
-		mp->rx_desc_dma = mp->rx_sram_addr;
-	} else
-		mp->rx_desc_area = dma_alloc_coherent(NULL, size,
-							&mp->rx_desc_dma,
-							GFP_KERNEL);
-
-	if (!mp->rx_desc_area) {
-		printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
-			dev->name, size);
-		printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
-			dev->name);
-		if (mp->rx_sram_size)
-			iounmap(mp->tx_desc_area);
-		else
-			dma_free_coherent(NULL, mp->tx_desc_area_size,
-					mp->tx_desc_area, mp->tx_desc_dma);
-		err = -ENOMEM;
-		goto out_free_tx_skb;
-	}
-	memset((void *)mp->rx_desc_area, 0, size);
-
-	ether_init_rx_desc_ring(mp);
-
-	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */
-
 #ifdef MV643XX_ETH_NAPI
 	napi_enable(&mp->napi);
 #endif
@@ -1764,7 +1773,7 @@ static int mv643xx_eth_open(struct net_device *dev)
 out_free_tx_skb:
 	kfree(mp->tx_skb);
 out_free_rx_skb:
-	kfree(mp->rx_skb);
+	rxq_deinit(mp->rxq);
 out_free_irq:
 	free_irq(dev->irq, dev);

@@ -1777,7 +1786,7 @@ static void port_reset(struct mv643xx_eth_private *mp)
 	unsigned int reg_data;

 	mv643xx_eth_port_disable_tx(mp);
-	mv643xx_eth_port_disable_rx(mp);
+	rxq_disable(mp->rxq);

 	/* Clear all MIB counters */
 	clear_mib_counters(mp);
@@ -1809,7 +1818,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	port_reset(mp);

 	mv643xx_eth_free_tx_rings(dev);
-	mv643xx_eth_free_rx_rings(dev);
+	rxq_deinit(mp->rxq);

 	free_irq(dev->irq, dev);

@@ -2162,7 +2171,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	/* set default config values */
 	uc_addr_get(mp, dev->dev_addr);
-	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
 	mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;

 	if (is_valid_ether_addr(pd->mac_addr))
@@ -2171,8 +2179,9 @@
 	if (pd->phy_addr || pd->force_phy_addr)
 		phy_addr_set(mp, pd->phy_addr);

+	mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
 	if (pd->rx_queue_size)
-		mp->rx_ring_size = pd->rx_queue_size;
+		mp->default_rx_ring_size = pd->rx_queue_size;

 	if (pd->tx_queue_size)
 		mp->tx_ring_size = pd->tx_queue_size;
@@ -2183,8 +2192,8 @@
 	}

 	if (pd->rx_sram_size) {
-		mp->rx_sram_size = pd->rx_sram_size;
-		mp->rx_sram_addr = pd->rx_sram_addr;
+		mp->rx_desc_sram_addr = pd->rx_sram_addr;
+		mp->rx_desc_sram_size = pd->rx_sram_size;
 	}

 	duplex = pd->duplex;
-- 
2.46.0
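
A note on the idiom at the heart of this patch: rxq_to_mp() uses
container_of() to recover the owning mv643xx_eth_private from a bare
struct rx_queue pointer, so the per-queue helpers (rxq_enable(),
rxq_refill(), rxq_process(), ...) no longer need a net_device argument
threaded through them. Below is a minimal, self-contained userspace
sketch of that pattern -- container_of() is re-implemented here via
offsetof(), and the main()/printf() scaffolding is illustrative only,
not part of the driver:

	#include <stddef.h>
	#include <stdio.h>

	/* Userspace stand-in for the kernel's container_of() macro. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct rx_queue {
		int rx_ring_size;
		int rx_desc_count;
	};

	struct mv643xx_eth_private {
		int port_num;
		struct rx_queue rxq[1];
	};

	/* Same shape as the rxq_to_mp() helper the patch introduces. */
	static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
	{
		return container_of(rxq, struct mv643xx_eth_private, rxq[0]);
	}

	int main(void)
	{
		struct mv643xx_eth_private mp = { .port_num = 3 };
		struct rx_queue *rxq = &mp.rxq[0];

		/* Recovers the owning port from the queue pointer alone. */
		printf("port %d\n", rxq_to_mp(rxq)->port_num);
		return 0;
	}

Because the queue is embedded in (not pointed to by) the private
struct, this lookup is pure pointer arithmetic with no extra
back-pointer field to keep in sync.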
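The descriptor chaining that rxq_init() sets up is also worth seeing in
isolation: each descriptor's next_desc_ptr holds the DMA address of the
following descriptor, and the (i + 1) % ring_size wrap makes the last
one point back to the first, forming the circular ring the hardware
walks. A toy model of just that arithmetic -- the base address and ring
size below are invented for the example, not read from any hardware:

	#include <stdint.h>
	#include <stdio.h>

	struct rx_desc {
		uint32_t next_desc_ptr;
	};

	int main(void)
	{
		struct rx_desc ring[4];
		uint32_t dma_base = 0x10000000;	/* pretend DMA address */
		int size = 4;
		int i;

		/* Link each descriptor to the next, wrapping at the end,
		 * exactly as the loop in rxq_init() does. */
		for (i = 0; i < size; i++) {
			int nexti = (i + 1) % size;
			ring[i].next_desc_ptr = dma_base +
					nexti * sizeof(struct rx_desc);
		}

		/* The last entry points back at dma_base. */
		for (i = 0; i < size; i++)
			printf("desc %d -> 0x%08x\n", i,
				(unsigned)ring[i].next_desc_ptr);
		return 0;
	}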