From: Alexander Duyck
Date: Wed, 8 Feb 2012 07:50:04 +0000 (+0000)
Subject: ixgbe: Add iterator for cycling through rings on a q_vector
X-Git-Url: https://openfabrics.org/gitweb/?a=commitdiff_plain;h=a557928e26b08496c8f4b6c04e3838ad8048ad85;p=~shefty%2Frdma-dev.git

ixgbe: Add iterator for cycling through rings on a q_vector

Since there are multiple spots where we have to cycle through all of the
rings on a q_vector, it makes sense to just add a macro for iterating
through all of them.

Signed-off-by: Alexander Duyck
Tested-by: Stephen Ko
Signed-off-by: Jeff Kirsher
---

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 699899ac85d..03175cb7f07 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -296,6 +296,10 @@ struct ixgbe_ring_container {
 	u8 itr;		/* current ITR setting for ring */
 };
 
+/* iterator for handling rings in ring container */
+#define ixgbe_for_each_ring(pos, head) \
+	for (pos = (head).ring; pos != NULL; pos = pos->next)
+
 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
 			       ? 8 : 1)
 #define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 604540d6c6b..95240ab2917 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -928,10 +928,10 @@ static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
 	if (q_vector->cpu == cpu)
 		goto out_no_update;
 
-	for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+	ixgbe_for_each_ring(ring, q_vector->tx)
 		ixgbe_update_tx_dca(adapter, ring, cpu);
 
-	for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+	ixgbe_for_each_ring(ring, q_vector->rx)
 		ixgbe_update_rx_dca(adapter, ring, cpu);
 
 	q_vector->cpu = cpu;
@@ -1706,10 +1706,10 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 		struct ixgbe_ring *ring;
 		q_vector = adapter->q_vector[v_idx];
 
-		for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+		ixgbe_for_each_ring(ring, q_vector->rx)
 			ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
 
-		for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+		ixgbe_for_each_ring(ring, q_vector->tx)
 			ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
 
 		if (q_vector->tx.ring && !q_vector->rx.ring) {
@@ -4195,7 +4195,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
 		ixgbe_update_dca(q_vector);
 #endif
 
-	for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+	ixgbe_for_each_ring(ring, q_vector->tx)
 		clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
 
 	/* attempt to distribute budget to each queue fairly, but don't allow
@@ -4205,7 +4205,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
 	else
 		per_ring_budget = budget;
 
-	for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+	ixgbe_for_each_ring(ring, q_vector->rx)
 		clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
 						     per_ring_budget);
 
@@ -4940,10 +4940,10 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
 	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
 	struct ixgbe_ring *ring;
 
-	for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+	ixgbe_for_each_ring(ring, q_vector->tx)
 		adapter->tx_ring[ring->queue_index] = NULL;
 
-	for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+	ixgbe_for_each_ring(ring, q_vector->rx)
 		adapter->rx_ring[ring->queue_index] = NULL;
 
 	adapter->q_vector[v_idx] = NULL;
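
For illustration only, not part of the patch: a minimal, self-contained C
sketch of the same pattern the macro relies on, a for-loop walking a
NULL-terminated singly linked list hanging off a container struct. The
names below (demo_ring, demo_ring_container, demo_for_each_ring) are
simplified stand-ins for the real ixgbe_ring / ixgbe_ring_container /
ixgbe_for_each_ring, not the driver's actual types.

/*
 * Standalone sketch of the iterator pattern used by ixgbe_for_each_ring().
 * demo_* names are hypothetical; only the loop shape mirrors the patch.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_ring {
	int queue_index;
	struct demo_ring *next;	/* next ring owned by the same container */
};

struct demo_ring_container {
	struct demo_ring *ring;	/* head of the singly linked ring list */
};

/* same shape as the macro added by the patch */
#define demo_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

int main(void)
{
	struct demo_ring r1 = { .queue_index = 1, .next = NULL };
	struct demo_ring r0 = { .queue_index = 0, .next = &r1 };
	struct demo_ring_container tx = { .ring = &r0 };
	struct demo_ring *ring;

	/* visits r0 then r1, like the converted loops in the driver */
	demo_for_each_ring(ring, tx)
		printf("ring %d\n", ring->queue_index);

	return 0;
}

Note that the macro takes the ring container itself and dereferences
(head).ring as the list head, which is why the converted call sites pass
q_vector->tx or q_vector->rx rather than a ring pointer.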