iwlwifi: configure the queues from the op_mode
author     Emmanuel Grumbach <emmanuel.grumbach@intel.com>
           Thu, 21 Jun 2012 08:53:44 +0000 (11:53 +0300)
committer  Johannes Berg <johannes.berg@intel.com>
           Mon, 25 Jun 2012 07:37:58 +0000 (09:37 +0200)
Since the op_mode defines the queue mapping, let it configure the
queues entirely through the API functions.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
drivers/net/wireless/iwlwifi/dvm/commands.h
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/dvm/ucode.c
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/trans.c
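
For orientation, a minimal standalone C sketch of the flow after this change:
the op_mode hands the transport only cmd_queue/cmd_fifo, the transport brings
up its command queue at fw_alive time, and the op_mode then enables the AC
queues through the new iwl_trans_ac_txq_enable() wrapper. The constants and
the call order mirror the hunks below; struct trans_model and the trans_*
stubs are illustrative stand-ins, not the real iwlwifi API.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Constants copied from the dvm/commands.h hunk below. */
#define IWL_TX_FIFO_BK                 0
#define IWL_TX_FIFO_BE                 1
#define IWL_TX_FIFO_VI                 2
#define IWL_TX_FIFO_VO                 3
#define IWL_TX_FIFO_UNUSED             255
#define IWLAGN_CMD_FIFO_NUM            7
#define IWL_DEFAULT_CMD_QUEUE_NUM      4

/* Illustrative stand-in for the transport's private state (cmd_fifo is the
 * field this commit adds to struct iwl_trans_pcie). */
struct trans_model {
	uint8_t cmd_queue;
	uint8_t cmd_fifo;
};

/* Models iwl_trans_configure(): the op_mode now passes only cmd_queue and
 * cmd_fifo; the queue_to_fifo array is gone from the transport config. */
static void trans_configure(struct trans_model *t, uint8_t cmd_queue,
			    uint8_t cmd_fifo)
{
	t->cmd_queue = cmd_queue;
	t->cmd_fifo = cmd_fifo;
}

/* Models iwl_tx_start(): the transport enables only its own command queue. */
static void trans_fw_alive(struct trans_model *t)
{
	printf("trans: cmd queue %d -> fifo %d\n", t->cmd_queue, t->cmd_fifo);
}

/* Models iwl_trans_ac_txq_enable(): the op_mode enables each data queue. */
static void trans_ac_txq_enable(struct trans_model *t, int queue, int fifo)
{
	(void)t;
	printf("op_mode: queue %d -> fifo %d\n", queue, fifo);
}

/* Default (non-PAN) queue-to-FIFO mapping, as in the ucode.c hunk below. */
static const uint8_t default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO, IWL_TX_FIFO_VI, IWL_TX_FIFO_BE, IWL_TX_FIFO_BK,
};

int main(void)
{
	struct trans_model trans;
	size_t i;

	/* op_mode start (main.c hunk): configure the transport. */
	trans_configure(&trans, IWL_DEFAULT_CMD_QUEUE_NUM, IWLAGN_CMD_FIFO_NUM);

	/* firmware alive: the transport brings up the command queue ... */
	trans_fw_alive(&trans);

	/* ... then the op_mode maps the data queues (iwl_alive_notify). */
	for (i = 0; i < sizeof(default_queue_to_tx_fifo); i++)
		if (default_queue_to_tx_fifo[i] != IWL_TX_FIFO_UNUSED)
			trans_ac_txq_enable(&trans, (int)i,
					    default_queue_to_tx_fifo[i]);

	return 0;
}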

diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 64811cd91635887b2acbb93c2ec5cf0a444ea896..97bea16f359298ae1228a415757725c668c520e7 100644
@@ -190,6 +190,44 @@ enum {
        REPLY_MAX = 0xff
 };
 
+/*
+ * Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate
+ *  - 4 standard TX queues
+ *  - the command queue
+ *  - 4 PAN TX queues
+ *  - the PAN multicast queue, and
+ *  - the AUX (TX during scan dwell) queue.
+ */
+#define IWL_MIN_NUM_QUEUES     11
+
+/*
+ * Command queue depends on iPAN support.
+ */
+#define IWL_DEFAULT_CMD_QUEUE_NUM      4
+#define IWL_IPAN_CMD_QUEUE_NUM         9
+
+#define IWL_TX_FIFO_BK         0       /* shared */
+#define IWL_TX_FIFO_BE         1
+#define IWL_TX_FIFO_VI         2       /* shared */
+#define IWL_TX_FIFO_VO         3
+#define IWL_TX_FIFO_BK_IPAN    IWL_TX_FIFO_BK
+#define IWL_TX_FIFO_BE_IPAN    4
+#define IWL_TX_FIFO_VI_IPAN    IWL_TX_FIFO_VI
+#define IWL_TX_FIFO_VO_IPAN    5
+/* re-uses the VO FIFO, uCode will properly flush/schedule */
+#define IWL_TX_FIFO_AUX                5
+#define IWL_TX_FIFO_UNUSED     255
+
+#define IWLAGN_CMD_FIFO_NUM    7
+
+/*
+ * This queue number is required for proper operation
+ * because the ucode will stop/start the scheduler as
+ * required.
+ */
+#define IWL_IPAN_MCAST_QUEUE   8
+
 /******************************************************************************
  * (0)
  * Commonly used structures and definitions:
@@ -755,8 +793,6 @@ struct iwl_qosparam_cmd {
 #define IWLAGN_BROADCAST_ID    15
 #define        IWLAGN_STATION_COUNT    16
 
-#define        IWL_INVALID_STATION     255
-#define IWL_MAX_TID_COUNT      8
 #define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
 
 #define STA_FLG_TX_RATE_MSK            cpu_to_le32(1 << 2)
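
As a quick check, the accounting in the IWL_MIN_NUM_QUEUES comment above adds
up to 11. A compile-time sketch of that arithmetic (standalone C11, purely
illustrative; the value 11 is copied from the hunk above):

/* 4 standard TX + 1 command + 4 PAN TX + 1 PAN multicast + 1 AUX = 11 */
#define IWL_MIN_NUM_QUEUES 11
_Static_assert(4 + 1 + 4 + 1 + 1 == IWL_MIN_NUM_QUEUES,
	       "queue accounting matches IWL_MIN_NUM_QUEUES");
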
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 54cf085ddc8965d74cdf053750862afe51bff814..054f728f6266fb6f4f4425e7824f62a0e910730c 100644
 
 #define IWL_NUM_SCAN_RATES         (2)
 
-/*
- * Minimum number of queues. MAX_NUM is defined in hw specific files.
- * Set the minimum to accommodate
- *  - 4 standard TX queues
- *  - the command queue
- *  - 4 PAN TX queues
- *  - the PAN multicast queue, and
- *  - the AUX (TX during scan dwell) queue.
- */
-#define IWL_MIN_NUM_QUEUES     11
-
-/*
- * Command queue depends on iPAN support.
- */
-#define IWL_DEFAULT_CMD_QUEUE_NUM      4
-#define IWL_IPAN_CMD_QUEUE_NUM         9
 
 #define IEEE80211_DATA_LEN              2304
 #define IEEE80211_4ADDR_LEN             30
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index abfd7916bde60097da978e976a9fca685a6d2a0b..612f05d757db79d35bb62bdc0143cba48d9f8184 100644
@@ -518,49 +518,6 @@ static void iwl_bg_tx_flush(struct work_struct *work)
  * queue/FIFO/AC mapping definitions
  */
 
-#define IWL_TX_FIFO_BK         0       /* shared */
-#define IWL_TX_FIFO_BE         1
-#define IWL_TX_FIFO_VI         2       /* shared */
-#define IWL_TX_FIFO_VO         3
-#define IWL_TX_FIFO_BK_IPAN    IWL_TX_FIFO_BK
-#define IWL_TX_FIFO_BE_IPAN    4
-#define IWL_TX_FIFO_VI_IPAN    IWL_TX_FIFO_VI
-#define IWL_TX_FIFO_VO_IPAN    5
-/* re-uses the VO FIFO, uCode will properly flush/schedule */
-#define IWL_TX_FIFO_AUX                5
-#define IWL_TX_FIFO_UNUSED     -1
-
-#define IWLAGN_CMD_FIFO_NUM    7
-
-/*
- * This queue number is required for proper operation
- * because the ucode will stop/start the scheduler as
- * required.
- */
-#define IWL_IPAN_MCAST_QUEUE   8
-
-static const u8 iwlagn_default_queue_to_tx_fifo[] = {
-       IWL_TX_FIFO_VO,
-       IWL_TX_FIFO_VI,
-       IWL_TX_FIFO_BE,
-       IWL_TX_FIFO_BK,
-       IWLAGN_CMD_FIFO_NUM,
-};
-
-static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
-       IWL_TX_FIFO_VO,
-       IWL_TX_FIFO_VI,
-       IWL_TX_FIFO_BE,
-       IWL_TX_FIFO_BK,
-       IWL_TX_FIFO_BK_IPAN,
-       IWL_TX_FIFO_BE_IPAN,
-       IWL_TX_FIFO_VI_IPAN,
-       IWL_TX_FIFO_VO_IPAN,
-       IWL_TX_FIFO_BE_IPAN,
-       IWLAGN_CMD_FIFO_NUM,
-       IWL_TX_FIFO_AUX,
-};
-
 static const u8 iwlagn_bss_ac_to_fifo[] = {
        IWL_TX_FIFO_VO,
        IWL_TX_FIFO_VI,
@@ -1350,6 +1307,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
        else
                trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
        trans_cfg.command_names = iwl_dvm_cmd_strings;
+       trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
 
        WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE <
                priv->cfg->base_params->num_of_queues);
@@ -1363,15 +1321,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
        if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
                priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
                trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
-               trans_cfg.queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
-               trans_cfg.n_queue_to_fifo =
-                       ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
        } else {
                priv->sta_key_max_num = STA_KEY_MAX_NUM;
                trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
-               trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
-               trans_cfg.n_queue_to_fifo =
-                       ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
        }
 
        /* Configure transport layer */
@@ -1460,9 +1412,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
                ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
                priv->sta_key_max_num = STA_KEY_MAX_NUM;
                trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
-               trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
-               trans_cfg.n_queue_to_fifo =
-                       ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
 
                /* Configure transport layer again*/
                iwl_trans_configure(priv->trans, &trans_cfg);
@@ -1480,9 +1429,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
                atomic_set(&priv->queue_stop_count[i], 0);
        }
 
-       WARN_ON(trans_cfg.queue_to_fifo[trans_cfg.cmd_queue] !=
-                                               IWLAGN_CMD_FIFO_NUM);
-
        if (iwl_init_drv(priv))
                goto out_free_eeprom;
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index b3a314ba48c7f59567c17011ffc112963368a2c7..6d8d6dd7943fc3cd2e58e872071078be8f4bfc0b 100644
@@ -226,13 +226,50 @@ int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
        return ret;
 }
 
+static const u8 iwlagn_default_queue_to_tx_fifo[] = {
+       IWL_TX_FIFO_VO,
+       IWL_TX_FIFO_VI,
+       IWL_TX_FIFO_BE,
+       IWL_TX_FIFO_BK,
+};
+
+static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
+       IWL_TX_FIFO_VO,
+       IWL_TX_FIFO_VI,
+       IWL_TX_FIFO_BE,
+       IWL_TX_FIFO_BK,
+       IWL_TX_FIFO_BK_IPAN,
+       IWL_TX_FIFO_BE_IPAN,
+       IWL_TX_FIFO_VI_IPAN,
+       IWL_TX_FIFO_VO_IPAN,
+       IWL_TX_FIFO_BE_IPAN,
+       IWL_TX_FIFO_UNUSED,
+       IWL_TX_FIFO_AUX,
+};
 
 static int iwl_alive_notify(struct iwl_priv *priv)
 {
+       const u8 *queue_to_txf;
+       u8 n_queues;
        int ret;
+       int i;
 
        iwl_trans_fw_alive(priv->trans);
 
+       if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
+           priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE) {
+               n_queues = ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
+               queue_to_txf = iwlagn_ipan_queue_to_tx_fifo;
+       } else {
+               n_queues = ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
+               queue_to_txf = iwlagn_default_queue_to_tx_fifo;
+       }
+
+       for (i = 0; i < n_queues; i++)
+               if (queue_to_txf[i] != IWL_TX_FIFO_UNUSED)
+                       iwl_trans_ac_txq_enable(priv->trans, i,
+                                               queue_to_txf[i]);
+
        priv->passive_no_rx = false;
        priv->transport_queue_stop = 0;
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 00efde8e5536bf674be9e3d612c8872930529aad..867d8e194da4adb420d63409ace08d3f0ef915b1 100644
@@ -290,16 +290,17 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
  * currently supports
  */
 #define IWL_MAX_HW_QUEUES              32
+#define IWL_INVALID_STATION    255
+#define IWL_MAX_TID_COUNT      8
+#define IWL_FRAME_LIMIT        64
 
 /**
  * struct iwl_trans_config - transport configuration
  *
  * @op_mode: pointer to the upper layer.
- * @queue_to_fifo: queue to FIFO mapping to set up by
- *     default
- * @n_queue_to_fifo: number of queues to set up
  * @cmd_queue: the index of the command queue.
  *     Must be set before start_fw.
+ * @cmd_fifo: the fifo for host commands
  * @no_reclaim_cmds: Some devices erroneously don't set the
  *     SEQ_RX_FRAME bit on some notifications, this is the
  *     list of such notifications to filter. Max length is
@@ -314,10 +315,9 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
  */
 struct iwl_trans_config {
        struct iwl_op_mode *op_mode;
-       const u8 *queue_to_fifo;
-       u8 n_queue_to_fifo;
 
        u8 cmd_queue;
+       u8 cmd_fifo;
        const u8 *no_reclaim_cmds;
        int n_no_reclaim_cmds;
 
@@ -355,9 +355,9 @@ struct iwl_trans;
  *     Must be atomic
  * @reclaim: free packet until ssn. Returns a list of freed packets.
  *     Must be atomic
- * @txq_enable: setup a tx queue for AMPDU - will be called once the HW is
- *     ready and a successful ADDBA response has been received.
- *     May sleep
+ * @txq_enable: setup a queue. To setup an AC queue, use the
+ *     iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
+ *     this one. The op_mode must not configure the HCMD queue. May sleep.
  * @txq_disable: de-configure a Tx queue to send AMPDUs
  *     Must be atomic
  * @wait_tx_queue_empty: wait until all tx queues are empty
@@ -497,9 +497,9 @@ static inline void iwl_trans_fw_alive(struct iwl_trans *trans)
 {
        might_sleep();
 
-       trans->ops->fw_alive(trans);
-
        trans->state = IWL_TRANS_FW_ALIVE;
+
+       trans->ops->fw_alive(trans);
 }
 
 static inline int iwl_trans_start_fw(struct iwl_trans *trans,
@@ -593,6 +593,13 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
                                 frame_limit, ssn);
 }
 
+static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
+                                          int fifo)
+{
+       iwl_trans_txq_enable(trans, queue, fifo, IWL_INVALID_STATION,
+                            IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
+}
+
 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
 {
        WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
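
One subtle hunk above reorders iwl_trans_fw_alive() so that trans->state is
set to IWL_TRANS_FW_ALIVE before ops->fw_alive() runs, presumably because the
PCIe fw_alive path now enables the command queue itself and the queue-enable
helpers check the transport state (the WARN_ONCE pattern visible in
iwl_trans_wait_tx_queue_empty). A standalone model of that ordering; the state
check inside txq_enable is an assumption here, not quoted from the driver:

#include <stdio.h>

/* State names mirror iwl-trans.h; everything else is illustrative. */
enum trans_state { IWL_TRANS_NO_FW, IWL_TRANS_FW_ALIVE };

static enum trans_state state = IWL_TRANS_NO_FW;

/* Assumed check: enabling a queue before FW_ALIVE would trip a warning. */
static void txq_enable(int queue, int fifo)
{
	if (state != IWL_TRANS_FW_ALIVE)
		fprintf(stderr, "WARN: queue %d enabled before fw_alive\n", queue);
	else
		printf("queue %d -> fifo %d\n", queue, fifo);
}

/* Models the PCIe fw_alive path, which now enables the command queue. */
static void ops_fw_alive(void)
{
	txq_enable(4 /* cmd_queue */, 7 /* IWLAGN_CMD_FIFO_NUM */);
}

int main(void)
{
	/* New order from the hunk above: mark FW_ALIVE first, then call the op. */
	state = IWL_TRANS_FW_ALIVE;
	ops_fw_alive();
	return 0;
}
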
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 5024fb662bf678de27d97eb979d799016df2104c..d9694c58208c38cd60575bb48f37bc75f42884bd 100644
@@ -269,10 +269,9 @@ struct iwl_trans_pcie {
        wait_queue_head_t ucode_write_waitq;
        unsigned long status;
        u8 cmd_queue;
+       u8 cmd_fifo;
        u8 n_no_reclaim_cmds;
        u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
-       u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
-       u8 n_q_to_fifo;
 
        bool rx_buf_size_8k;
        u32 rx_page_order;
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 42f369d15f48146ed543d6430394d6281d742542..bac0eb0d046d5524c96f5199e91a86a993ce9589 100644
@@ -1059,7 +1059,7 @@ static void iwl_tx_start(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 a;
-       int i, chan;
+       int chan;
        u32 reg_val;
 
        /* make sure all queue are not stopped/used */
@@ -1091,12 +1091,8 @@ static void iwl_tx_start(struct iwl_trans *trans)
         */
        iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
 
-       for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
-               int fifo = trans_pcie->setup_q_to_fifo[i];
-
-               iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION,
-                                         IWL_TID_NON_QOS, SCD_FRAME_LIMIT, 0);
-       }
+       iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
+                               trans_pcie->cmd_fifo);
 
        /* Activate all Tx DMA/FIFO channels */
        iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
@@ -1528,6 +1524,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
        trans_pcie->cmd_queue = trans_cfg->cmd_queue;
+       trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
        if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
                trans_pcie->n_no_reclaim_cmds = 0;
        else
@@ -1536,17 +1533,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
                memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
                       trans_pcie->n_no_reclaim_cmds * sizeof(u8));
 
-       trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
-
-       if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
-               trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;
-
-       /* at least the command queue must be mapped */
-       WARN_ON(!trans_pcie->n_q_to_fifo);
-
-       memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
-              trans_pcie->n_q_to_fifo * sizeof(u8));
-
        trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
        if (trans_pcie->rx_buf_size_8k)
                trans_pcie->rx_page_order = get_order(8 * 1024);