--- /dev/null
+diff -rup a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
+--- a/drivers/infiniband/core/netlink.c 2012-05-18 13:52:58.000000000 -0400
++++ b/drivers/infiniband/core/netlink.c 2012-05-18 13:52:13.000000000 -0400
+@@ -151,7 +151,7 @@ static int ibnl_rcv_msg(struct sk_buff *
+ return -EINVAL;
+ return netlink_dump_start(nls, skb, nlh,
+ client->cb_table[op].dump,
+- NULL);
++ NULL, 0);
+ }
+ }
+
--- /dev/null
+Index: compat-rdma/drivers/infiniband/core/umem.c
+===================================================================
+--- compat-rdma.orig/drivers/infiniband/core/umem.c
++++ compat-rdma/drivers/infiniband/core/umem.c
+@@ -137,7 +137,7 @@ struct ib_umem *ib_umem_get(struct ib_uc
+
+ down_write(&current->mm->mmap_sem);
+
+- locked = npages + current->mm->pinned_vm;
++ locked = npages + current->mm->locked_vm;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+@@ -207,7 +207,7 @@ out:
+ __ib_umem_release(context->device, umem, 0);
+ kfree(umem);
+ } else
+- current->mm->pinned_vm = locked;
++ current->mm->locked_vm = locked;
+
+ up_write(&current->mm->mmap_sem);
+ if (vma_list)
+@@ -223,7 +223,7 @@ static void ib_umem_account(struct work_
+ struct ib_umem *umem = container_of(work, struct ib_umem, work);
+
+ down_write(&umem->mm->mmap_sem);
+- umem->mm->pinned_vm -= umem->diff;
++ umem->mm->locked_vm -= umem->diff;
+ up_write(&umem->mm->mmap_sem);
+ mmput(umem->mm);
+ kfree(umem);
+Index: compat-rdma/drivers/infiniband/hw/ipath/ipath_user_pages.c
+===================================================================
+--- compat-rdma.orig/drivers/infiniband/hw/ipath/ipath_user_pages.c
++++ compat-rdma/drivers/infiniband/hw/ipath/ipath_user_pages.c
+@@ -79,7 +79,7 @@ static int __ipath_get_user_pages(unsign
+ goto bail_release;
+ }
+
+- current->mm->pinned_vm += num_pages;
++ current->mm->locked_vm += num_pages;
+
+ ret = 0;
+ goto bail;
+@@ -178,7 +178,7 @@ void ipath_release_user_pages(struct pag
+
+ __ipath_release_user_pages(p, num_pages, 1);
+
+- current->mm->pinned_vm -= num_pages;
++ current->mm->locked_vm -= num_pages;
+
+ up_write(&current->mm->mmap_sem);
+ }
+@@ -195,7 +195,7 @@ static void user_pages_account(struct wo
+ container_of(_work, struct ipath_user_pages_work, work);
+
+ down_write(&work->mm->mmap_sem);
+- work->mm->pinned_vm -= work->num_pages;
++ work->mm->locked_vm -= work->num_pages;
+ up_write(&work->mm->mmap_sem);
+ mmput(work->mm);
+ kfree(work);
+Index: compat-rdma/drivers/infiniband/hw/qib/qib_user_pages.c
+===================================================================
+--- compat-rdma.orig/drivers/infiniband/hw/qib/qib_user_pages.c
++++ compat-rdma/drivers/infiniband/hw/qib/qib_user_pages.c
+@@ -74,7 +74,7 @@ static int __qib_get_user_pages(unsigned
+ goto bail_release;
+ }
+
+- current->mm->pinned_vm += num_pages;
++ current->mm->locked_vm += num_pages;
+
+ ret = 0;
+ goto bail;
+@@ -151,7 +151,7 @@ void qib_release_user_pages(struct page
+ __qib_release_user_pages(p, num_pages, 1);
+
+ if (current->mm) {
+- current->mm->pinned_vm -= num_pages;
++ current->mm->locked_vm -= num_pages;
+ up_write(&current->mm->mmap_sem);
+ }
+ }
--- /dev/null
+Index: compat-rdma/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+===================================================================
+--- compat-rdma.orig/drivers/net/ethernet/mellanox/mlx4/en_rx.c 2012-01-09 16:15:04.000000000 +0200
++++ compat-rdma/drivers/net/ethernet/mellanox/mlx4/en_rx.c 2012-01-23 11:52:39.899175000 +0200
+@@ -44,7 +44,7 @@
+
+ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+- struct page_frag *skb_frags,
++ struct skb_frag_struct *skb_frags,
+ struct mlx4_en_rx_alloc *ring_alloc,
+ int i)
+ {
+@@ -61,7 +61,7 @@ static int mlx4_en_alloc_frag(struct mlx
+ return -ENOMEM;
+
+ skb_frags[i].page = page_alloc->page;
+- skb_frags[i].offset = page_alloc->offset;
++ skb_frags[i].page_offset = page_alloc->offset;
+ page_alloc->page = page;
+ page_alloc->offset = frag_info->frag_align;
+ } else {
+@@ -69,11 +69,11 @@ static int mlx4_en_alloc_frag(struct mlx
+ get_page(page);
+
+ skb_frags[i].page = page;
+- skb_frags[i].offset = page_alloc->offset;
++ skb_frags[i].page_offset = page_alloc->offset;
+ page_alloc->offset += frag_info->frag_stride;
+ }
+ dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
+- skb_frags[i].offset, frag_info->frag_size,
++ skb_frags[i].page_offset, frag_info->frag_size,
+ PCI_DMA_FROMDEVICE);
+ rx_desc->data[i].addr = cpu_to_be64(dma);
+ return 0;
+@@ -157,8 +157,8 @@ static int mlx4_en_prepare_rx_desc(struc
+ struct mlx4_en_rx_ring *ring, int index)
+ {
+ struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
+- struct page_frag *skb_frags = ring->rx_info +
+- (index << priv->log_rx_info);
++ struct skb_frag_struct *skb_frags = ring->rx_info +
++ (index << priv->log_rx_info);
+ int i;
+
+ for (i = 0; i < priv->num_frags; i++)
+@@ -183,7 +183,7 @@ static void mlx4_en_free_rx_desc(struct
+ int index)
+ {
+ struct mlx4_en_dev *mdev = priv->mdev;
+- struct page_frag *skb_frags;
++ struct skb_frag_struct *skb_frags;
+ struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
+ dma_addr_t dma;
+ int nr;
+@@ -194,7 +194,7 @@ static void mlx4_en_free_rx_desc(struct
+ dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+ en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
+- pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
++ pci_unmap_single(mdev->pdev, dma, skb_frag_size(&skb_frags[nr]),
+ PCI_DMA_FROMDEVICE);
+ put_page(skb_frags[nr].page);
+ }
+@@ -403,7 +403,7 @@ void mlx4_en_deactivate_rx_ring(struct m
+ /* Unmap a completed descriptor and free unused pages */
+ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+- struct page_frag *skb_frags,
++ struct skb_frag_struct *skb_frags,
+ struct sk_buff *skb,
+ struct mlx4_en_rx_alloc *page_alloc,
+ int length)
+@@ -421,9 +421,9 @@ static int mlx4_en_complete_rx_desc(stru
+ break;
+
+ /* Save page reference in skb */
+- __skb_frag_set_page(&skb_frags_rx[nr], skb_frags[nr].page);
+- skb_frag_size_set(&skb_frags_rx[nr], skb_frags[nr].size);
+- skb_frags_rx[nr].page_offset = skb_frags[nr].offset;
++ skb_frags_rx[nr].page = skb_frags[nr].page;
++ skb_frag_size_set(&skb_frags_rx[nr], skb_frag_size(&skb_frags[nr]));
++ skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
+ skb->truesize += frag_info->frag_stride;
+ dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+@@ -446,7 +446,7 @@ fail:
+ * the descriptor) of this packet; remaining fragments are reused... */
+ while (nr > 0) {
+ nr--;
+- __skb_frag_unref(&skb_frags_rx[nr]);
++ put_page(skb_frags_rx[nr].page);
+ }
+ return 0;
+ }
+@@ -454,7 +454,7 @@ fail:
+
+ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+- struct page_frag *skb_frags,
++ struct skb_frag_struct *skb_frags,
+ struct mlx4_en_rx_alloc *page_alloc,
+ unsigned int length)
+ {
+@@ -475,7 +475,7 @@ static struct sk_buff *mlx4_en_rx_skb(st
+
+ /* Get pointer to first fragment so we could copy the headers into the
+ * (linear part of the) skb */
+- va = page_address(skb_frags[0].page) + skb_frags[0].offset;
++ va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
+
+ if (length <= SMALL_PACKET_SIZE) {
+ /* We are copying all relevant data to the skb - temporarily
+@@ -533,7 +533,7 @@ int mlx4_en_process_rx_cq(struct net_dev
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_cqe *cqe;
+ struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+- struct page_frag *skb_frags;
++ struct skb_frag_struct *skb_frags;
+ struct mlx4_en_rx_desc *rx_desc;
+ struct sk_buff *skb;
+ int index;
--- /dev/null
+Index: compat-rdma/drivers/infiniband/ulp/iser/iscsi_iser.c
+===================================================================
+--- compat-rdma.orig/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ compat-rdma/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -719,7 +719,9 @@ static struct iscsi_transport iscsi_iser
+ .create_conn = iscsi_iser_conn_create,
+ .bind_conn = iscsi_iser_conn_bind,
+ .destroy_conn = iscsi_iser_conn_destroy,
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(3,2,0))
+ .attr_is_visible = iser_attr_is_visible,
++#endif
+ .set_param = iscsi_iser_set_param,
+ .get_conn_param = iscsi_conn_get_param,
+ .get_ep_param = iscsi_iser_get_ep_param,
--- /dev/null
+From 5ea32823a4937cd918a823bd0dfbbb592e8b9e6e Mon Sep 17 00:00:00 2001
+From: Vipul Pandya <vipul@chelsio.com>
+Date: Mon, 7 May 2012 14:17:55 +0530
+Subject: [PATCH 1/2] cxgb4: Reversing convert to SKB paged frag API.
+
+This patch is a reverse patch of upstream commit
+e91b0f2491f7a7b21c4e562df09f3dbe551f0fe2
+
+Signed-off-by: Vipul Pandya <vipul@chelsio.com>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2 +-
+ drivers/net/ethernet/chelsio/cxgb4/sge.c | 45 +++++++++++++--------------
+ 2 files changed, 23 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+index 0fe1885..223a7f7 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+@@ -326,7 +326,7 @@ struct sge_fl { /* SGE free-buffer queue state */
+
+ /* A packet gather list */
+ struct pkt_gl {
+- struct page_frag frags[MAX_SKB_FRAGS];
++ skb_frag_t frags[MAX_SKB_FRAGS];
+ void *va; /* virtual address of first byte */
+ unsigned int nfrags; /* # of fragments */
+ unsigned int tot_len; /* total length of fragments */
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index 140254c..c5e99fb 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -216,8 +216,8 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
+ end = &si->frags[si->nr_frags];
+
+ for (fp = si->frags; fp < end; fp++) {
+- *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
+- DMA_TO_DEVICE);
++ *++addr = dma_map_page(dev, fp->page, fp->page_offset,
++ skb_frag_size(fp), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto unwind;
+ }
+@@ -1410,23 +1410,22 @@ int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(cxgb4_ofld_send);
+
+-static inline void copy_frags(struct sk_buff *skb,
++static inline void copy_frags(struct skb_shared_info *ssi,
+ const struct pkt_gl *gl, unsigned int offset)
+ {
+- int i;
++ unsigned int n;
+
+ /* usually there's just one frag */
+- __skb_fill_page_desc(skb, 0, gl->frags[0].page,
+- gl->frags[0].offset + offset,
+- gl->frags[0].size - offset);
+- skb_shinfo(skb)->nr_frags = gl->nfrags;
+- for (i = 1; i < gl->nfrags; i++)
+- __skb_fill_page_desc(skb, i, gl->frags[i].page,
+- gl->frags[i].offset,
+- gl->frags[i].size);
++ ssi->frags[0].page = gl->frags[0].page;
++ ssi->frags[0].page_offset = gl->frags[0].page_offset + offset;
++ skb_frag_size_set(&ssi->frags[0], skb_frag_size(&gl->frags[0]) - offset);
++ ssi->nr_frags = gl->nfrags;
++ n = gl->nfrags - 1;
++ if (n)
++ memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
+
+ /* get a reference to the last page, we don't own it */
+- get_page(gl->frags[gl->nfrags - 1].page);
++ get_page(gl->frags[n].page);
+ }
+
+ /**
+@@ -1461,7 +1460,7 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
+ __skb_put(skb, pull_len);
+ skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+- copy_frags(skb, gl, pull_len);
++ copy_frags(skb_shinfo(skb), gl, pull_len);
+ skb->len = gl->tot_len;
+ skb->data_len = skb->len - pull_len;
+ skb->truesize += skb->data_len;
+@@ -1480,7 +1479,7 @@ EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
+ static void t4_pktgl_free(const struct pkt_gl *gl)
+ {
+ int n;
+- const struct page_frag *p;
++ const skb_frag_t *p;
+
+ for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
+ put_page(p->page);
+@@ -1524,7 +1523,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
+ return;
+ }
+
+- copy_frags(skb, gl, RX_PKT_PAD);
++ copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD);
+ skb->len = gl->tot_len - RX_PKT_PAD;
+ skb->data_len = skb->len;
+ skb->truesize += skb->data_len;
+@@ -1700,7 +1699,7 @@ static int process_responses(struct sge_rspq *q, int budget)
+ rmb();
+ rsp_type = RSPD_TYPE(rc->type_gen);
+ if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+- struct page_frag *fp;
++ skb_frag_t *fp;
+ struct pkt_gl si;
+ const struct rx_sw_desc *rsd;
+ u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
+@@ -1719,9 +1718,9 @@ static int process_responses(struct sge_rspq *q, int budget)
+ rsd = &rxq->fl.sdesc[rxq->fl.cidx];
+ bufsz = get_buf_size(rsd);
+ fp->page = rsd->page;
+- fp->offset = q->offset;
+- fp->size = min(bufsz, len);
+- len -= fp->size;
++ fp->page_offset = q->offset;
++ skb_frag_size_set(fp, min(bufsz, len));
++ len -= skb_frag_size(fp);
+ if (!len)
+ break;
+ unmap_rx_buf(q->adap, &rxq->fl);
+@@ -1733,16 +1732,16 @@ static int process_responses(struct sge_rspq *q, int budget)
+ */
+ dma_sync_single_for_cpu(q->adap->pdev_dev,
+ get_buf_addr(rsd),
+- fp->size, DMA_FROM_DEVICE);
++ skb_frag_size(fp), DMA_FROM_DEVICE);
+
+ si.va = page_address(si.frags[0].page) +
+- si.frags[0].offset;
++ si.frags[0].page_offset;
+ prefetch(si.va);
+
+ si.nfrags = frags + 1;
+ ret = q->handler(q, q->cur_desc, &si);
+ if (likely(ret == 0))
+- q->offset += ALIGN(fp->size, FL_ALIGN);
++ q->offset += ALIGN(skb_frag_size(fp), FL_ALIGN);
+ else
+ restore_rx_bufs(&si, &rxq->fl, frags);
+ } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+--
+1.7.1
+
--- /dev/null
+From e861ec83739c5c981a1ae96edb08062f73d52dc4 Mon Sep 17 00:00:00 2001
+From: Vipul Pandya <vipul@chelsio.com>
+Date: Mon, 7 May 2012 14:40:34 +0530
+Subject: [PATCH 2/2] cxgb4: Add build support for kernel 3.1
+
+Signed-off-by: Vipul Pandya <vipul@chelsio.com>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 8 ++++----
+ 1 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 4c8f42a..0b77fec 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -32,6 +32,9 @@
+ * SOFTWARE.
+ */
+
++#ifdef pr_fmt
++#undef pr_fmt
++#endif
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #include <linux/bitmap.h>
+@@ -41,7 +44,6 @@
+ #include <linux/err.h>
+ #include <linux/etherdevice.h>
+ #include <linux/firmware.h>
+-#include <linux/if.h>
+ #include <linux/if_vlan.h>
+ #include <linux/init.h>
+ #include <linux/log2.h>
+@@ -1902,7 +1904,7 @@ static int set_rss_table(struct net_device *dev,
+ }
+
+ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+- u32 *rules)
++ void *rules)
+ {
+ const struct port_info *pi = netdev_priv(dev);
+
+@@ -3640,8 +3642,6 @@ static int __devinit init_one(struct pci_dev *pdev,
+ netdev->features |= netdev->hw_features | highdma;
+ netdev->vlan_features = netdev->features & VLAN_FEAT;
+
+- netdev->priv_flags |= IFF_UNICAST_FLT;
+-
+ netdev->netdev_ops = &cxgb4_netdev_ops;
+ SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+ }
+--
+1.7.1
+
--- /dev/null
+From d6b1c6b1aaca4eaf89e0e0021bafa29af933063e Mon Sep 17 00:00:00 2001
+From: Vipul Pandya <vipul@chelsio.com>
+Date: Mon, 7 May 2012 15:50:15 +0530
+Subject: [PATCH 3/3] cxgb3: Reversing do vlan cleanup
+
+This patch is a reverse patch of the following upstream commit
+892ef5d85259e193505d553c10237fd5dc9a3d0d
+
+Signed-off-by: Vipul Pandya <vipul@chelsio.com>
+---
+ drivers/net/ethernet/chelsio/cxgb3/adapter.h | 2 +
+ drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 51 ++++---------------
+ drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 9 ++-
+ drivers/net/ethernet/chelsio/cxgb3/sge.c | 35 +++++++++++--
+ 4 files changed, 48 insertions(+), 49 deletions(-)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
+index 8b395b5..7300de5 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/adapter.h
++++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
+@@ -45,6 +45,7 @@
+ #include "t3cdev.h"
+ #include <asm/io.h>
+
++struct vlan_group;
+ struct adapter;
+ struct sge_qset;
+ struct port_info;
+@@ -65,6 +66,7 @@ struct iscsi_config {
+
+ struct port_info {
+ struct adapter *adapter;
++ struct vlan_group *vlan_grp;
+ struct sge_qset *qs;
+ u8 port_id;
+ u8 nqsets;
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+index 4d15c8f..d9ee262 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+@@ -2531,51 +2531,25 @@ static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
+ }
+ }
+
+-static void cxgb_vlan_mode(struct net_device *dev, u32 features)
++static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+ {
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+- if (adapter->params.rev > 0) {
+- t3_set_vlan_accel(adapter, 1 << pi->port_id,
+- features & NETIF_F_HW_VLAN_RX);
+- } else {
++ pi->vlan_grp = grp;
++ if (adapter->params.rev > 0)
++ t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
++ else {
+ /* single control for all ports */
+- unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
+-
++ unsigned int i, have_vlans = 0;
+ for_each_port(adapter, i)
+- have_vlans |=
+- adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
++ have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
+
+ t3_set_vlan_accel(adapter, 1, have_vlans);
+ }
+ t3_synchronize_rx(adapter, pi);
+ }
+
+-static u32 cxgb_fix_features(struct net_device *dev, u32 features)
+-{
+- /*
+- * Since there is no support for separate rx/tx vlan accel
+- * enable/disable make sure tx flag is always in same state as rx.
+- */
+- if (features & NETIF_F_HW_VLAN_RX)
+- features |= NETIF_F_HW_VLAN_TX;
+- else
+- features &= ~NETIF_F_HW_VLAN_TX;
+-
+- return features;
+-}
+-
+-static int cxgb_set_features(struct net_device *dev, u32 features)
+-{
+- u32 changed = dev->features ^ features;
+-
+- if (changed & NETIF_F_HW_VLAN_RX)
+- cxgb_vlan_mode(dev, features);
+-
+- return 0;
+-}
+-
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ static void cxgb_netpoll(struct net_device *dev)
+ {
+@@ -3156,8 +3130,7 @@ static const struct net_device_ops cxgb_netdev_ops = {
+ .ndo_do_ioctl = cxgb_ioctl,
+ .ndo_change_mtu = cxgb_change_mtu,
+ .ndo_set_mac_address = cxgb_set_mac_addr,
+- .ndo_fix_features = cxgb_fix_features,
+- .ndo_set_features = cxgb_set_features,
++ .ndo_vlan_rx_register = vlan_rx_register,
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cxgb_netpoll,
+ #endif
+@@ -3289,8 +3262,9 @@ static int __devinit init_one(struct pci_dev *pdev,
+ netdev->mem_start = mmio_start;
+ netdev->mem_end = mmio_start + mmio_len - 1;
+ netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+- NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
+- netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX;
++ NETIF_F_TSO | NETIF_F_RXCSUM;
++ netdev->features |= netdev->hw_features |
++ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+@@ -3354,9 +3328,6 @@ static int __devinit init_one(struct pci_dev *pdev,
+ err = sysfs_create_group(&adapter->port[0]->dev.kobj,
+ &cxgb3_attr_group);
+
+- for_each_port(adapter, i)
+- cxgb_vlan_mode(adapter->port[i], adapter->port[i]->features);
+-
+ print_port_info(adapter, ai);
+ return 0;
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+index 90ff131..d7cd560 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+@@ -177,13 +177,16 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
+ int i;
+
+ for_each_port(adapter, i) {
++ struct vlan_group *grp;
+ struct net_device *dev = adapter->port[i];
++ const struct port_info *p = netdev_priv(dev);
+
+ if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
+ if (vlan && vlan != VLAN_VID_MASK) {
+- rcu_read_lock();
+- dev = __vlan_find_dev_deep(dev, vlan);
+- rcu_read_unlock();
++ grp = p->vlan_grp;
++ dev = NULL;
++ if (grp)
++ dev = vlan_group_get_device(grp, vlan);
+ } else if (netif_is_bond_slave(dev)) {
+ while (dev->master)
+ dev = dev->master;
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
+index cfb60e1..a73523e 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
+@@ -2028,11 +2028,28 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
+ skb_checksum_none_assert(skb);
+ skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
+
+- if (p->vlan_valid) {
++ if (unlikely(p->vlan_valid)) {
++ struct vlan_group *grp = pi->vlan_grp;
++
+ qs->port_stats[SGE_PSTAT_VLANEX]++;
+- __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
+- }
+- if (rq->polling) {
++ if (likely(grp))
++ if (lro)
++ vlan_gro_receive(&qs->napi, grp,
++ ntohs(p->vlan), skb);
++ else {
++ if (unlikely(pi->iscsic.flags)) {
++ unsigned short vtag = ntohs(p->vlan) &
++ VLAN_VID_MASK;
++ skb->dev = vlan_group_get_device(grp,
++ vtag);
++ cxgb3_process_iscsi_prov_pack(pi, skb);
++ }
++ __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
++ rq->polling);
++ }
++ else
++ dev_kfree_skb_any(skb);
++ } else if (rq->polling) {
+ if (lro)
+ napi_gro_receive(&qs->napi, skb);
+ else {
+@@ -2130,8 +2147,14 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
+
+ skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
+
+- if (cpl->vlan_valid)
+- __vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
++ if (unlikely(cpl->vlan_valid)) {
++ struct vlan_group *grp = pi->vlan_grp;
++
++ if (likely(grp != NULL)) {
++ vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan));
++ return;
++ }
++ }
+ napi_gro_frags(&qs->napi);
+ }
+
+--
+1.7.1
+
--- /dev/null
+diff -rup a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
+--- a/drivers/infiniband/core/Makefile 2012-05-18 13:16:13.000000000 -0400
++++ b/drivers/infiniband/core/Makefile 2012-05-18 13:17:57.000000000 -0400
+@@ -30,3 +30,6 @@ ib_umad-y := user_mad.o
+ ib_ucm-y := ucm.o
+
+ ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o
++
++# sles11sp2 specific
++CFLAGS_netlink.o += -DNEED_MIN_DUMP_ALLOC_ARG
2.6.39*)
echo 2.6.39
;;
+ 3.0.13*-*)
+ echo 3.0.13_sles11_sp2
+ ;;
3.0* | 2.6.40*)
echo 3.0
;;