--- /dev/null
+Index: compat-rdma/drivers/infiniband/core/umem.c
+===================================================================
+--- compat-rdma.orig/drivers/infiniband/core/umem.c
++++ compat-rdma/drivers/infiniband/core/umem.c
+@@ -137,7 +137,7 @@ struct ib_umem *ib_umem_get(struct ib_uc
+
+ down_write(&current->mm->mmap_sem);
+
+- locked = npages + current->mm->pinned_vm;
++ locked = npages + current->mm->locked_vm;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+@@ -207,7 +207,7 @@ out:
+ __ib_umem_release(context->device, umem, 0);
+ kfree(umem);
+ } else
+- current->mm->pinned_vm = locked;
++ current->mm->locked_vm = locked;
+
+ up_write(&current->mm->mmap_sem);
+ if (vma_list)
+@@ -223,7 +223,7 @@ static void ib_umem_account(struct work_
+ struct ib_umem *umem = container_of(work, struct ib_umem, work);
+
+ down_write(&umem->mm->mmap_sem);
+- umem->mm->pinned_vm -= umem->diff;
++ umem->mm->locked_vm -= umem->diff;
+ up_write(&umem->mm->mmap_sem);
+ mmput(umem->mm);
+ kfree(umem);
+Index: compat-rdma/drivers/infiniband/hw/ipath/ipath_user_pages.c
+===================================================================
+--- compat-rdma.orig/drivers/infiniband/hw/ipath/ipath_user_pages.c
++++ compat-rdma/drivers/infiniband/hw/ipath/ipath_user_pages.c
+@@ -79,7 +79,7 @@ static int __ipath_get_user_pages(unsign
+ goto bail_release;
+ }
+
+- current->mm->pinned_vm += num_pages;
++ current->mm->locked_vm += num_pages;
+
+ ret = 0;
+ goto bail;
+@@ -178,7 +178,7 @@ void ipath_release_user_pages(struct pag
+
+ __ipath_release_user_pages(p, num_pages, 1);
+
+- current->mm->pinned_vm -= num_pages;
++ current->mm->locked_vm -= num_pages;
+
+ up_write(&current->mm->mmap_sem);
+ }
+@@ -195,7 +195,7 @@ static void user_pages_account(struct wo
+ container_of(_work, struct ipath_user_pages_work, work);
+
+ down_write(&work->mm->mmap_sem);
+- work->mm->pinned_vm -= work->num_pages;
++ work->mm->locked_vm -= work->num_pages;
+ up_write(&work->mm->mmap_sem);
+ mmput(work->mm);
+ kfree(work);
+Index: compat-rdma/drivers/infiniband/hw/qib/qib_user_pages.c
+===================================================================
+--- compat-rdma.orig/drivers/infiniband/hw/qib/qib_user_pages.c
++++ compat-rdma/drivers/infiniband/hw/qib/qib_user_pages.c
+@@ -74,7 +74,7 @@ static int __qib_get_user_pages(unsigned
+ goto bail_release;
+ }
+
+- current->mm->pinned_vm += num_pages;
++ current->mm->locked_vm += num_pages;
+
+ ret = 0;
+ goto bail;
+@@ -151,7 +151,7 @@ void qib_release_user_pages(struct page
+ __qib_release_user_pages(p, num_pages, 1);
+
+ if (current->mm) {
+- current->mm->pinned_vm -= num_pages;
++ current->mm->locked_vm -= num_pages;
+ up_write(&current->mm->mmap_sem);
+ }
+ }
--- /dev/null
+Index: compat-rdma/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+===================================================================
+--- compat-rdma.orig/drivers/net/ethernet/mellanox/mlx4/en_rx.c 2012-01-09 16:15:04.000000000 +0200
++++ compat-rdma/drivers/net/ethernet/mellanox/mlx4/en_rx.c 2012-01-23 11:52:39.899175000 +0200
+@@ -44,7 +44,7 @@
+
+ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+- struct page_frag *skb_frags,
++ struct skb_frag_struct *skb_frags,
+ struct mlx4_en_rx_alloc *ring_alloc,
+ int i)
+ {
+@@ -61,7 +61,7 @@ static int mlx4_en_alloc_frag(struct mlx
+ return -ENOMEM;
+
+ skb_frags[i].page = page_alloc->page;
+- skb_frags[i].offset = page_alloc->offset;
++ skb_frags[i].page_offset = page_alloc->offset;
+ page_alloc->page = page;
+ page_alloc->offset = frag_info->frag_align;
+ } else {
+@@ -69,11 +69,11 @@ static int mlx4_en_alloc_frag(struct mlx
+ get_page(page);
+
+ skb_frags[i].page = page;
+- skb_frags[i].offset = page_alloc->offset;
++ skb_frags[i].page_offset = page_alloc->offset;
+ page_alloc->offset += frag_info->frag_stride;
+ }
+ dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
+- skb_frags[i].offset, frag_info->frag_size,
++ skb_frags[i].page_offset, frag_info->frag_size,
+ PCI_DMA_FROMDEVICE);
+ rx_desc->data[i].addr = cpu_to_be64(dma);
+ return 0;
+@@ -157,8 +157,8 @@ static int mlx4_en_prepare_rx_desc(struc
+ struct mlx4_en_rx_ring *ring, int index)
+ {
+ struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
+- struct page_frag *skb_frags = ring->rx_info +
+- (index << priv->log_rx_info);
++ struct skb_frag_struct *skb_frags = ring->rx_info +
++ (index << priv->log_rx_info);
+ int i;
+
+ for (i = 0; i < priv->num_frags; i++)
+@@ -183,7 +183,7 @@ static void mlx4_en_free_rx_desc(struct
+ int index)
+ {
+ struct mlx4_en_dev *mdev = priv->mdev;
+- struct page_frag *skb_frags;
++ struct skb_frag_struct *skb_frags;
+ struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
+ dma_addr_t dma;
+ int nr;
+@@ -194,7 +194,7 @@ static void mlx4_en_free_rx_desc(struct
+ dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+ en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
+- pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
++ pci_unmap_single(mdev->pdev, dma, skb_frag_size(&skb_frags[nr]),
+ PCI_DMA_FROMDEVICE);
+ put_page(skb_frags[nr].page);
+ }
+@@ -403,7 +403,7 @@ void mlx4_en_deactivate_rx_ring(struct m
+ /* Unmap a completed descriptor and free unused pages */
+ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+- struct page_frag *skb_frags,
++ struct skb_frag_struct *skb_frags,
+ struct sk_buff *skb,
+ struct mlx4_en_rx_alloc *page_alloc,
+ int length)
+@@ -421,9 +421,9 @@ static int mlx4_en_complete_rx_desc(stru
+ break;
+
+ /* Save page reference in skb */
+- __skb_frag_set_page(&skb_frags_rx[nr], skb_frags[nr].page);
+- skb_frag_size_set(&skb_frags_rx[nr], skb_frags[nr].size);
+- skb_frags_rx[nr].page_offset = skb_frags[nr].offset;
++ skb_frags_rx[nr].page = skb_frags[nr].page;
++ skb_frag_size_set(&skb_frags_rx[nr], skb_frag_size(&skb_frags[nr]));
++ skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
+ skb->truesize += frag_info->frag_stride;
+ dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+@@ -446,7 +446,7 @@ fail:
+ * the descriptor) of this packet; remaining fragments are reused... */
+ while (nr > 0) {
+ nr--;
+- __skb_frag_unref(&skb_frags_rx[nr]);
++ put_page(skb_frags_rx[nr].page);
+ }
+ return 0;
+ }
+@@ -454,7 +454,7 @@ fail:
+
+ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+- struct page_frag *skb_frags,
++ struct skb_frag_struct *skb_frags,
+ struct mlx4_en_rx_alloc *page_alloc,
+ unsigned int length)
+ {
+@@ -475,7 +475,7 @@ static struct sk_buff *mlx4_en_rx_skb(st
+
+ /* Get pointer to first fragment so we could copy the headers into the
+ * (linear part of the) skb */
+- va = page_address(skb_frags[0].page) + skb_frags[0].offset;
++ va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
+
+ if (length <= SMALL_PACKET_SIZE) {
+ /* We are copying all relevant data to the skb - temporarily
+@@ -533,7 +533,7 @@ int mlx4_en_process_rx_cq(struct net_dev
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_cqe *cqe;
+ struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+- struct page_frag *skb_frags;
++ struct skb_frag_struct *skb_frags;
+ struct mlx4_en_rx_desc *rx_desc;
+ struct sk_buff *skb;
+ int index;
--- /dev/null
+Index: compat-rdma/drivers/infiniband/ulp/iser/iscsi_iser.c
+===================================================================
+--- compat-rdma.orig/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ compat-rdma/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -719,7 +719,9 @@ static struct iscsi_transport iscsi_iser
+ .create_conn = iscsi_iser_conn_create,
+ .bind_conn = iscsi_iser_conn_bind,
+ .destroy_conn = iscsi_iser_conn_destroy,
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(3,2,0))
+ .attr_is_visible = iser_attr_is_visible,
++#endif
+ .set_param = iscsi_iser_set_param,
+ .get_conn_param = iscsi_conn_get_param,
+ .get_ep_param = iscsi_iser_get_ep_param,
--- /dev/null
+Index: compat-rdma/drivers/infiniband/core/umem.c
+===================================================================
+--- compat-rdma.orig/drivers/infiniband/core/umem.c
++++ compat-rdma/drivers/infiniband/core/umem.c
+@@ -137,7 +137,7 @@ struct ib_umem *ib_umem_get(struct ib_uc
+
+ down_write(&current->mm->mmap_sem);
+
+- locked = npages + current->mm->pinned_vm;
++ locked = npages + current->mm->locked_vm;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+@@ -207,7 +207,7 @@ out:
+ __ib_umem_release(context->device, umem, 0);
+ kfree(umem);
+ } else
+- current->mm->pinned_vm = locked;
++ current->mm->locked_vm = locked;
+
+ up_write(&current->mm->mmap_sem);
+ if (vma_list)
+@@ -223,7 +223,7 @@ static void ib_umem_account(struct work_
+ struct ib_umem *umem = container_of(work, struct ib_umem, work);
+
+ down_write(&umem->mm->mmap_sem);
+- umem->mm->pinned_vm -= umem->diff;
++ umem->mm->locked_vm -= umem->diff;
+ up_write(&umem->mm->mmap_sem);
+ mmput(umem->mm);
+ kfree(umem);
+Index: compat-rdma/drivers/infiniband/hw/ipath/ipath_user_pages.c
+===================================================================
+--- compat-rdma.orig/drivers/infiniband/hw/ipath/ipath_user_pages.c
++++ compat-rdma/drivers/infiniband/hw/ipath/ipath_user_pages.c
+@@ -79,7 +79,7 @@ static int __ipath_get_user_pages(unsign
+ goto bail_release;
+ }
+
+- current->mm->pinned_vm += num_pages;
++ current->mm->locked_vm += num_pages;
+
+ ret = 0;
+ goto bail;
+@@ -178,7 +178,7 @@ void ipath_release_user_pages(struct pag
+
+ __ipath_release_user_pages(p, num_pages, 1);
+
+- current->mm->pinned_vm -= num_pages;
++ current->mm->locked_vm -= num_pages;
+
+ up_write(&current->mm->mmap_sem);
+ }
+@@ -195,7 +195,7 @@ static void user_pages_account(struct wo
+ container_of(_work, struct ipath_user_pages_work, work);
+
+ down_write(&work->mm->mmap_sem);
+- work->mm->pinned_vm -= work->num_pages;
++ work->mm->locked_vm -= work->num_pages;
+ up_write(&work->mm->mmap_sem);
+ mmput(work->mm);
+ kfree(work);
+Index: compat-rdma/drivers/infiniband/hw/qib/qib_user_pages.c
+===================================================================
+--- compat-rdma.orig/drivers/infiniband/hw/qib/qib_user_pages.c
++++ compat-rdma/drivers/infiniband/hw/qib/qib_user_pages.c
+@@ -74,7 +74,7 @@ static int __qib_get_user_pages(unsigned
+ goto bail_release;
+ }
+
+- current->mm->pinned_vm += num_pages;
++ current->mm->locked_vm += num_pages;
+
+ ret = 0;
+ goto bail;
+@@ -151,7 +151,7 @@ void qib_release_user_pages(struct page
+ __qib_release_user_pages(p, num_pages, 1);
+
+ if (current->mm) {
+- current->mm->pinned_vm -= num_pages;
++ current->mm->locked_vm -= num_pages;
+ up_write(&current->mm->mmap_sem);
+ }
+ }
--- /dev/null
+Index: compat-rdma/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+===================================================================
+--- compat-rdma.orig/drivers/net/ethernet/mellanox/mlx4/en_rx.c 2012-01-09 16:15:04.000000000 +0200
++++ compat-rdma/drivers/net/ethernet/mellanox/mlx4/en_rx.c 2012-01-23 11:52:39.899175000 +0200
+@@ -44,7 +44,7 @@
+
+ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+- struct page_frag *skb_frags,
++ struct skb_frag_struct *skb_frags,
+ struct mlx4_en_rx_alloc *ring_alloc,
+ int i)
+ {
+@@ -61,7 +61,7 @@ static int mlx4_en_alloc_frag(struct mlx
+ return -ENOMEM;
+
+ skb_frags[i].page = page_alloc->page;
+- skb_frags[i].offset = page_alloc->offset;
++ skb_frags[i].page_offset = page_alloc->offset;
+ page_alloc->page = page;
+ page_alloc->offset = frag_info->frag_align;
+ } else {
+@@ -69,11 +69,11 @@ static int mlx4_en_alloc_frag(struct mlx
+ get_page(page);
+
+ skb_frags[i].page = page;
+- skb_frags[i].offset = page_alloc->offset;
++ skb_frags[i].page_offset = page_alloc->offset;
+ page_alloc->offset += frag_info->frag_stride;
+ }
+ dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
+- skb_frags[i].offset, frag_info->frag_size,
++ skb_frags[i].page_offset, frag_info->frag_size,
+ PCI_DMA_FROMDEVICE);
+ rx_desc->data[i].addr = cpu_to_be64(dma);
+ return 0;
+@@ -157,8 +157,8 @@ static int mlx4_en_prepare_rx_desc(struc
+ struct mlx4_en_rx_ring *ring, int index)
+ {
+ struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
+- struct page_frag *skb_frags = ring->rx_info +
+- (index << priv->log_rx_info);
++ struct skb_frag_struct *skb_frags = ring->rx_info +
++ (index << priv->log_rx_info);
+ int i;
+
+ for (i = 0; i < priv->num_frags; i++)
+@@ -183,7 +183,7 @@ static void mlx4_en_free_rx_desc(struct
+ int index)
+ {
+ struct mlx4_en_dev *mdev = priv->mdev;
+- struct page_frag *skb_frags;
++ struct skb_frag_struct *skb_frags;
+ struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
+ dma_addr_t dma;
+ int nr;
+@@ -194,7 +194,7 @@ static void mlx4_en_free_rx_desc(struct
+ dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+ en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
+- pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
++ pci_unmap_single(mdev->pdev, dma, skb_frag_size(&skb_frags[nr]),
+ PCI_DMA_FROMDEVICE);
+ put_page(skb_frags[nr].page);
+ }
+@@ -403,7 +403,7 @@ void mlx4_en_deactivate_rx_ring(struct m
+ /* Unmap a completed descriptor and free unused pages */
+ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+- struct page_frag *skb_frags,
++ struct skb_frag_struct *skb_frags,
+ struct sk_buff *skb,
+ struct mlx4_en_rx_alloc *page_alloc,
+ int length)
+@@ -421,9 +421,9 @@ static int mlx4_en_complete_rx_desc(stru
+ break;
+
+ /* Save page reference in skb */
+- __skb_frag_set_page(&skb_frags_rx[nr], skb_frags[nr].page);
+- skb_frag_size_set(&skb_frags_rx[nr], skb_frags[nr].size);
+- skb_frags_rx[nr].page_offset = skb_frags[nr].offset;
++ skb_frags_rx[nr].page = skb_frags[nr].page;
++ skb_frag_size_set(&skb_frags_rx[nr], skb_frag_size(&skb_frags[nr]));
++ skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
+ skb->truesize += frag_info->frag_stride;
+ dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+@@ -446,7 +446,7 @@ fail:
+ * the descriptor) of this packet; remaining fragments are reused... */
+ while (nr > 0) {
+ nr--;
+- __skb_frag_unref(&skb_frags_rx[nr]);
++ put_page(skb_frags_rx[nr].page);
+ }
+ return 0;
+ }
+@@ -454,7 +454,7 @@ fail:
+
+ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+- struct page_frag *skb_frags,
++ struct skb_frag_struct *skb_frags,
+ struct mlx4_en_rx_alloc *page_alloc,
+ unsigned int length)
+ {
+@@ -475,7 +475,7 @@ static struct sk_buff *mlx4_en_rx_skb(st
+
+ /* Get pointer to first fragment so we could copy the headers into the
+ * (linear part of the) skb */
+- va = page_address(skb_frags[0].page) + skb_frags[0].offset;
++ va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
+
+ if (length <= SMALL_PACKET_SIZE) {
+ /* We are copying all relevant data to the skb - temporarily
+@@ -533,7 +533,7 @@ int mlx4_en_process_rx_cq(struct net_dev
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_cqe *cqe;
+ struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+- struct page_frag *skb_frags;
++ struct skb_frag_struct *skb_frags;
+ struct mlx4_en_rx_desc *rx_desc;
+ struct sk_buff *skb;
+ int index;
--- /dev/null
+Index: compat-rdma/drivers/infiniband/ulp/iser/iscsi_iser.c
+===================================================================
+--- compat-rdma.orig/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ compat-rdma/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -719,7 +719,9 @@ static struct iscsi_transport iscsi_iser
+ .create_conn = iscsi_iser_conn_create,
+ .bind_conn = iscsi_iser_conn_bind,
+ .destroy_conn = iscsi_iser_conn_destroy,
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(3,2,0))
+ .attr_is_visible = iser_attr_is_visible,
++#endif
+ .set_param = iscsi_iser_set_param,
+ .get_conn_param = iscsi_conn_get_param,
+ .get_ep_param = iscsi_iser_get_ep_param,
--- /dev/null
+
+compat-rdma patches
+===================
+
+You must have a really good reason to be adding files
+in this directory. Your reasoning should either match the
+explanation already present at the top of each patch file,
+or you should add your own.
+
+We try to avoid having patch files because:
+
+ * It's a pain to maintain them.
+
+ * Most backport changes can be pulled off through
+   some macro magic or new files that implement
+   the new functionality on older kernels.