From 50c3da31c503be03e6547f0cf8c1a73ca6638fe1 Mon Sep 17 00:00:00 2001 From: Vladimir Sokolovsky Date: Mon, 11 Nov 2013 12:58:49 +0200 Subject: [PATCH] BACKPORTS: Added support for RHEL6.5 Modules: ib/core, ib_ipoib, mlx4 Signed-off-by: Vladimir Sokolovsky --- patches/0001-BACKPORT-ib_core.patch | 1046 ++++++++++++++ ...T-mlx4-mlx4_core-mlx4_en-and-mlx4_ib.patch | 1208 +++++++++++++++++ ...02-netlink-Avoid-pr_fmt-redefinition.patch | 22 - patches/0003-BACKPORT-ib_ipoib.patch | 218 +++ patches/0003-BACKPORT-mlx4_en.patch | 109 -- patches/0004-BACKPORT-mlx4_ib.patch | 38 - ...-Enable-header-file-inclusion-with-.patch} | 0 patches/quiltrc | 2 + 8 files changed, 2474 insertions(+), 169 deletions(-) create mode 100644 patches/0001-BACKPORT-ib_core.patch create mode 100644 patches/0002-BACKPORT-mlx4-mlx4_core-mlx4_en-and-mlx4_ib.patch delete mode 100644 patches/0002-netlink-Avoid-pr_fmt-redefinition.patch create mode 100644 patches/0003-BACKPORT-ib_ipoib.patch delete mode 100644 patches/0003-BACKPORT-mlx4_en.patch delete mode 100644 patches/0004-BACKPORT-mlx4_ib.patch rename patches/{0001-iw_cxgb3-iw_cxgb4-Enable-header-file-inclusion-with-.patch => 0004-iw_cxgb3-iw_cxgb4-Enable-header-file-inclusion-with-.patch} (100%) create mode 100644 patches/quiltrc diff --git a/patches/0001-BACKPORT-ib_core.patch b/patches/0001-BACKPORT-ib_core.patch new file mode 100644 index 0000000..0c7db91 --- /dev/null +++ b/patches/0001-BACKPORT-ib_core.patch @@ -0,0 +1,1046 @@ +From: Vladimir Sokolovsky +Subject: [PATCH] BACKPORT: ib_core + +Signed-off-by: Vladimir Sokolovsky +--- + drivers/infiniband/core/addr.c | 105 +++++++++++++++++++++++++++++++++ + drivers/infiniband/core/cm.c | 23 +++++++ + drivers/infiniband/core/cma.c | 43 +++++++++++++ + drivers/infiniband/core/fmr_pool.c | 7 ++ + drivers/infiniband/core/netlink.c | 20 ++++++ + drivers/infiniband/core/sa_query.c | 15 +++++ + drivers/infiniband/core/ucm.c | 38 ++++++++++++ + drivers/infiniband/core/ucma.c | 76 ++++++++++++++++++++++++ + drivers/infiniband/core/umem.c | 16 +++++ + drivers/infiniband/core/user_mad.c | 16 +++++ + drivers/infiniband/core/uverbs_cmd.c | 51 ++++++++++++++++ + drivers/infiniband/core/uverbs_main.c | 40 +++++++++++++ + 12 files changed, 450 insertions(+), 0 deletions(-) + +diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/core/addr.c ++++ b/drivers/infiniband/core/addr.c +@@ -192,28 +192,45 @@ static void queue_req(struct addr_req *req) + mutex_unlock(&lock); + } + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0) + static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr, void *daddr) ++#else ++static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *addr) ++#endif + { + struct neighbour *n; + int ret; + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0) + n = dst_neigh_lookup(dst, daddr); ++#endif + + rcu_read_lock(); ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ++ n = dst_get_neighbour(dst); ++#endif + if (!n || !(n->nud_state & NUD_VALID)) { + if (n) + neigh_event_send(n, NULL); + ret = -ENODATA; + } else { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0) + ret = rdma_copy_addr(dev_addr, dst->dev, n->ha); ++#else ++ ret = rdma_copy_addr(addr, dst->dev, n->ha); ++#endif + } + rcu_read_unlock(); + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0) + if (n) + neigh_release(n); ++#endif + + return ret; + } ++#endif + + static int addr4_resolve(struct sockaddr_in 
*src_in, + struct sockaddr_in *dst_in, +@@ -222,9 +239,15 @@ static int addr4_resolve(struct sockaddr_in *src_in, + __be32 src_ip = src_in->sin_addr.s_addr; + __be32 dst_ip = dst_in->sin_addr.s_addr; + struct rtable *rt; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) + struct flowi4 fl4; ++#else ++ struct flowi fl; ++ struct neighbour *neigh; ++#endif + int ret; + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) + memset(&fl4, 0, sizeof(fl4)); + fl4.daddr = dst_ip; + fl4.saddr = src_ip; +@@ -234,10 +257,25 @@ static int addr4_resolve(struct sockaddr_in *src_in, + ret = PTR_ERR(rt); + goto out; + } ++#else ++ memset(&fl, 0, sizeof(fl)); ++ fl.nl_u.ip4_u.daddr = dst_ip; ++ fl.nl_u.ip4_u.saddr = src_ip; ++ fl.oif = addr->bound_dev_if; ++ ret = ip_route_output_key(&init_net, &rt, &fl); ++ if (ret) ++ goto out; ++#endif + src_in->sin_family = AF_INET; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) + src_in->sin_addr.s_addr = fl4.saddr; + + if (rt->dst.dev->flags & IFF_LOOPBACK) { ++#else ++ src_in->sin_addr.s_addr = rt->rt_src; ++ ++ if (rt->idev->dev->flags & IFF_LOOPBACK) { ++#endif + ret = rdma_translate_ip((struct sockaddr *) dst_in, addr); + if (!ret) + memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN); +@@ -245,12 +283,37 @@ static int addr4_resolve(struct sockaddr_in *src_in, + } + + /* If the device does ARP internally, return 'done' */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) + if (rt->dst.dev->flags & IFF_NOARP) { + ret = rdma_copy_addr(addr, rt->dst.dev, NULL); + goto put; + } + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0) + ret = dst_fetch_ha(&rt->dst, addr, &fl4.daddr); ++#else ++ ret = dst_fetch_ha(&rt->dst, addr); ++#endif ++#else ++ if (rt->idev->dev->flags & IFF_NOARP) { ++ ret = rdma_copy_addr(addr, rt->idev->dev, NULL); ++ goto put; ++ } ++ ++ neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev); ++ if (!neigh || !(neigh->nud_state & NUD_VALID)) { ++ neigh_event_send(rt->u.dst.neighbour, NULL); ++ ret = -ENODATA; ++ if (neigh) ++ goto release; ++ goto put; ++ } ++ ++ ret = rdma_copy_addr(addr, neigh->dev, neigh->ha); ++release: ++ neigh_release(neigh); ++#endif ++ + put: + ip_rt_put(rt); + out: +@@ -262,10 +325,16 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, + struct sockaddr_in6 *dst_in, + struct rdma_dev_addr *addr) + { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) + struct flowi6 fl6; ++#else ++ struct flowi fl; ++ struct neighbour *neigh; ++#endif + struct dst_entry *dst; + int ret; + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) + memset(&fl6, 0, sizeof fl6); + fl6.daddr = dst_in->sin6_addr; + fl6.saddr = src_in->sin6_addr; +@@ -284,6 +353,26 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, + src_in->sin6_family = AF_INET6; + src_in->sin6_addr = fl6.saddr; + } ++#else ++ memset(&fl, 0, sizeof fl); ++ ipv6_addr_copy(&fl.fl6_dst, &dst_in->sin6_addr); ++ ipv6_addr_copy(&fl.fl6_src, &src_in->sin6_addr); ++ fl.oif = addr->bound_dev_if; ++ ++ dst = ip6_route_output(&init_net, NULL, &fl); ++ if ((ret = dst->error)) ++ goto put; ++ ++ if (ipv6_addr_any(&fl.fl6_src)) { ++ ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev, ++ &fl.fl6_dst, 0, &fl.fl6_src); ++ if (ret) ++ goto put; ++ ++ src_in->sin6_family = AF_INET6; ++ ipv6_addr_copy(&src_in->sin6_addr, &fl.fl6_src); ++ } ++#endif + + if (dst->dev->flags & IFF_LOOPBACK) { + ret = rdma_translate_ip((struct sockaddr *) dst_in, addr); +@@ -298,7 +387,23 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, + goto put; + } + ++#if 
LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0) + ret = dst_fetch_ha(dst, addr, &fl6.daddr); ++#else ++ ret = dst_fetch_ha(dst, addr); ++#endif ++#else ++ neigh = dst->neighbour; ++ if (!neigh || !(neigh->nud_state & NUD_VALID)) { ++ neigh_event_send(dst->neighbour, NULL); ++ ret = -ENODATA; ++ goto put; ++ } ++ ++ ret = rdma_copy_addr(addr, dst->dev, neigh->ha); ++#endif ++ + put: + dst_release(dst); + return ret; +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/core/cm.c ++++ b/drivers/infiniband/core/cm.c +@@ -381,6 +381,7 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) + + static int cm_alloc_id(struct cm_id_private *cm_id_priv) + { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) + unsigned long flags; + int id; + static int next_id; +@@ -397,6 +398,24 @@ static int cm_alloc_id(struct cm_id_private *cm_id_priv) + + cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; + return id < 0 ? id : 0; ++#else ++ unsigned long flags; ++ int ret, id; ++ static int next_id; ++ ++ do { ++ spin_lock_irqsave(&cm.lock, flags); ++ ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, ++ next_id, &id); ++ if (!ret) ++ next_id = ((unsigned) id + 1) & MAX_IDR_MASK; ++ ++ spin_unlock_irqrestore(&cm.lock, flags); ++ } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); ++ ++ cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; ++ return ret; ++#endif + } + + static void cm_free_id(__be32 local_id) +@@ -3660,7 +3679,11 @@ static struct kobj_type cm_port_obj_type = { + .release = cm_release_port_obj + }; + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0) + static char *cm_devnode(struct device *dev, umode_t *mode) ++#else ++static char *cm_devnode(struct device *dev, mode_t *mode) ++#endif + { + if (mode) + *mode = 0666; +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -2269,6 +2269,7 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv, + unsigned short snum) + { + struct rdma_bind_list *bind_list; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) + int ret; + + bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); +@@ -2286,6 +2287,35 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv, + err: + kfree(bind_list); + return ret == -ENOSPC ? 
-EADDRNOTAVAIL : ret; ++#else ++ int port, ret; ++ ++ bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); ++ if (!bind_list) ++ return -ENOMEM; ++ ++ do { ++ ret = idr_get_new_above(ps, bind_list, snum, &port); ++ } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL)); ++ ++ if (ret) ++ goto err1; ++ ++ if (port != snum) { ++ ret = -EADDRNOTAVAIL; ++ goto err2; ++ } ++ ++ bind_list->ps = ps; ++ bind_list->port = (unsigned short) port; ++ cma_bind_port(bind_list, id_priv); ++ return 0; ++err2: ++ idr_remove(ps, port); ++err1: ++ kfree(bind_list); ++ return ret; ++#endif + } + + static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) +@@ -2330,9 +2360,16 @@ static int cma_check_port(struct rdma_bind_list *bind_list, + { + struct rdma_id_private *cur_id; + struct sockaddr *addr, *cur_addr; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ struct hlist_node *node; ++#endif + + addr = cma_src_addr(id_priv); ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { ++#else + hlist_for_each_entry(cur_id, &bind_list->owners, node) { ++#endif + if (id_priv == cur_id) + continue; + +@@ -3412,9 +3449,15 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id + } + + static int cma_netdev_callback(struct notifier_block *self, unsigned long event, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) + void *ptr) + { + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); ++#else ++ void *ctx) ++{ ++ struct net_device *ndev = (struct net_device *)ctx; ++#endif + struct cma_device *cma_dev; + struct rdma_id_private *id_priv; + int ret = NOTIFY_DONE; +diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/core/fmr_pool.c ++++ b/drivers/infiniband/core/fmr_pool.c +@@ -118,13 +118,20 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool, + { + struct hlist_head *bucket; + struct ib_pool_fmr *fmr; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ struct hlist_node *pos; ++#endif + + if (!pool->cache_bucket) + return NULL; + + bucket = pool->cache_bucket + ib_fmr_hash(*page_list); + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ hlist_for_each_entry(fmr, pos, bucket, cache_node) ++#else + hlist_for_each_entry(fmr, bucket, cache_node) ++#endif + if (io_virtual_address == fmr->io_virtual_address && + page_list_len == fmr->page_list_len && + !memcmp(page_list, fmr->page_list, +diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/core/netlink.c ++++ b/drivers/infiniband/core/netlink.c +@@ -30,6 +30,9 @@ + * SOFTWARE. 
+ */ + ++#ifdef pr_fmt ++#undef pr_fmt ++#endif + #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ + + #include +@@ -152,11 +155,19 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + return -EINVAL; + + { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0) || defined(CONFIG_COMPAT_NETLINK_3_7) + struct netlink_dump_control c = { + .dump = client->cb_table[op].dump, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) || defined(CONFIG_COMPAT_NETLINK_3_7) + .module = client->cb_table[op].module, ++#endif + }; + return netlink_dump_start(nls, skb, nlh, &c); ++#else ++ return netlink_dump_start(nls, skb, nlh, ++ client->cb_table[op].dump, ++ NULL, 0); ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */ + } + } + } +@@ -174,11 +185,20 @@ static void ibnl_rcv(struct sk_buff *skb) + + int __init ibnl_init(void) + { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) + struct netlink_kernel_cfg cfg = { + .input = ibnl_rcv, + }; + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg); ++#else ++ nls = netlink_kernel_create(&init_net, NETLINK_RDMA, THIS_MODULE, &cfg); ++#endif ++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) */ ++ nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv, ++ NULL, THIS_MODULE); ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) */ + if (!nls) { + pr_warn("Failed to create netlink socket\n"); + return -ENOMEM; +diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/core/sa_query.c ++++ b/drivers/infiniband/core/sa_query.c +@@ -611,10 +611,13 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent) + + static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask) + { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) + bool preload = gfp_mask & __GFP_WAIT; ++#endif + unsigned long flags; + int ret, id; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) + if (preload) + idr_preload(gfp_mask); + spin_lock_irqsave(&idr_lock, flags); +@@ -626,6 +629,18 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask) + idr_preload_end(); + if (id < 0) + return id; ++#else ++retry: ++ if (!idr_pre_get(&query_idr, gfp_mask)) ++ return -ENOMEM; ++ spin_lock_irqsave(&idr_lock, flags); ++ ret = idr_get_new(&query_idr, query, &id); ++ spin_unlock_irqrestore(&idr_lock, flags); ++ if (ret == -EAGAIN) ++ goto retry; ++ if (ret) ++ return ret; ++#endif + + query->mad_buf->timeout_ms = timeout_ms; + query->mad_buf->context[0] = query; +diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/core/ucm.c ++++ b/drivers/infiniband/core/ucm.c +@@ -176,6 +176,9 @@ static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx) + static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) + { + struct ib_ucm_context *ctx; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) ++ int result; ++#endif + + ctx = kzalloc(sizeof *ctx, GFP_KERNEL); + if (!ctx) +@@ -186,11 +189,26 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) + ctx->file = file; + INIT_LIST_HEAD(&ctx->events); + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) + mutex_lock(&ctx_id_mutex); + ctx->id = idr_alloc(&ctx_id_table, ctx, 0, 0, GFP_KERNEL); + mutex_unlock(&ctx_id_mutex); + if (ctx->id < 0) + goto error; ++#else ++ do { ++ result = 
idr_pre_get(&ctx_id_table, GFP_KERNEL); ++ if (!result) ++ goto error; ++ ++ mutex_lock(&ctx_id_mutex); ++ result = idr_get_new(&ctx_id_table, ctx, &ctx->id); ++ mutex_unlock(&ctx_id_mutex); ++ } while (result == -EAGAIN); ++ ++ if (result) ++ goto error; ++#endif + + list_add_tail(&ctx->file_list, &file->ctxs); + return ctx; +@@ -1321,8 +1339,16 @@ static void ib_ucm_remove_one(struct ib_device *device) + device_unregister(&ucm_dev->dev); + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) + static CLASS_ATTR_STRING(abi_version, S_IRUGO, + __stringify(IB_USER_CM_ABI_VERSION)); ++#else ++static ssize_t show_abi_version(struct class *class, char *buf) ++{ ++ return sprintf(buf, "%d\n", IB_USER_CM_ABI_VERSION); ++} ++static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); ++#endif + + static int __init ib_ucm_init(void) + { +@@ -1335,7 +1361,11 @@ static int __init ib_ucm_init(void) + goto error1; + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) + ret = class_create_file(&cm_class, &class_attr_abi_version.attr); ++#else ++ ret = class_create_file(&cm_class, &class_attr_abi_version); ++#endif + if (ret) { + printk(KERN_ERR "ucm: couldn't create abi_version attribute\n"); + goto error2; +@@ -1349,7 +1379,11 @@ static int __init ib_ucm_init(void) + return 0; + + error3: ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) + class_remove_file(&cm_class, &class_attr_abi_version.attr); ++#else ++ class_remove_file(&cm_class, &class_attr_abi_version); ++#endif + error2: + unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES); + error1: +@@ -1359,7 +1393,11 @@ error1: + static void __exit ib_ucm_cleanup(void) + { + ib_unregister_client(&ucm_client); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) + class_remove_file(&cm_class, &class_attr_abi_version.attr); ++#else ++ class_remove_file(&cm_class, &class_attr_abi_version); ++#endif + unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES); + if (overflow_maj) + unregister_chrdev_region(overflow_maj, IB_UCM_MAX_DEVICES); +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/core/ucma.c ++++ b/drivers/infiniband/core/ucma.c +@@ -56,6 +56,7 @@ MODULE_LICENSE("Dual BSD/GPL"); + + static unsigned int max_backlog = 1024; + ++#ifndef CONFIG_SYSCTL_SYSCALL_CHECK + static struct ctl_table_header *ucma_ctl_table_hdr; + static ctl_table ucma_ctl_table[] = { + { +@@ -67,6 +68,14 @@ static ctl_table ucma_ctl_table[] = { + }, + { } + }; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ++static struct ctl_path ucma_ctl_path[] = { ++ { .procname = "net" }, ++ { .procname = "rdma_ucm" }, ++ { } ++}; ++#endif ++#endif + + struct ucma_file { + struct mutex mut; +@@ -147,6 +156,9 @@ static void ucma_put_ctx(struct ucma_context *ctx) + static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) + { + struct ucma_context *ctx; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) ++ int ret; ++#endif + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) +@@ -157,11 +169,26 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) + INIT_LIST_HEAD(&ctx->mc_list); + ctx->file = file; + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) ++ do { ++ ret = idr_pre_get(&ctx_idr, GFP_KERNEL); ++ if (!ret) ++ goto error; ++ ++ mutex_lock(&mut); ++ ret = idr_get_new(&ctx_idr, ctx, &ctx->id); ++ mutex_unlock(&mut); ++ } while (ret == -EAGAIN); ++ ++ if (ret) ++ goto error; ++#else + mutex_lock(&mut); + ctx->id = idr_alloc(&ctx_idr, ctx, 
0, 0, GFP_KERNEL); + mutex_unlock(&mut); + if (ctx->id < 0) + goto error; ++#endif + + list_add_tail(&ctx->list, &file->ctx_list); + return ctx; +@@ -174,16 +201,34 @@ error: + static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx) + { + struct ucma_multicast *mc; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) ++ int ret; ++#endif + + mc = kzalloc(sizeof(*mc), GFP_KERNEL); + if (!mc) + return NULL; + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) ++ do { ++ ret = idr_pre_get(&multicast_idr, GFP_KERNEL); ++ if (!ret) ++ goto error; ++ ++ mutex_lock(&mut); ++ ret = idr_get_new(&multicast_idr, mc, &mc->id); ++ mutex_unlock(&mut); ++ } while (ret == -EAGAIN); ++ ++ if (ret) ++ goto error; ++#else + mutex_lock(&mut); + mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL); + mutex_unlock(&mut); + if (mc->id < 0) + goto error; ++#endif + + mc->ctx = ctx; + list_add_tail(&mc->list, &ctx->mc_list); +@@ -1408,7 +1453,11 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file, + struct rdma_ucm_migrate_id cmd; + struct rdma_ucm_migrate_resp resp; + struct ucma_context *ctx; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + struct fd f; ++#else ++ struct file *filp; ++#endif + struct ucma_file *cur_file; + int ret = 0; + +@@ -1416,12 +1465,21 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file, + return -EFAULT; + + /* Get current fd to protect against it being closed */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + f = fdget(cmd.fd); + if (!f.file) ++#else ++ filp = fget(cmd.fd); ++ if (!filp) ++#endif + return -ENOENT; + + /* Validate current fd and prevent destruction of id. */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + ctx = ucma_get_ctx(f.file->private_data, cmd.id); ++#else ++ ctx = ucma_get_ctx(filp->private_data, cmd.id); ++#endif + if (IS_ERR(ctx)) { + ret = PTR_ERR(ctx); + goto file_put; +@@ -1455,7 +1513,11 @@ response: + + ucma_put_ctx(ctx); + file_put: ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + fdput(f); ++#else ++ fput(filp); ++#endif + return ret; + } + +@@ -1616,15 +1678,23 @@ static int __init ucma_init(void) + goto err1; + } + ++#ifndef CONFIG_SYSCTL_SYSCALL_CHECK ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) + ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table); ++#else ++ ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table); ++#endif + if (!ucma_ctl_table_hdr) { + printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n"); + ret = -ENOMEM; + goto err2; + } ++#endif + return 0; ++#ifndef CONFIG_SYSCTL_SYSCALL_CHECK + err2: + device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); ++#endif + err1: + misc_deregister(&ucma_misc); + return ret; +@@ -1632,7 +1702,13 @@ err1: + + static void __exit ucma_cleanup(void) + { ++#ifndef CONFIG_SYSCTL_SYSCALL_CHECK ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) + unregister_net_sysctl_table(ucma_ctl_table_hdr); ++#else ++ unregister_sysctl_table(ucma_ctl_table_hdr); ++#endif ++#endif + device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); + misc_deregister(&ucma_misc); + idr_destroy(&ctx_idr); +diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/core/umem.c ++++ b/drivers/infiniband/core/umem.c +@@ -137,7 +137,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, + + down_write(¤t->mm->mmap_sem); + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) + locked = npages + 
current->mm->pinned_vm; ++#else ++ locked = npages + current->mm->locked_vm; ++#endif + lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + + if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { +@@ -207,7 +211,11 @@ out: + __ib_umem_release(context->device, umem, 0); + kfree(umem); + } else ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) + current->mm->pinned_vm = locked; ++#else ++ current->mm->locked_vm = locked; ++#endif + + up_write(¤t->mm->mmap_sem); + if (vma_list) +@@ -223,7 +231,11 @@ static void ib_umem_account(struct work_struct *work) + struct ib_umem *umem = container_of(work, struct ib_umem, work); + + down_write(&umem->mm->mmap_sem); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) + umem->mm->pinned_vm -= umem->diff; ++#else ++ umem->mm->locked_vm -= umem->diff; ++#endif + up_write(&umem->mm->mmap_sem); + mmput(umem->mm); + kfree(umem); +@@ -269,7 +281,11 @@ void ib_umem_release(struct ib_umem *umem) + } else + down_write(&mm->mmap_sem); + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) + current->mm->pinned_vm -= diff; ++#else ++ current->mm->locked_vm -= diff; ++#endif + up_write(&mm->mmap_sem); + mmput(mm); + kfree(umem); +diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/core/user_mad.c ++++ b/drivers/infiniband/core/user_mad.c +@@ -969,8 +969,16 @@ static ssize_t show_port(struct device *dev, struct device_attribute *attr, + } + static DEVICE_ATTR(port, S_IRUGO, show_port, NULL); + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) + static CLASS_ATTR_STRING(abi_version, S_IRUGO, + __stringify(IB_USER_MAD_ABI_VERSION)); ++#else ++static ssize_t show_abi_version(struct class *class, char *buf) ++{ ++ return sprintf(buf, "%d\n", IB_USER_MAD_ABI_VERSION); ++} ++static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); ++#endif + + static dev_t overflow_maj; + static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS); +@@ -1175,7 +1183,11 @@ static void ib_umad_remove_one(struct ib_device *device) + kref_put(&umad_dev->ref, ib_umad_release_dev); + } + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0) + static char *umad_devnode(struct device *dev, umode_t *mode) ++#else ++static char *umad_devnode(struct device *dev, mode_t *mode) ++#endif + { + return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); + } +@@ -1200,7 +1212,11 @@ static int __init ib_umad_init(void) + + umad_class->devnode = umad_devnode; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) + ret = class_create_file(umad_class, &class_attr_abi_version.attr); ++#else ++ ret = class_create_file(umad_class, &class_attr_abi_version); ++#endif + if (ret) { + printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n"); + goto out_class; +diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/core/uverbs_cmd.c ++++ b/drivers/infiniband/core/uverbs_cmd.c +@@ -128,6 +128,7 @@ static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj) + { + int ret; + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0) + idr_preload(GFP_KERNEL); + spin_lock(&ib_uverbs_idr_lock); + +@@ -139,6 +140,20 @@ static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj) + idr_preload_end(); + + return ret < 0 ? 
ret : 0; ++#else ++retry: ++ if (!idr_pre_get(idr, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ spin_lock(&ib_uverbs_idr_lock); ++ ret = idr_get_new(idr, uobj, &uobj->id); ++ spin_unlock(&ib_uverbs_idr_lock); ++ ++ if (ret == -EAGAIN) ++ goto retry; ++ ++ return ret; ++#endif + } + + void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj) +@@ -338,7 +353,11 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, + + resp.num_comp_vectors = file->device->num_comp_vectors; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) + ret = get_unused_fd_flags(O_CLOEXEC); ++#else ++ ret = get_unused_fd(); ++#endif + if (ret < 0) + goto err_free; + resp.async_fd = ret; +@@ -709,7 +728,11 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, + struct ib_udata udata; + struct ib_uxrcd_object *obj; + struct ib_xrcd *xrcd = NULL; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + struct fd f = {NULL, 0}; ++#else ++ struct file *f = NULL; ++#endif + struct inode *inode = NULL; + int ret = 0; + int new_xrcd = 0; +@@ -728,6 +751,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, + + if (cmd.fd != -1) { + /* search for file descriptor */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + f = fdget(cmd.fd); + if (!f.file) { + ret = -EBADF; +@@ -735,6 +759,19 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, + } + + inode = file_inode(f.file); ++#else ++ f = fget(cmd.fd); ++ if (!f) { ++ ret = -EBADF; ++ goto err_tree_mutex_unlock; ++ } ++ ++ inode = f->f_dentry->d_inode; ++ if (!inode) { ++ ret = -EBADF; ++ goto err_tree_mutex_unlock; ++ } ++#endif + xrcd = find_xrcd(file->device, inode); + if (!xrcd && !(cmd.oflags & O_CREAT)) { + /* no file descriptor. Need CREATE flag */ +@@ -799,8 +836,13 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, + goto err_copy; + } + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + if (f.file) + fdput(f); ++#else ++ if (f) ++ fput(f); ++#endif + + mutex_lock(&file->mutex); + list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list); +@@ -829,8 +871,13 @@ err: + put_uobj_write(&obj->uobject); + + err_tree_mutex_unlock: ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + if (f.file) + fdput(f); ++#else ++ if (f) ++ fput(f); ++#endif + + mutex_unlock(&file->device->xrcd_tree_mutex); + +@@ -1188,7 +1235,11 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file, + if (copy_from_user(&cmd, buf, sizeof cmd)) + return -EFAULT; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) + ret = get_unused_fd_flags(O_CLOEXEC); ++#else ++ ret = get_unused_fd(); ++#endif + if (ret < 0) + return ret; + resp.fd = ret; +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/core/uverbs_main.c ++++ b/drivers/infiniband/core/uverbs_main.c +@@ -563,6 +563,7 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file, + struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd) + { + struct ib_uverbs_event_file *ev_file = NULL; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + struct fd f = fdget(fd); + + if (!f.file) +@@ -582,6 +583,29 @@ struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd) + out: + fdput(f); + return ev_file; ++#else ++ struct file *filp; ++ int fput_needed; ++ ++ filp = fget_light(fd, &fput_needed); ++ if (!filp) ++ return NULL; ++ ++ if (filp->f_op != &uverbs_event_fops) ++ goto out; ++ ++ ev_file = filp->private_data; ++ if (ev_file->is_async) { ++ ev_file = NULL; ++ goto out; ++ } 
++ ++ kref_get(&ev_file->ref); ++ ++out: ++ fput_light(filp, fput_needed); ++ return ev_file; ++#endif + } + + static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, +@@ -759,8 +783,16 @@ static ssize_t show_dev_abi_version(struct device *device, + } + static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL); + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) + static CLASS_ATTR_STRING(abi_version, S_IRUGO, + __stringify(IB_USER_VERBS_ABI_VERSION)); ++#else ++static ssize_t show_abi_version(struct class *class, char *buf) ++{ ++ return sprintf(buf, "%d\n", IB_USER_VERBS_ABI_VERSION); ++} ++static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); ++#endif + + static dev_t overflow_maj; + static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES); +@@ -890,7 +922,11 @@ static void ib_uverbs_remove_one(struct ib_device *device) + kfree(uverbs_dev); + } + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0) + static char *uverbs_devnode(struct device *dev, umode_t *mode) ++#else ++static char *uverbs_devnode(struct device *dev, mode_t *mode) ++#endif + { + if (mode) + *mode = 0666; +@@ -917,7 +953,11 @@ static int __init ib_uverbs_init(void) + + uverbs_class->devnode = uverbs_devnode; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) + ret = class_create_file(uverbs_class, &class_attr_abi_version.attr); ++#else ++ ret = class_create_file(uverbs_class, &class_attr_abi_version); ++#endif + if (ret) { + printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n"); + goto out_class; diff --git a/patches/0002-BACKPORT-mlx4-mlx4_core-mlx4_en-and-mlx4_ib.patch b/patches/0002-BACKPORT-mlx4-mlx4_core-mlx4_en-and-mlx4_ib.patch new file mode 100644 index 0000000..1be5787 --- /dev/null +++ b/patches/0002-BACKPORT-mlx4-mlx4_core-mlx4_en-and-mlx4_ib.patch @@ -0,0 +1,1208 @@ +From: Vladimir Sokolovsky +Subject: [PATCH] BACKPORT: mlx4 (mlx4_core, mlx4_en and mlx4_ib) + +Signed-off-by: Vladimir Sokolovsky +--- + drivers/infiniband/hw/mlx4/cm.c | 31 ++++ + drivers/infiniband/hw/mlx4/main.c | 16 ++ + drivers/net/ethernet/mellanox/mlx4/cmd.c | 6 + + drivers/net/ethernet/mellanox/mlx4/en_cq.c | 10 ++ + drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 47 ++++++ + drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 176 ++++++++++++++++++++++- + drivers/net/ethernet/mellanox/mlx4/en_rx.c | 29 ++++ + drivers/net/ethernet/mellanox/mlx4/en_tx.c | 32 ++++ + drivers/net/ethernet/mellanox/mlx4/eq.c | 8 + + drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 57 ++++++++ + include/linux/mlx4/cmd.h | 11 ++ + include/linux/mlx4/device.h | 6 + + 12 files changed, 428 insertions(+), 1 deletions(-) + +diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/hw/mlx4/cm.c ++++ b/drivers/infiniband/hw/mlx4/cm.c +@@ -204,6 +204,12 @@ static struct id_map_entry * + id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id) + { + int ret; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) ++ int id; ++#endif ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) ++ static int next_id; ++#endif + struct id_map_entry *ent; + struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; + +@@ -219,11 +225,35 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id) + ent->dev = to_mdev(ibdev); + INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout); + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) ++ do { ++ spin_lock(&to_mdev(ibdev)->sriov.id_map_lock); ++ ret = 
idr_get_new_above(&sriov->pv_id_table, ent, ++ next_id, &id); ++ if (!ret) { ++ next_id = ((unsigned) id + 1) & MAX_IDR_MASK; ++ ent->pv_cm_id = (u32)id; ++ sl_id_map_add(ibdev, ent); ++ } ++ ++ spin_unlock(&sriov->id_map_lock); ++ } while (ret == -EAGAIN && idr_pre_get(&sriov->pv_id_table, GFP_KERNEL)); ++ /*the function idr_get_new_above can return -ENOSPC, so don't insert in that case.*/ ++ if (!ret) { ++ spin_lock(&sriov->id_map_lock); ++ list_add_tail(&ent->list, &sriov->cm_list); ++ spin_unlock(&sriov->id_map_lock); ++ return ent; ++ } ++#else + idr_preload(GFP_KERNEL); + spin_lock(&to_mdev(ibdev)->sriov.id_map_lock); + + ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT); + if (ret >= 0) { ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) ++ next_id = max(ret + 1, 0); ++#endif + ent->pv_cm_id = (u32)ret; + sl_id_map_add(ibdev, ent); + list_add_tail(&ent->list, &sriov->cm_list); +@@ -234,6 +264,7 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id) + + if (ret >= 0) + return ent; ++#endif + + /*error flow*/ + kfree(ent); +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/hw/mlx4/main.c ++++ b/drivers/infiniband/hw/mlx4/main.c +@@ -806,7 +806,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, + if (ndev) { + rdma_get_mcast_mac((struct in6_addr *)gid, mac); + rtnl_lock(); ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)) ++ dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac, 6, 0); ++#else + dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac); ++#endif + ret = 1; + rtnl_unlock(); + dev_put(ndev); +@@ -1130,7 +1134,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) + rdma_get_mcast_mac((struct in6_addr *)gid, mac); + if (ndev) { + rtnl_lock(); ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)) ++ dev_mc_delete(mdev->iboe.netdevs[ge->port - 1], mac, 6, 0); ++#else + dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac); ++#endif + rtnl_unlock(); + dev_put(ndev); + } +@@ -1387,7 +1395,11 @@ static void netdev_removed(struct mlx4_ib_dev *dev, int port) + static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event, + void *ptr) + { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) + struct net_device *dev = netdev_notifier_info_to_dev(ptr); ++#else ++ struct net_device *dev = ptr; ++#endif + struct mlx4_ib_dev *ibdev; + struct net_device *oldnd; + struct mlx4_ib_iboe *iboe; +@@ -1491,8 +1503,12 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) + sprintf(name, "mlx4-ib-%d-%d@%s", + i, j, dev->pdev->bus->name); + /* Set IRQ for specific name (per ring) */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + if (mlx4_assign_eq(dev, name, NULL, + &ibdev->eq_table[eq])) { ++#else ++ if (mlx4_assign_eq(dev, name, &ibdev->eq_table[eq])) { ++#endif + /* Use legacy (same as mlx4_en driver) */ + pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq); + ibdev->eq_table[eq] = +diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c +@@ -2306,6 +2306,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) + } + EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk); + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) || defined(CONFIG_COMPAT_NDO_VF_MAC_VLAN) + int 
mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf) + { + struct mlx4_priv *priv = mlx4_priv(dev); +@@ -2333,12 +2334,17 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in + ivf->vlan = s_info->default_vlan; + ivf->qos = s_info->default_qos; + ivf->tx_rate = s_info->tx_rate; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) + ivf->spoofchk = s_info->spoofchk; ++#endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) + ivf->linkstate = s_info->link_state; ++#endif + + return 0; + } + EXPORT_SYMBOL_GPL(mlx4_get_vf_config); ++#endif + + int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state) + { +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c +@@ -78,12 +78,14 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, + int err = 0; + char name[25]; + int timestamp_en = 0; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + struct cpu_rmap *rmap = + #ifdef CONFIG_RFS_ACCEL + priv->dev->rx_cpu_rmap; + #else + NULL; + #endif ++#endif + + cq->dev = mdev->pndev[priv->port]; + cq->mcq.set_ci_db = cq->wqres.db.db; +@@ -98,8 +100,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, + sprintf(name, "%s-%d", priv->dev->name, + cq->ring); + /* Set IRQ for specific name (per ring) */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + if (mlx4_assign_eq(mdev->dev, name, rmap, + &cq->vector)) { ++#else ++ if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) { ++#endif + cq->vector = (cq->ring + 1 + priv->port) + % mdev->dev->caps.num_comp_vectors; + mlx4_warn(mdev, "Failed Assigning an EQ to " +@@ -139,7 +145,9 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, + + if (!cq->is_tx) { + netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); ++#ifdef CONFIG_NET_RX_BUSY_POLL + napi_hash_add(&cq->napi); ++#endif + napi_enable(&cq->napi); + } + +@@ -163,7 +171,9 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) + { + if (!cq->is_tx) { + napi_disable(&cq->napi); ++#ifdef CONFIG_NET_RX_BUSY_POLL + napi_hash_del(&cq->napi); ++#endif + synchronize_rcu(); + netif_napi_del(&cq->napi); + } +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +@@ -557,14 +557,22 @@ static void mlx4_en_get_ringparam(struct net_device *dev, + param->tx_pending = priv->tx_ring[0].size; + } + ++#ifndef CONFIG_COMPAT_INDIR_SETTING + static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev) ++#else ++u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev) ++#endif + { + struct mlx4_en_priv *priv = netdev_priv(dev); + + return priv->rx_ring_num; + } + ++#ifndef CONFIG_COMPAT_INDIR_SETTING + static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index) ++#else ++int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index) ++#endif + { + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_rss_map *rss_map = &priv->rss_map; +@@ -582,8 +590,13 @@ static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index) + return err; + } + ++#ifndef CONFIG_COMPAT_INDIR_SETTING + static int mlx4_en_set_rxfh_indir(struct 
net_device *dev, + const u32 *ring_index) ++#else ++int mlx4_en_set_rxfh_indir(struct net_device *dev, ++ const u32 *ring_index) ++#endif + { + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; +@@ -1035,8 +1048,13 @@ static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv) + + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) + static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) ++#else ++static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, ++ void *rule_locs) ++#endif + { + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; +@@ -1064,7 +1082,11 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) { + err = mlx4_en_get_flow(dev, cmd, i); + if (!err) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) + rule_locs[priority++] = i; ++#else ++ ((u32 *)(rule_locs))[priority++] = i; ++#endif + i++; + } + err = 0; +@@ -1102,8 +1124,13 @@ static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) + return err; + } + ++#ifndef CONFIG_COMPAT_NUM_CHANNELS + static void mlx4_en_get_channels(struct net_device *dev, + struct ethtool_channels *channel) ++#else ++void mlx4_en_get_channels(struct net_device *dev, ++ struct ethtool_channels *channel) ++#endif + { + struct mlx4_en_priv *priv = netdev_priv(dev); + +@@ -1116,8 +1143,13 @@ static void mlx4_en_get_channels(struct net_device *dev, + channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP; + } + ++#ifndef CONFIG_COMPAT_NUM_CHANNELS + static int mlx4_en_set_channels(struct net_device *dev, + struct ethtool_channels *channel) ++#else ++int mlx4_en_set_channels(struct net_device *dev, ++ struct ethtool_channels *channel) ++#endif + { + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; +@@ -1148,10 +1180,15 @@ static int mlx4_en_set_channels(struct net_device *dev, + goto out; + } + ++#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined (CONFIG_COMPAT_IS_NUM_TX_QUEUES)) && \ ++ !defined (CONFIG_COMPAT_DISABLE_REAL_NUM_TXQ) + netif_set_real_num_tx_queues(dev, priv->tx_ring_num); ++#endif + netif_set_real_num_rx_queues(dev, priv->rx_ring_num); + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) || defined (CONFIG_COMPAT_NEW_TX_RING_SCHEME) + mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP); ++#endif + + en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num); + en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num); +@@ -1219,11 +1256,21 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { + .set_ringparam = mlx4_en_set_ringparam, + .get_rxnfc = mlx4_en_get_rxnfc, + .set_rxnfc = mlx4_en_set_rxnfc, ++#ifndef CONFIG_COMPAT_INDIR_SETTING + .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, + .get_rxfh_indir = mlx4_en_get_rxfh_indir, + .set_rxfh_indir = mlx4_en_set_rxfh_indir, ++#endif ++#ifdef CONFIG_COMPAT_ETHTOOL_OPS_EXT ++}; ++ ++const struct ethtool_ops_ext mlx4_en_ethtool_ops_ext = { ++ .size = sizeof(mlx4_en_ethtool_ops_ext), ++#endif ++#ifndef CONFIG_COMPAT_NUM_CHANNELS + .get_channels = mlx4_en_get_channels, + .set_channels = mlx4_en_set_channels, ++#endif + .get_ts_info = mlx4_en_get_ts_info, + }; + +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c 
+@@ -48,6 +48,7 @@ + #include "mlx4_en.h" + #include "en_port.h" + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) || defined (CONFIG_COMPAT_NEW_TX_RING_SCHEME) + int mlx4_en_setup_tc(struct net_device *dev, u8 up) + { + struct mlx4_en_priv *priv = netdev_priv(dev); +@@ -67,6 +68,7 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) + + return 0; + } ++#endif + + #ifdef CONFIG_NET_RX_BUSY_POLL + /* must be called with local_bh_disable()d */ +@@ -254,10 +256,17 @@ static inline struct mlx4_en_filter * + mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, + __be16 src_port, __be16 dst_port) + { ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ struct hlist_node *elem; ++#endif + struct mlx4_en_filter *filter; + struct mlx4_en_filter *ret = NULL; + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ hlist_for_each_entry(filter, elem, ++#else + hlist_for_each_entry(filter, ++#endif + filter_hash_bucket(priv, src_ip, dst_ip, + src_port, dst_port), + filter_chain) { +@@ -385,8 +394,16 @@ static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv) + } + #endif + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) + static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) ++static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, ++ u16 vid) ++#else ++static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, ++ u16 vid) ++#endif + { + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; +@@ -408,11 +425,21 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, + en_dbg(HW, priv, "failed adding vlan %d\n", vid); + mutex_unlock(&mdev->state_lock); + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) + return 0; ++#endif + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) + static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) ++static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, ++ u16 vid) ++#else ++static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, ++ u16 vid) ++#endif + { + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; +@@ -437,7 +464,9 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, + } + mutex_unlock(&mdev->state_lock); + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) + return 0; ++#endif + } + + static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) +@@ -605,13 +634,21 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv) + mlx4_unregister_mac(dev, priv->port, mac); + } else { + struct mlx4_mac_entry *entry; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ struct hlist_node *n, *tmp; ++#else + struct hlist_node *tmp; ++#endif + struct hlist_head *bucket; + unsigned int i; + + for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { + bucket = &priv->mac_hash[i]; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) { ++#else + hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { ++#endif + mac = mlx4_en_mac_to_u64(entry->mac); + en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", + entry->mac); +@@ -643,11 +680,19 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, + struct hlist_head *bucket; + unsigned int mac_hash; + struct mlx4_mac_entry *entry; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ struct hlist_node *n, *tmp; ++#else + struct 
hlist_node *tmp; ++#endif + u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac); + + bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) { ++#else + hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { ++#endif + if (ether_addr_equal_64bits(entry->mac, prev_mac)) { + mlx4_en_uc_steer_release(priv, entry->mac, + qpn, entry->reg_id); +@@ -736,17 +781,29 @@ static void mlx4_en_clear_list(struct net_device *dev) + static void mlx4_en_cache_mclist(struct net_device *dev) + { + struct mlx4_en_priv *priv = netdev_priv(dev); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) + struct netdev_hw_addr *ha; ++#else ++ struct dev_mc_list *mclist; ++#endif + struct mlx4_en_mc_list *tmp; + + mlx4_en_clear_list(dev); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) + netdev_for_each_mc_addr(ha, dev) { ++#else ++ for (mclist = dev->mc_list; mclist; mclist = mclist->next) { ++#endif + tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC); + if (!tmp) { + mlx4_en_clear_list(dev); + return; + } ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) + memcpy(tmp->addr, ha->addr, ETH_ALEN); ++#else ++ memcpy(tmp->addr, mclist->dmi_addr, ETH_ALEN); ++#endif + list_add_tail(&tmp->list, &priv->mc_list); + } + } +@@ -1054,7 +1111,11 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, + { + struct netdev_hw_addr *ha; + struct mlx4_mac_entry *entry; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ struct hlist_node *n, *tmp; ++#else + struct hlist_node *tmp; ++#endif + bool found; + u64 mac; + int err = 0; +@@ -1070,7 +1131,11 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, + /* find what to remove */ + for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { + bucket = &priv->mac_hash[i]; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) { ++#else + hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { ++#endif + found = false; + netdev_for_each_uc_addr(ha, dev) { + if (ether_addr_equal_64bits(entry->mac, +@@ -1113,7 +1178,11 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, + netdev_for_each_uc_addr(ha, dev) { + found = false; + bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ hlist_for_each_entry(entry, n, bucket, hlist) { ++#else + hlist_for_each_entry(entry, bucket, hlist) { ++#endif + if (ether_addr_equal_64bits(entry->mac, ha->addr)) { + found = true; + break; +@@ -1195,7 +1264,11 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work) + } + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) + if (dev->priv_flags & IFF_UNICAST_FLT) ++#else ++ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) ++#endif + mlx4_en_do_uc_filter(priv, dev, mdev); + + /* Promsicuous mode: disable all filters */ +@@ -1543,8 +1616,12 @@ int mlx4_en_start_port(struct net_device *dev) + + /* Configure ring */ + tx_ring = &priv->tx_ring[i]; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) || defined (CONFIG_COMPAT_NEW_TX_RING_SCHEME) + err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, + i / priv->num_tx_rings_p_up); ++#else ++ err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn); ++#endif + if (err) { + en_err(priv, "Failed allocating Tx ring\n"); + mlx4_en_deactivate_cq(priv, cq); +@@ -1847,9 +1924,14 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv) + int i; + + #ifdef CONFIG_RFS_ACCEL ++#ifdef 
CONFIG_COMPAT_IS_NETDEV_EXTENDED ++ free_irq_cpu_rmap(mlx4_en_rx_cpu_rmap(priv)); ++ mlx4_en_rx_cpu_rmap(priv) = NULL; ++#else + free_irq_cpu_rmap(priv->dev->rx_cpu_rmap); + priv->dev->rx_cpu_rmap = NULL; + #endif ++#endif + + for (i = 0; i < priv->tx_ring_num; i++) { + if (priv->tx_ring[i].tx_info) +@@ -1907,12 +1989,18 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) + } + + #ifdef CONFIG_RFS_ACCEL ++#ifdef CONFIG_COMPAT_IS_NETDEV_EXTENDED ++ mlx4_en_rx_cpu_rmap(priv) = alloc_irq_cpu_rmap(priv->rx_ring_num); ++ if (!mlx4_en_rx_cpu_rmap(priv)) ++ goto err; ++#else + if (priv->mdev->dev->caps.comp_pool) { + priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); + if (!priv->dev->rx_cpu_rmap) + goto err; + } + #endif ++#endif + + return 0; + +@@ -2058,7 +2146,11 @@ static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + } + } + +-static int mlx4_en_set_features(struct net_device *netdev, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) || defined(CONFIG_COMPAT_LOOPBACK)) ++#ifndef CONFIG_COMPAT_LOOPBACK ++static ++#endif ++int mlx4_en_set_features(struct net_device *netdev, + netdev_features_t features) + { + struct mlx4_en_priv *priv = netdev_priv(netdev); +@@ -2074,7 +2166,9 @@ static int mlx4_en_set_features(struct net_device *netdev, + return 0; + + } ++#endif + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) || defined(CONFIG_COMPAT_NDO_VF_MAC_VLAN) + static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) + { + struct mlx4_en_priv *en_priv = netdev_priv(dev); +@@ -2094,7 +2188,9 @@ static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos) + + return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos); + } ++#endif + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) + static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) + { + struct mlx4_en_priv *en_priv = netdev_priv(dev); +@@ -2102,7 +2198,9 @@ static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) + + return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting); + } ++#endif + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) || defined(CONFIG_COMPAT_NDO_VF_MAC_VLAN) + static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf) + { + struct mlx4_en_priv *en_priv = netdev_priv(dev); +@@ -2110,7 +2208,9 @@ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_ + + return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); + } ++#endif + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) + static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state) + { + struct mlx4_en_priv *en_priv = netdev_priv(dev); +@@ -2118,6 +2218,7 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st + + return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); + } ++#endif + static const struct net_device_ops mlx4_netdev_ops = { + .ndo_open = mlx4_en_open, + .ndo_stop = mlx4_en_close, +@@ -2135,11 +2236,15 @@ static const struct net_device_ops mlx4_netdev_ops = { + #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mlx4_en_netpoll, + #endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + .ndo_set_features = mlx4_en_set_features, + .ndo_setup_tc = mlx4_en_setup_tc, ++#endif + #ifdef CONFIG_RFS_ACCEL ++#ifndef CONFIG_COMPAT_IS_NETDEV_EXTENDED + .ndo_rx_flow_steer = mlx4_en_filter_rfs, + #endif ++#endif + #ifdef CONFIG_NET_RX_BUSY_POLL + 
.ndo_busy_poll = mlx4_en_low_latency_recv, + #endif +@@ -2158,19 +2263,31 @@ static const struct net_device_ops mlx4_netdev_ops_master = { + .ndo_tx_timeout = mlx4_en_tx_timeout, + .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) || defined(CONFIG_COMPAT_NDO_VF_MAC_VLAN) + .ndo_set_vf_mac = mlx4_en_set_vf_mac, + .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, ++#endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) + .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, ++#endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) + .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, ++#endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) || defined(CONFIG_COMPAT_NDO_VF_MAC_VLAN) + .ndo_get_vf_config = mlx4_en_get_vf_config, ++#endif + #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mlx4_en_netpoll, + #endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + .ndo_set_features = mlx4_en_set_features, + .ndo_setup_tc = mlx4_en_setup_tc, ++#endif + #ifdef CONFIG_RFS_ACCEL ++#ifndef CONFIG_COMPAT_IS_NETDEV_EXTENDED + .ndo_rx_flow_steer = mlx4_en_filter_rfs, + #endif ++#endif + }; + + int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, +@@ -2182,12 +2299,19 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, + int err; + u64 mac_u64; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) || defined(CONFIG_COMPAT_NEW_TX_RING_SCHEME) + dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), + MAX_TX_RINGS, MAX_RX_RINGS); ++#else ++ dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num); ++#endif + if (dev == NULL) + return -ENOMEM; + ++#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined (CONFIG_COMPAT_IS_NUM_TX_QUEUES) || defined (CONFIG_X86_XEN)) && \ ++ !defined (CONFIG_COMPAT_DISABLE_REAL_NUM_TXQ) + netif_set_real_num_tx_queues(dev, prof->tx_ring_num); ++#endif + netif_set_real_num_rx_queues(dev, prof->rx_ring_num); + + SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); +@@ -2233,6 +2357,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, + INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); + INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); + INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); ++#ifndef CONFIG_COMPAT_DISABLE_DCB + #ifdef CONFIG_MLX4_EN_DCB + if (!mlx4_is_slave(priv->mdev->dev)) { + if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { +@@ -2243,6 +2368,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, + } + } + #endif ++#endif + + for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) + INIT_HLIST_HEAD(&priv->mac_hash[i]); +@@ -2302,21 +2428,42 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, + else + dev->netdev_ops = &mlx4_netdev_ops; + dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; ++#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined (CONFIG_COMPAT_IS_NUM_TX_QUEUES) || defined (CONFIG_X86_XEN)) && \ ++ !defined (CONFIG_COMPAT_DISABLE_REAL_NUM_TXQ) + netif_set_real_num_tx_queues(dev, priv->tx_ring_num); ++#endif + netif_set_real_num_rx_queues(dev, priv->rx_ring_num); + ++#ifdef CONFIG_RFS_ACCEL ++#ifdef CONFIG_COMPAT_IS_NETDEV_EXTENDED ++ netdev_extended(dev)->rfs_data.ndo_rx_flow_steer = mlx4_en_filter_rfs; ++#endif ++#endif + SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); + ++#ifdef CONFIG_COMPAT_ETHTOOL_OPS_EXT ++ set_ethtool_ops_ext(dev, &mlx4_en_ethtool_ops_ext); ++#endif ++ + /* + * Set driver features + */ ++#if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(2,6,39)) ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; ++#else ++ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; ++#endif + if (mdev->LSO_support) + dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + + dev->vlan_features = dev->hw_features; + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) + dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; ++#else ++ dev->hw_features |= NETIF_F_RXCSUM; ++#endif + dev->features = dev->hw_features | NETIF_F_HIGHDMA | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; +@@ -2326,9 +2473,36 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, + MLX4_STEERING_MODE_DEVICE_MANAGED) + dev->hw_features |= NETIF_F_NTUPLE; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) + if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) + dev->priv_flags |= IFF_UNICAST_FLT; ++#endif ++ ++#else ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) ++ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; ++#else ++ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; ++#endif ++ if (mdev->LSO_support) ++ dev->features |= NETIF_F_TSO | NETIF_F_TSO6; + ++ dev->vlan_features = dev->features; ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) || defined (CONFIG_COMPAT_NETIF_F_RXHASH) ++ dev->features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; ++#else ++ dev->features |= NETIF_F_RXCSUM; ++#endif ++ dev->features |= NETIF_F_HIGHDMA | ++ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | ++ NETIF_F_HW_VLAN_FILTER; ++ ++ if (mdev->dev->caps.steering_mode == ++ MLX4_STEERING_MODE_DEVICE_MANAGED) ++ dev->features |= NETIF_F_NTUPLE; ++#endif ++ + mdev->pndev[port] = dev; + + netif_carrier_off(dev); +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c +@@ -656,6 +656,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud + + if (is_multicast_ether_addr(ethh->h_dest)) { + struct mlx4_mac_entry *entry; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ struct hlist_node *n; ++#endif + struct hlist_head *bucket; + unsigned int mac_hash; + +@@ -663,7 +666,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud + mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX]; + bucket = &priv->mac_hash[mac_hash]; + rcu_read_lock(); ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ hlist_for_each_entry_rcu(entry, n, bucket, hlist) { ++#else + hlist_for_each_entry_rcu(entry, bucket, hlist) { ++#endif + if (ether_addr_equal_64bits(entry->mac, + ethh->h_source)) { + rcu_read_unlock(); +@@ -715,7 +722,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud + (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + u16 vid = be16_to_cpu(cqe->sl_vid); + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) ++ __vlan_hwaccel_put_tag(gro_skb, vid); ++#else + __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid); ++#endif + } + + if (dev->features & NETIF_F_RXHASH) +@@ -760,13 +771,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud + skb->protocol = eth_type_trans(skb, dev); + skb_record_rx_queue(skb, cq->ring); + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) || defined (CONFIG_COMPAT_NETIF_F_RXHASH) + if (dev->features & 
NETIF_F_RXHASH) + skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid); ++#endif + + if ((be32_to_cpu(cqe->vlan_my_qpn) & + MLX4_CQE_VLAN_PRESENT_MASK) && + (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) ++ __vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid)); ++#else + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid)); ++#endif + + if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { + timestamp = mlx4_en_get_cqe_ts(cqe); +@@ -774,7 +791,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud + timestamp); + } + ++#ifdef CONFIG_NET_RX_BUSY_POLL + skb_mark_napi_id(skb, &cq->napi); ++#endif + + /* Push it up the stack */ + netif_receive_skb(skb); +@@ -910,8 +929,13 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, + qp->event = mlx4_en_sqp_event; + + memset(context, 0, sizeof *context); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) || defined (CONFIG_COMPAT_NEW_TX_RING_SCHEME) + mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, + qpn, ring->cqn, -1, context); ++#else ++ mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, ++ qpn, ring->cqn, context); ++#endif + context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); + + /* Cancel FCS removal if FW allows */ +@@ -1007,8 +1031,13 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) + goto rss_err; + } + rss_map->indir_qp.event = mlx4_en_sqp_event; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) || defined (CONFIG_COMPAT_NEW_TX_RING_SCHEME) + mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, + priv->rx_ring[0].cqn, -1, &context); ++#else ++ mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, ++ priv->rx_ring[0]->cqn, &context); ++#endif + + if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num) + rss_rings = priv->rx_ring_num; +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c +@@ -155,7 +155,11 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, + + int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) || defined (CONFIG_COMPAT_NEW_TX_RING_SCHEME) + int cq, int user_prio) ++#else ++ int cq) ++#endif + { + struct mlx4_en_dev *mdev = priv->mdev; + int err; +@@ -171,8 +175,13 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, + ring->qp_state = MLX4_QP_STATE_RST; + ring->doorbell_qpn = ring->qp.qpn << 8; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) || defined (CONFIG_COMPAT_NEW_TX_RING_SCHEME) + mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, + ring->cqn, user_prio, &ring->context); ++#else ++ mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, ++ ring->cqn, &ring->context); ++#endif + if (ring->bf_enabled) + ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); + +@@ -567,16 +576,34 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk + u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) + { + struct mlx4_en_priv *priv = netdev_priv(dev); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) || defined (CONFIG_COMPAT_NEW_TX_RING_SCHEME) + u16 rings_p_up = priv->num_tx_rings_p_up; + u8 up = 0; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + if 
(dev->num_tc) ++#else ++ if (netdev_get_num_tc(dev)) ++#endif + return skb_tx_hash(dev, skb); + + if (vlan_tx_tag_present(skb)) + up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; + + return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up; ++#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) || defined (CONFIG_COMPAT_NEW_TX_RING_SCHEME) */ ++ u16 vlan_tag = 0; ++ ++ /* If we support per priority flow control and the packet contains ++ * a vlan tag, send the packet to the TX ring assigned to that priority ++ */ ++ if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) { ++ vlan_tag = vlan_tx_tag_get(skb); ++ return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13); ++ } ++ ++ return skb_tx_hash(dev, skb); ++#endif + } + + static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt) +@@ -732,8 +759,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) + * set flag for further reference + */ + if (ring->hwtstamp_tx_type == HWTSTAMP_TX_ON && ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37) + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; ++#else ++ skb_shinfo(skb)->tx_flags.flags & SKBTX_HW_TSTAMP) { ++ skb_shinfo(skb)->tx_flags.flags |= SKBTX_IN_PROGRESS; ++#endif + tx_info->ts_requested = 1; + } + +diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/net/ethernet/mellanox/mlx4/eq.c ++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c +@@ -39,7 +39,9 @@ + #include + + #include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + #include ++#endif + + #include "mlx4.h" + #include "fw.h" +@@ -1312,8 +1314,12 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) + } + EXPORT_SYMBOL(mlx4_test_interrupts); + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, + int *vector) ++#else ++int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector) ++#endif + { + + struct mlx4_priv *priv = mlx4_priv(dev); +@@ -1327,6 +1333,7 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, + snprintf(priv->eq_table.irq_names + + vec * MLX4_IRQNAME_SIZE, + MLX4_IRQNAME_SIZE, "%s", name); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + #ifdef CONFIG_RFS_ACCEL + if (rmap) { + err = irq_cpu_rmap_add(rmap, +@@ -1335,6 +1342,7 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, + mlx4_warn(dev, "Failed adding irq rmap\n"); + } + #endif ++#endif + err = request_irq(priv->eq_table.eq[vec].irq, + mlx4_msi_x_interrupt, 0, + &priv->eq_table.irq_names[vec<<5], +diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h ++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +@@ -58,6 +58,23 @@ + #define DRV_NAME "mlx4_en" + #define DRV_VERSION "2.0" + #define DRV_RELDATE "Dec 2011" ++#ifndef CONFIG_COMPAT_DISABLE_DCB ++#ifdef CONFIG_MLX4_EN_DCB ++ ++#ifndef CONFIG_NET_SCH_MULTIQ ++#define CONFIG_COMPAT_MQPRIO ++#endif ++ ++#endif ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)) ++#define CONFIG_COMPAT_INDIR_SETTING ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) && !defined(CONFIG_COMPAT_HAS_NUM_CHANNELS) ++#define CONFIG_COMPAT_NUM_CHANNELS ++#endif + + #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) + +@@ -722,7 +739,11 @@ int 
mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ri + void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); + int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) || defined (CONFIG_COMPAT_NEW_TX_RING_SCHEME) + int cq, int user_prio); ++#else ++ int cq); ++#endif + void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring); + +@@ -740,7 +761,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, + int budget); + int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); + void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) || defined (CONFIG_COMPAT_NEW_TX_RING_SCHEME) + int is_tx, int rss, int qpn, int cqn, int user_prio, ++#else ++ int is_tx, int rss, int qpn, int cqn, ++#endif + struct mlx4_qp_context *context); + void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event); + int mlx4_en_map_buffer(struct mlx4_buf *buf); +@@ -760,14 +785,38 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv); + int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset); + int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port); + ++#ifndef CONFIG_COMPAT_DISABLE_DCB + #ifdef CONFIG_MLX4_EN_DCB + extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops; + extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops; + #endif ++#endif ++ ++#ifdef CONFIG_COMPAT_NUM_CHANNELS ++struct ethtool_channels { ++ __u32 cmd; ++ __u32 max_rx; ++ __u32 max_tx; ++ __u32 max_other; ++ __u32 max_combined; ++ __u32 rx_count; ++ __u32 tx_count; ++ __u32 other_count; ++ __u32 combined_count; ++}; ++ ++int mlx4_en_set_channels(struct net_device *dev, ++ struct ethtool_channels *channel); ++void mlx4_en_get_channels(struct net_device *dev, ++ struct ethtool_channels *channel); ++#endif + + int mlx4_en_setup_tc(struct net_device *dev, u8 up); + + #ifdef CONFIG_RFS_ACCEL ++#ifdef CONFIG_COMPAT_IS_NETDEV_EXTENDED ++#define mlx4_en_rx_cpu_rmap(__priv) netdev_extended(__priv->dev)->rfs_data.rx_cpu_rmap ++#endif + void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *rx_ring); + #endif +@@ -792,6 +841,9 @@ int mlx4_en_timestamp_config(struct net_device *dev, + /* Globals + */ + extern const struct ethtool_ops mlx4_en_ethtool_ops; ++#ifdef CONFIG_COMPAT_ETHTOOL_OPS_EXT ++extern const struct ethtool_ops_ext mlx4_en_ethtool_ops_ext; ++#endif + + + +@@ -825,4 +877,9 @@ do { \ + pr_warning("%s %s: " format, DRV_NAME, \ + dev_name(&mdev->pdev->dev), ##arg) + ++#ifdef CONFIG_COMPAT_INDIR_SETTING ++u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev); ++int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index); ++int mlx4_en_set_rxfh_indir(struct net_device *dev, const u32 *ring_index); ++#endif + #endif +diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h +index xxxxxxx..xxxxxxx xxxxxx +--- a/include/linux/mlx4/cmd.h ++++ b/include/linux/mlx4/cmd.h +@@ -238,9 +238,20 @@ u32 mlx4_comm_get_version(void); + int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); + int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); + int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) || defined(CONFIG_COMPAT_NDO_VF_MAC_VLAN) + int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, 
struct ifla_vf_info *ivf); ++#endif + int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)) ++enum { ++ IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */ ++ IFLA_VF_LINK_STATE_ENABLE, /* link always up */ ++ IFLA_VF_LINK_STATE_DISABLE, /* link always down */ ++ __IFLA_VF_LINK_STATE_MAX, ++}; ++#endif ++ + #define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) + + #endif /* MLX4_CMD_H */ +diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h +index xxxxxxx..xxxxxxx xxxxxx +--- a/include/linux/mlx4/device.h ++++ b/include/linux/mlx4/device.h +@@ -37,7 +37,9 @@ + #include + #include + #include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + #include ++#endif + + #include + +@@ -1090,8 +1092,12 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, + int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); + int mlx4_SYNC_TPT(struct mlx4_dev *dev); + int mlx4_test_interrupts(struct mlx4_dev *dev); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, + int *vector); ++#else ++int mlx4_assign_eq(struct mlx4_dev *dev, char* name , int* vector); ++#endif + void mlx4_release_eq(struct mlx4_dev *dev, int vec); + + int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); diff --git a/patches/0002-netlink-Avoid-pr_fmt-redefinition.patch b/patches/0002-netlink-Avoid-pr_fmt-redefinition.patch deleted file mode 100644 index 419de07..0000000 --- a/patches/0002-netlink-Avoid-pr_fmt-redefinition.patch +++ /dev/null @@ -1,22 +0,0 @@ -From: Vladimir Sokolovsky -Subject: [PATCH] netlink: Avoid pr_fmt redefinition - -Signed-off-by: Vladimir Sokolovsky ---- - drivers/infiniband/core/netlink.c | 3 +++ - 1 files changed, 3 insertions(+), 0 deletions(-) - -diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c -index xxxxxxx..xxxxxxx xxxxxx ---- a/drivers/infiniband/core/netlink.c -+++ b/drivers/infiniband/core/netlink.c -@@ -30,6 +30,9 @@ - * SOFTWARE. 
- */ - -+#ifdef pr_fmt -+#undef pr_fmt -+#endif - #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ - - #include diff --git a/patches/0003-BACKPORT-ib_ipoib.patch b/patches/0003-BACKPORT-ib_ipoib.patch new file mode 100644 index 0000000..2caf839 --- /dev/null +++ b/patches/0003-BACKPORT-ib_ipoib.patch @@ -0,0 +1,218 @@ +From: Vladimir Sokolovsky +Subject: [PATCH] BACKPORT: ib_ipoib + +Signed-off-by: Vladimir Sokolovsky +--- + drivers/infiniband/ulp/ipoib/ipoib.h | 3 ++ + drivers/infiniband/ulp/ipoib/ipoib_cm.c | 8 ++++++ + drivers/infiniband/ulp/ipoib/ipoib_main.c | 31 ++++++++++++++++++++++++ + drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 15 +++++++++++ + drivers/infiniband/ulp/ipoib/ipoib_netlink.c | 8 ++++++ + 5 files changed, 65 insertions(+), 0 deletions(-) + +diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/ulp/ipoib/ipoib.h ++++ b/drivers/infiniband/ulp/ipoib/ipoib.h +@@ -96,6 +96,9 @@ enum { + IPOIB_NEIGH_TBL_FLUSH = 12, + + IPOIB_MAX_BACKOFF_SECONDS = 16, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)) ++ IPOIB_FLAG_CSUM = 17, ++#endif + + IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */ + IPOIB_MCAST_FLAG_SENDONLY = 1, +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c +@@ -1379,7 +1379,11 @@ static void ipoib_cm_skb_reap(struct work_struct *work) + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); + #if IS_ENABLED(CONFIG_IPV6) + else if (skb->protocol == htons(ETH_P_IPV6)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); ++#else ++ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev); ++#endif + #endif + dev_kfree_skb_any(skb); + +@@ -1398,7 +1402,11 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb, + int e = skb_queue_empty(&priv->cm.skb_queue); + + if (skb_dst(skb)) ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) + skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); ++#else ++ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); ++#endif + + skb_queue_tail(&priv->cm.skb_queue, skb); + if (e) +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c +@@ -183,6 +183,7 @@ static void ipoib_uninit(struct net_device *dev) + ipoib_dev_cleanup(dev); + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features) + { + struct ipoib_dev_priv *priv = netdev_priv(dev); +@@ -192,6 +193,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu + + return features; + } ++#endif + + static int ipoib_change_mtu(struct net_device *dev, int new_mtu) + { +@@ -229,7 +231,14 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) + set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); + ipoib_warn(priv, "enabling connected mode " + "will cause multicast packet drops\n"); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + netdev_update_features(dev); ++#else ++ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO); ++ if (ipoib_cm_max_mtu(dev) > priv->mcast_mtu) ++ ipoib_warn(priv, "mtu > %d will cause 
multicast packet drops.\n", ++ priv->mcast_mtu); ++#endif + rtnl_unlock(); + priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; + +@@ -240,7 +249,16 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) + + if (!strcmp(buf, "datagram\n")) { + clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + netdev_update_features(dev); ++#else ++ if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) { ++ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; ++ ++ if (priv->hca_caps & IB_DEVICE_UD_TSO) ++ dev->features |= NETIF_F_TSO; ++ } ++#endif + dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); + rtnl_unlock(); + ipoib_flush_paths(dev); +@@ -1335,7 +1353,9 @@ static const struct net_device_ops ipoib_netdev_ops = { + .ndo_open = ipoib_open, + .ndo_stop = ipoib_stop, + .ndo_change_mtu = ipoib_change_mtu, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + .ndo_fix_features = ipoib_fix_features, ++#endif + .ndo_start_xmit = ipoib_start_xmit, + .ndo_tx_timeout = ipoib_timeout, + .ndo_set_rx_mode = ipoib_set_mcast_list, +@@ -1522,6 +1542,7 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca) + kfree(device_attr); + + if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) + priv->dev->hw_features = NETIF_F_SG | + NETIF_F_IP_CSUM | NETIF_F_RXCSUM; + +@@ -1529,6 +1550,14 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca) + priv->dev->hw_features |= NETIF_F_TSO; + + priv->dev->features |= priv->dev->hw_features; ++#else ++ set_bit(IPOIB_FLAG_CSUM, &priv->flags); ++ priv->dev->features |= NETIF_F_SG | ++ NETIF_F_IP_CSUM | NETIF_F_RXCSUM; ++ ++ if (priv->hca_caps & IB_DEVICE_UD_TSO) ++ priv->dev->features |= NETIF_F_TSO; ++#endif + } + + return 0; +@@ -1560,7 +1589,9 @@ static struct net_device *ipoib_add_port(const char *format, + priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); + priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) + priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh); ++#endif + + result = ib_query_pkey(hca, port, 0, &priv->pkey); + if (result) { +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +@@ -773,7 +773,11 @@ void ipoib_mcast_restart_task(struct work_struct *work) + struct ipoib_dev_priv *priv = + container_of(work, struct ipoib_dev_priv, restart_task); + struct net_device *dev = priv->dev; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) + struct netdev_hw_addr *ha; ++#else ++ struct dev_mc_list *mclist; ++#endif + struct ipoib_mcast *mcast, *tmcast; + LIST_HEAD(remove_list); + unsigned long flags; +@@ -798,6 +802,7 @@ void ipoib_mcast_restart_task(struct work_struct *work) + clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags); + + /* Mark all of the entries that are found or don't exist */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) + netdev_for_each_mc_addr(ha, dev) { + union ib_gid mgid; + +@@ -805,6 +810,16 @@ void ipoib_mcast_restart_task(struct work_struct *work) + continue; + + memcpy(mgid.raw, ha->addr + 4, sizeof mgid); ++#else ++ for (mclist = dev->mc_list; mclist; mclist = mclist->next) { ++ union ib_gid mgid; ++ ++ if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr, ++ dev->broadcast)) ++ continue; ++ ++ memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid); ++#endif + + mcast 
= __ipoib_mcast_find(dev, &mgid); + if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c +index xxxxxxx..xxxxxxx xxxxxx +--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c +@@ -135,7 +135,11 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev, + return err; + } + ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33) + static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head *head) ++#else ++static void ipoib_unregister_child_dev(struct net_device *dev) ++#endif + { + struct ipoib_dev_priv *priv, *ppriv; + +@@ -143,7 +147,11 @@ static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head + ppriv = netdev_priv(priv->parent); + + mutex_lock(&ppriv->vlan_mutex); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33) + unregister_netdevice_queue(dev, head); ++#else ++ unregister_netdevice(dev); ++#endif + list_del(&priv->list); + mutex_unlock(&ppriv->vlan_mutex); + } diff --git a/patches/0003-BACKPORT-mlx4_en.patch b/patches/0003-BACKPORT-mlx4_en.patch deleted file mode 100644 index 0b5f4bb..0000000 --- a/patches/0003-BACKPORT-mlx4_en.patch +++ /dev/null @@ -1,109 +0,0 @@ -From: Vladimir Sokolovsky -Subject: [PATCH] BACKPORT: mlx4_en - -Signed-off-by: Vladimir Sokolovsky ---- - drivers/net/ethernet/mellanox/mlx4/cmd.c | 6 ++++++ - drivers/net/ethernet/mellanox/mlx4/en_cq.c | 4 ++++ - drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4 ++++ - drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2 ++ - 4 files changed, 16 insertions(+), 0 deletions(-) - -diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c -index xxxxxxx..xxxxxxx xxxxxx ---- a/drivers/net/ethernet/mellanox/mlx4/cmd.c -+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c -@@ -2306,6 +2306,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) - } - EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk); - -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) || defined(CONFIG_COMPAT_NDO_VF_MAC_VLAN) - int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf) - { - struct mlx4_priv *priv = mlx4_priv(dev); -@@ -2333,12 +2334,17 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in - ivf->vlan = s_info->default_vlan; - ivf->qos = s_info->default_qos; - ivf->tx_rate = s_info->tx_rate; -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) - ivf->spoofchk = s_info->spoofchk; -+#endif -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) - ivf->linkstate = s_info->link_state; -+#endif - - return 0; - } - EXPORT_SYMBOL_GPL(mlx4_get_vf_config); -+#endif - - int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state) - { -diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c -index xxxxxxx..xxxxxxx xxxxxx ---- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c -+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c -@@ -139,7 +139,9 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, - - if (!cq->is_tx) { - netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); -+#ifdef CONFIG_NET_RX_BUSY_POLL - napi_hash_add(&cq->napi); -+#endif - napi_enable(&cq->napi); - } - -@@ -163,7 +165,9 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) - { - if (!cq->is_tx) { - napi_disable(&cq->napi); -+#ifdef 
CONFIG_NET_RX_BUSY_POLL - napi_hash_del(&cq->napi); -+#endif - synchronize_rcu(); - netif_napi_del(&cq->napi); - } -diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c -index xxxxxxx..xxxxxxx xxxxxx ---- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c -+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c -@@ -2111,6 +2111,7 @@ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_ - return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); - } - -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) - static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state) - { - struct mlx4_en_priv *en_priv = netdev_priv(dev); -@@ -2118,6 +2119,7 @@ static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_st - - return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); - } -+#endif - static const struct net_device_ops mlx4_netdev_ops = { - .ndo_open = mlx4_en_open, - .ndo_stop = mlx4_en_close, -@@ -2161,7 +2163,9 @@ static const struct net_device_ops mlx4_netdev_ops_master = { - .ndo_set_vf_mac = mlx4_en_set_vf_mac, - .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, - .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) - .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, -+#endif - .ndo_get_vf_config = mlx4_en_get_vf_config, - #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = mlx4_en_netpoll, -diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c -index xxxxxxx..xxxxxxx xxxxxx ---- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c -+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c -@@ -767,7 +767,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud - timestamp); - } - -+#ifdef CONFIG_NET_RX_BUSY_POLL - skb_mark_napi_id(skb, &cq->napi); -+#endif - - /* Push it up the stack */ - netif_receive_skb(skb); diff --git a/patches/0004-BACKPORT-mlx4_ib.patch b/patches/0004-BACKPORT-mlx4_ib.patch deleted file mode 100644 index 8b258f3..0000000 --- a/patches/0004-BACKPORT-mlx4_ib.patch +++ /dev/null @@ -1,38 +0,0 @@ -From: Vladimir Sokolovsky -Subject: [PATCH] BACKPORT: mlx4_ib - -Signed-off-by: Vladimir Sokolovsky ---- - drivers/infiniband/hw/mlx4/cm.c | 10 ++++++++++ - 1 files changed, 10 insertions(+), 0 deletions(-) - -diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c -index xxxxxxx..xxxxxxx xxxxxx ---- a/drivers/infiniband/hw/mlx4/cm.c -+++ b/drivers/infiniband/hw/mlx4/cm.c -@@ -204,6 +204,9 @@ static struct id_map_entry * - id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id) - { - int ret; -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) -+ static int next_id; -+#endif - struct id_map_entry *ent; - struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; - -@@ -222,8 +225,15 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id) - idr_preload(GFP_KERNEL); - spin_lock(&to_mdev(ibdev)->sriov.id_map_lock); - -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) - ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT); -+#else -+ ret = idr_alloc(&sriov->pv_id_table, ent, next_id, 0, GFP_NOWAIT); -+#endif - if (ret >= 0) { -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) -+ next_id = max(ret + 1, 0); -+#endif - ent->pv_cm_id = (u32)ret; - sl_id_map_add(ibdev, ent); - list_add_tail(&ent->list, &sriov->cm_list); diff --git 
a/patches/0001-iw_cxgb3-iw_cxgb4-Enable-header-file-inclusion-with-.patch b/patches/0004-iw_cxgb3-iw_cxgb4-Enable-header-file-inclusion-with-.patch similarity index 100% rename from patches/0001-iw_cxgb3-iw_cxgb4-Enable-header-file-inclusion-with-.patch rename to patches/0004-iw_cxgb3-iw_cxgb4-Enable-header-file-inclusion-with-.patch diff --git a/patches/quiltrc b/patches/quiltrc new file mode 100644 index 0000000..daf48dd --- /dev/null +++ b/patches/quiltrc @@ -0,0 +1,2 @@ +QUILT_DIFF_OPTS='-x .svn -p --ignore-matching-lines=$Id' +QUILT_PATCH_OPTS='-l' -- 2.46.0
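
Note on the backport style used throughout the series above: each hunk wraps an API that differs between the RHEL6.5 kernel and mainline in LINUX_VERSION_CODE / CONFIG_COMPAT_* guards so the same source builds on both. As an illustration only, a minimal sketch of that pattern is shown below; the helper name example_put_vlan_tag is hypothetical and not part of these patches, and it mirrors the __vlan_hwaccel_put_tag() signature change handled in the en_rx.c hunks.

    #include <linux/version.h>
    #include <linux/skbuff.h>
    #include <linux/if_vlan.h>
    #include <linux/if_ether.h>

    /* Hypothetical helper (not part of the patches above) showing the same
     * compat pattern: pick the __vlan_hwaccel_put_tag() call that matches
     * the kernel headers being built against.  The 802.1Q protocol argument
     * only exists from 3.10 on; older kernels take just the VLAN id. */
    static inline void example_put_vlan_tag(struct sk_buff *skb, u16 vid)
    {
    #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
            __vlan_hwaccel_put_tag(skb, vid);
    #else
            __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
    #endif
    }

The patches above instead open-code the #if at each call site, which keeps the backport diff easy to compare line-for-line against the upstream sources.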