compat-$(CONFIG_COMPAT_KERNEL_3_5) += compat-3.5.o
compat-$(CONFIG_COMPAT_KERNEL_3_6) += compat-3.6.o
compat-$(CONFIG_COMPAT_KERNEL_3_7) += compat-3.7.o
+compat-$(CONFIG_COMPAT_KERNEL_3_9) += compat-3.9.o
compat-$(CONFIG_COMPAT_CORDIC) += cordic.o
compat-$(CONFIG_COMPAT_CRC8) += crc8.o
--- /dev/null
+/*
+ * Copyright 2013 Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Compatibility file for Linux RDMA for kernels 3.9.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/export.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/igmp.h>
+#include <linux/icmp.h>
+#include <linux/sctp.h>
+#include <linux/dccp.h>
+#include <linux/if_tunnel.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+#include <net/flow_keys.h>
+
+/* Hash perturbation for XPS queue spreading.
+ * NOTE(review): never seeded in this file (stays 0); upstream seeds its
+ * equivalent via get_random_bytes() in an initcall -- confirm intended. */
+static u32 hashrnd __read_mostly;
+
+/*
+ * get_xps_queue() - backport of the XPS transmit-queue selection helper
+ * for kernels before 3.9.
+ *
+ * Looks up the XPS map of the current CPU under RCU and picks a TX queue
+ * for @skb.  Returns the selected queue index, or -1 when XPS is compiled
+ * out, no map is configured for this CPU, or the mapped index is out of
+ * range for @dev.
+ */
+#define get_xps_queue LINUX_BACKPORT(get_xps_queue)
+static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+ struct xps_dev_maps *dev_maps;
+ struct xps_map *map;
+ int queue_index = -1;
+
+ rcu_read_lock();
+ dev_maps = rcu_dereference(dev->xps_maps);
+ if (dev_maps) {
+ map = rcu_dereference(
+ dev_maps->cpu_map[raw_smp_processor_id()]);
+ if (map) {
+ /* Single mapped queue: no hashing needed. */
+ if (map->len == 1)
+ queue_index = map->queues[0];
+ else {
+ u32 hash;
+ /* Prefer the socket hash; fall back to
+ * protocol ^ rxhash for unhashed skbs. */
+ if (skb->sk && skb->sk->sk_hash)
+ hash = skb->sk->sk_hash;
+ else
+ hash = (__force u16) skb->protocol ^
+ skb->rxhash;
+ hash = jhash_1word(hash, hashrnd);
+ /* Scale hash into [0, map->len) without a
+ * modulo: (hash * len) >> 32. */
+ queue_index = map->queues[
+ ((u64)hash * map->len) >> 32];
+ }
+ /* Map may reference queues the device no longer
+ * exposes; treat that as "no XPS choice". */
+ if (unlikely(queue_index >= dev->real_num_tx_queues))
+ queue_index = -1;
+ }
+ }
+ rcu_read_unlock();
+
+ return queue_index;
+#else
+ return -1;
+#endif
+}
+
+/*
+ * __netdev_pick_tx() - backport of the 3.9 default TX queue selector.
+ *
+ * Reuses the queue index cached on the socket when it is still valid;
+ * otherwise consults XPS via get_xps_queue(), falls back to
+ * skb_tx_hash(), and re-caches the result on the socket.  The cache is
+ * only updated when the socket has a destination entry set, so a
+ * connected socket keeps a stable queue.
+ */
+#define __netdev_pick_tx LINUX_BACKPORT(__netdev_pick_tx)
+u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ int queue_index = sk_tx_queue_get(sk);
+
+ /* Recompute when nothing is cached, the skb tolerates reordering
+ * (ooo_okay), or the device shrank its queue count. */
+ if (queue_index < 0 || skb->ooo_okay ||
+ queue_index >= dev->real_num_tx_queues) {
+ int new_index = get_xps_queue(dev, skb);
+ if (new_index < 0)
+ new_index = skb_tx_hash(dev, skb);
+
+ if (queue_index != new_index && sk &&
+ rcu_access_pointer(sk->sk_dst_cache))
+ sk_tx_queue_set(sk, new_index);
+
+ queue_index = new_index;
+ }
+
+ return queue_index;
+}
+EXPORT_SYMBOL(__netdev_pick_tx);
#include <linux/compat-3.5.h>
#include <linux/compat-3.6.h>
#include <linux/compat-3.7.h>
+#include <linux/compat-3.8.h>
+#include <linux/compat-3.9.h>
#include <linux/compat-3.10.h>
#include <linux/compat-3.11.h>
#include <linux/compat-3.12.h>
--- /dev/null
+#ifndef LINUX_3_8_COMPAT_H
+#define LINUX_3_8_COMPAT_H
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
+
+/* ethtool RX flow-classification flag introduced in 3.8: marks a flow
+ * spec as carrying the extended (MAC header) fields. */
+#define FLOW_MAC_EXT 0x40000000
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)) */
+
+#endif /* LINUX_3_8_COMPAT_H */
--- /dev/null
+#ifndef LINUX_3_9_COMPAT_H
+#define LINUX_3_9_COMPAT_H
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
+
+/*
+ * Backport of the 3.9 default TX queue selector, implemented in
+ * compat-3.9.c.  That file defines and exports the function under the
+ * LINUX_BACKPORT()-renamed symbol; mirror the rename here so callers of
+ * __netdev_pick_tx() resolve to the exported backport symbol instead of
+ * an undefined upstream name.
+ */
+#define __netdev_pick_tx LINUX_BACKPORT(__netdev_pick_tx)
+extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) */
+
+#endif /* LINUX_3_9_COMPAT_H */
--- /dev/null
+#ifndef _COMPAT_LINUX_CPU_RMAP_H
+#define _COMPAT_LINUX_CPU_RMAP_H 1
+
+#include <linux/version.h>
+
+/* cpu_rmap (CPU affinity reverse-map) went upstream in 2.6.39; some
+ * distro kernels carry it earlier, which the build system signals via
+ * CONFIG_COMPAT_IS_LINUX_CPU_RMAP.  Only then is the real header
+ * available to chain to; otherwise this wrapper is intentionally empty. */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) || defined (CONFIG_COMPAT_IS_LINUX_CPU_RMAP)
+#include_next <linux/cpu_rmap.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) || defined(CONFIG_COMPAT_IS_LINUX_CPU_RMAP) */
+
+#endif /* _COMPAT_LINUX_CPU_RMAP_H */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
#include_next <linux/llist.h>
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
+#else
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3,1,0)) || (defined(CONFIG_SUSE_KERNEL) && defined(CONFIG_COMPAT_SLES_11_2))
#include_next <linux/llist.h>
#define llist_add_batch LINUX_BACKPORT(llist_add_batch)
extern bool llist_add_batch(struct llist_node *new_first,
struct llist_head *head);
#define llist_del_first LINUX_BACKPORT(llist_del_first)
extern struct llist_node *llist_del_first(struct llist_head *head);
-#else
-#if (defined(CONFIG_COMPAT_SLES_11_2) || defined(CONFIG_COMPAT_SLES_11_3))
-#include_next <linux/llist.h>
#else
#ifndef LLIST_H
struct llist_node *next;
};
+#ifndef LLIST_HEAD_INIT
#define LLIST_HEAD_INIT(name) { NULL }
+#endif
+#ifndef LLIST_HEAD
#define LLIST_HEAD(name) struct llist_head name = LLIST_HEAD_INIT(name)
+#endif
/**
* init_llist_head - initialize lock-less list head
#define llist_del_first LINUX_BACKPORT(llist_del_first)
extern struct llist_node *llist_del_first(struct llist_head *head);
-#endif /* (defined(CONFIG_COMPAT_SLES_11_2) || defined(CONFIG_COMPAT_SLES_11_3)) */
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3,1,0)) || (defined(CONFIG_SUSE_KERNEL) && defined(CONFIG_COMPAT_SLES_11_2)) */
#endif /* LLIST_H */
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0) */