compat-$(CONFIG_COMPAT_KERNEL_4_0) += compat-4.0.o
compat-$(CONFIG_COMPAT_KERNEL_4_1) += compat-4.1.o
compat-$(CONFIG_COMPAT_KERNEL_4_16) += compat-4.16.o
+
+compat-y += uuid.o
+compat-y += rhashtable.o
+
+compat-y += ../block/blk-mq-rdma.o
--- /dev/null
+#if (defined(RHEL_MAJOR) && RHEL_MAJOR -0 == 7 && RHEL_MINOR -0 >= 2) || \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+
+#ifndef HAVE_UUID_GEN
+#ifndef HAVE_UUID_BE_TO_BIN
+
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/uuid.h>
+#include <linux/random.h>
+
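+/*
+ * Byte-order tables used by __uuid_to_bin(): entry i gives the destination
+ * byte for the i-th hex pair of the UUID string.  uuid_le stores the first
+ * three fields little-endian; uuid_be keeps the string (big-endian) order.
+ */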
+const u8 uuid_le_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
+EXPORT_SYMBOL(uuid_le_index);
+const u8 uuid_be_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+EXPORT_SYMBOL(uuid_be_index);
+
+
+/**
+ * uuid_is_valid - checks if a UUID string is valid
+ * @uuid: UUID string to check
+ *
+ * Description:
+ * It checks that the UUID string follows the format:
+ * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ * where x is a hex digit.
+ *
+ * Return: true if input is valid UUID string.
+ */
+bool uuid_is_valid(const char *uuid)
+{
+ unsigned int i;
+
+ for (i = 0; i < UUID_STRING_LEN; i++) {
+ if (i == 8 || i == 13 || i == 18 || i == 23) {
+ if (uuid[i] != '-')
+ return false;
+ } else if (!isxdigit(uuid[i])) {
+ return false;
+ }
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(uuid_is_valid);
+
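+/*
+ * si[] holds the string offset of each hex pair in the canonical
+ * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" layout; ei[] selects the
+ * destination byte order (uuid_le_index or uuid_be_index above).
+ */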
+static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
+{
+ static const u8 si[16] = {0,2,4,6,9,11,14,16,19,21,24,26,28,30,32,34};
+ unsigned int i;
+
+ if (!uuid_is_valid(uuid))
+ return -EINVAL;
+
+ for (i = 0; i < 16; i++) {
+ int hi = hex_to_bin(uuid[si[i] + 0]);
+ int lo = hex_to_bin(uuid[si[i] + 1]);
+
+ b[ei[i]] = (hi << 4) | lo;
+ }
+
+ return 0;
+}
+
+int uuid_le_to_bin(const char *uuid, uuid_le *u)
+{
+ return __uuid_to_bin(uuid, u->b, uuid_le_index);
+}
+EXPORT_SYMBOL(uuid_le_to_bin);
+
+int uuid_be_to_bin(const char *uuid, uuid_be *u)
+{
+ return __uuid_to_bin(uuid, u->b, uuid_be_index);
+}
+EXPORT_SYMBOL(uuid_be_to_bin);
+
+
+#endif
+#endif
+
+#endif
],[
AC_MSG_RESULT(no)
])
+ AC_MSG_CHECKING([if struct bio has member bi_disk])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk_types.h>
+ ],[
+ struct bio b = {
+ .bi_disk = NULL,
+ };
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BIO_BI_DISK, 1,
+ [struct bio has member bi_disk])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if bio.h struct bio_integrity_payload has member bip_iter])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/bio.h>
+ #include <linux/bvec.h>
+ ],[
+ struct bvec_iter bip_it = {0};
+ struct bio_integrity_payload bip = {
+ .bip_iter = bip_it,
+ };
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BIO_INTEGRITY_PYLD_BIP_ITER, 1,
+ [bio_integrity_payload has members bip_iter])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if blkdev.h blk_add_request_payload has 4 parameters])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ blk_add_request_payload(NULL, NULL, 0, 0);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_ADD_REQUEST_PAYLOAD_HAS_4_PARAMS, 1,
+ [blkdev.h blk_add_request_payload has 4 parameters])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if blkdev.h has REQ_TYPE_DRV_PRIV])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ enum rq_cmd_type_bits rctb = REQ_TYPE_DRV_PRIV;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLKDEV_REQ_TYPE_DRV_PRIV, 1,
+ [REQ_TYPE_DRV_PRIV is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h has blk_freeze_queue_start])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ ],[
+ blk_freeze_queue_start(NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_FREEZE_QUEUE_START, 1,
+ [blk_freeze_queue_start is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if blkdev.h has BLK_INTEGRITY_DEVICE_CAPABLE])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ enum blk_integrity_flags bif = BLK_INTEGRITY_DEVICE_CAPABLE;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_INTEGRITY_DEVICE_CAPABLE, 1,
+ [BLK_INTEGRITY_DEVICE_CAPABLE is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if blkdev.h has BLK_MAX_WRITE_HINTS])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ int x = BLK_MAX_WRITE_HINTS;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MAX_WRITE_HINTS, 1,
+ [BLK_MAX_WRITE_HINTS is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h blk_mq_alloc_request has 3 parameters])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ ],[
+ blk_mq_alloc_request(NULL, 0, 0);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_ALLOC_REQUEST_HAS_3_PARAMS, 1,
+ [linux/blk-mq.h blk_mq_alloc_request has 3 parameters])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h blk_mq_complete_request has 2 parameters])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ ],[
+ blk_mq_complete_request(NULL, 0);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_COMPLETE_REQUEST_HAS_2_PARAMS, 1,
+ [linux/blk-mq.h blk_mq_complete_request has 2 parameters])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if blk_mq_end_request accepts blk_status_t as second parameter])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ #include <linux/blk_types.h>
+ ],[
+ blk_status_t error = BLK_STS_OK;
+
+ blk_mq_end_request(NULL, error);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_END_REQUEST_TAKES_BLK_STATUS_T, 1,
+ [blk_mq_end_request accepts blk_status_t as second parameter])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+
+ AC_MSG_CHECKING([if blkdev.h has blk_mq_quiesce_queue])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ #include <linux/blk-mq.h>
+ ],[
+ blk_mq_quiesce_queue(NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_QUIESCE_QUEUE, 1,
+ [blk_mq_quiesce_queue exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h blk_mq_reinit_tagset takes 2 params])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ ],[
+ blk_mq_reinit_tagset(NULL, NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_REINIT_TAGSET_2_PARAM, 1,
+ [blk_mq_reinit_tagset takes 2 params])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if blk-mq.h blk_mq_requeue_request has 2 parameters])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ ],[
+ blk_mq_requeue_request(NULL, false);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_REQUEUE_REQUEST_2_PARAMS, 1,
+ [blk-mq.h blk_mq_requeue_request has 2 parameters])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h has blk_mq_tagset_iter])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ ],[
+ blk_mq_tagset_iter(NULL, NULL, NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_TAGSET_ITER, 1,
+ [blk_mq_tagset_iter is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h has blk_mq_unquiesce_queue])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ ],[
+ blk_mq_unquiesce_queue(NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_UNQUIESCE_QUEUE, 1,
+ [blk_mq_unquiesce_queue is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blkdev.h has blk_queue_max_write_zeroes_sectors])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ blk_queue_max_write_zeroes_sectors(NULL, 0);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_QUEUE_MAX_WRITE_ZEROES_SECTORS, 1,
+ [blk_queue_max_write_zeroes_sectors is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if blkdev.h has blk_rq_nr_discard_segments])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ blk_rq_nr_discard_segments(NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_RQ_NR_DISCARD_SEGMENTS, 1,
+ [blk_rq_nr_discard_segments is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk_types.h has REQ_INTEGRITY])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk_types.h>
+ ],[
+ int x = REQ_INTEGRITY;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_TYPES_REQ_INTEGRITY, 1,
+ [REQ_INTEGRITY is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk_types.h has REQ_OP_DISCARD])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk_types.h>
+ ],[
+ int x = REQ_OP_DISCARD;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_TYPES_REQ_OP_DISCARD, 1,
+ [REQ_OP_DISCARD is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk_types.h has REQ_OP_DRV_OUT])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk_types.h>
+ ],[
+ enum req_opf xx = REQ_OP_DRV_OUT;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_TYPES_REQ_OP_DRV_OUT, 1,
+ [REQ_OP_DRV_OUT is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk_types.h has REQ_OP_FLUSH])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk_types.h>
+ ],[
+ int x = REQ_OP_FLUSH;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_TYPES_REQ_OP_FLUSH, 1,
+ [REQ_OP_FLUSH is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if cleanup_srcu_struct_quiesced exists])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/srcu.h>
+ ],[
+ cleanup_srcu_struct_quiesced(NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_CLEANUP_SRCU_STRUCT_QUIESCED, 1,
+ [linux/srcu.h cleanup_srcu_struct_quiesced is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if genhd.h has device_add_disk])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/genhd.h>
+ ],[
+ device_add_disk(NULL, NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_DEVICE_ADD_DISK, 1,
+ [genhd.h has device_add_disk])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if device.h has device_remove_file_self])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/device.h>
+ ],[
+ device_remove_file_self(NULL, NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_DEVICE_REMOVE_FILE_SELF, 1,
+ [device.h has device_remove_file_self])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if pm.h struct dev_pm_info has member set_latency_tolerance])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pm.h>
+ #include <asm/device.h>
+ #include <linux/types.h>
+
+ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
+ {
+ return;
+ }
+ ],[
+ struct dev_pm_info dpinfo = {
+ .set_latency_tolerance = nvme_set_latency_tolerance,
+ };
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_DEV_PM_INFO_SET_LATENCY_TOLERANCE, 1,
+ [set_latency_tolerance is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/lightnvm.h exists])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/lightnvm.h>
+ ],[
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_LIGHTNVM_H, 1,
+ [linux/lightnvm.h exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if lightnvm.h struct nvm_dev has member dev])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/lightnvm.h>
+ ],[
+ struct device devx = {0};
+ struct nvm_dev d = {
+ .dev = devx,
+ };
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_LIGHTNVM_NVM_DEV, 1,
+ [nvm_dev dev is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/sed-opal.h exists])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/sed-opal.h>
+ ],[
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_LINUX_SED_OPAL_H, 1,
+ [linux/sed-opal.h exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if uapi/linux/lightnvm.h has struct nvm_user_vio])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/genhd.h>
+ #include <uapi/linux/lightnvm.h>
+ ],[
+ struct nvm_user_vio vio;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_NVM_USER_VIO, 1,
+ [struct nvm_user_vio is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/pr.h exists])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/fs.h>
+ #include <linux/pr.h>
+ ],[
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_PR_H, 1,
+ [linux/pr.h exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if blkdev.h struct request has rq_flags])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ struct request rq = { .rq_flags = 0 };
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_REQUEST_RQ_FLAGS, 1,
+ [blkdev.h struct request has rq_flags])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/t10-pi.h exists])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/t10-pi.h>
+ ],[
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_T10_PI_H, 1,
+ [linux/t10-pi.h exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/uuid.h has uuid_is_null])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/uuid.h>
+ ],[
+ uuid_t uuid;
+ uuid_is_null(&uuid);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_UUID_IS_NULL, 1,
+ [uuid_is_null is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h has blk_mq_all_tag_busy_iter])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+
+ static void
+ nvme_cancel_request(struct request *req, void *data, bool reserved) {
+ return;
+ }
+ ],[
+ blk_mq_all_tag_busy_iter(NULL, nvme_cancel_request, NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_ALL_TAG_BUSY_ITER, 1,
+ [blk_mq_all_tag_busy_iter is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h has blk_mq_freeze_queue_wait_timeout])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ ],[
+ blk_mq_freeze_queue_wait_timeout(NULL, 0);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_FREEZE_QUEUE_WAIT_TIMEOUT, 1,
+ [blk_mq_freeze_queue_wait_timeout is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if struct blk_mq_ops has map_queues])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ ],[
+ struct blk_mq_ops ops = {
+ .map_queues = NULL,
+ };
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_OPS_MAP_QUEUES, 1,
+ [struct blk_mq_ops has map_queues])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if include/linux/blk-mq-pci.h exists])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq-pci.h>
+ ],[
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_PCI_H, 1,
+ [include/linux/blk-mq-pci.h exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if include/linux/blk-mq-pci.h has blk_mq_pci_map_queues])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq-pci.h>
+ ],[
+ blk_mq_pci_map_queues(NULL, NULL, 0);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_PCI_MAP_QUEUES_3_ARGS, 1,
+ [blk_mq_pci_map_queues is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk_types.h has blk_mq_req_flags_t])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk_types.h>
+ ],[
+ blk_mq_req_flags_t x = 0;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_REQ_FLAGS_T, 1,
+ [blk_mq_req_flags_t is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h has blk_mq_tagset_busy_iter])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+
+ static void
+ nvme_cancel_request(struct request *req, void *data, bool reserved) {
+ return;
+ }
+ ],[
+ blk_mq_tagset_busy_iter(NULL, nvme_cancel_request, NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_TAGSET_BUSY_ITER, 1,
+ [blk_mq_tagset_busy_iter is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk_types.h has blk_path_error])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/errno.h>
+ #include <linux/blkdev.h>
+ #include <linux/blk_types.h>
+ ],[
+ blk_path_error(0);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_PATH_ERROR, 1,
+ [blk_path_error is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blkdev.h has blk_queue_flag_set])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ blk_queue_flag_set(0, NULL);
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_QUEUE_FLAG_SET, 1,
+ [blk_queue_flag_set is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if blkdev.h has blk_queue_write_cache])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ blk_queue_write_cache(NULL, 0, 0);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_QUEUE_WRITE_CACHE, 1,
+ [blkdev.h has blk_queue_write_cache])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blkdev.h has blk_rq_is_passthrough])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ blk_rq_is_passthrough(NULL);
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_RQ_IS_PASSTHROUGH, 1,
+ [blk_rq_is_passthrough is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk_types.h has blk_status_t])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk_types.h>
+ ],[
+ blk_status_t xx;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_STATUS_T, 1,
+ [blk_status_t is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if blkdev.h has QUEUE_FLAG_WC_FUA])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ int x = QUEUE_FLAG_WC;
+ int y = QUEUE_FLAG_FUA;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_QUEUE_FLAG_WC_FUA, 1,
+ [QUEUE_FLAG_WC_FUA is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if struct request_queue has q_usage_counter])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ struct percpu_ref counter = {0};
+ struct request_queue rq = {
+ .q_usage_counter = counter,
+ };
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_REQUEST_QUEUE_Q_USAGE_COUNTER, 1,
+ [struct request_queue has q_usage_counter])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h has blk_mq_freeze_queue_wait])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ ],[
+ blk_mq_freeze_queue_wait(NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_FREEZE_QUEUE_WAIT, 1,
+ [blk_mq_freeze_queue_wait is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/io-64-nonatomic-lo-hi.h exists])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/io-64-nonatomic-lo-hi.h>
+ ],[
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_IO_64_NONATOMIC_LO_HI_H, 1,
+ [linux/io-64-nonatomic-lo-hi.h exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if slab.h has kmalloc_array])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/slab.h>
+ ],[
+ kmalloc_array(0, 0, 0);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_KMALLOC_ARRAY, 1,
+ [kmalloc_array is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if slab.h has kmalloc_array_node])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/slab.h>
+ ],[
+ kmalloc_array_node(0, 0, 0, 0);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_KMALLOC_ARRAY_NODE, 1,
+ [kmalloc_array_node is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if include/linux/once.h exists])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/once.h>
+ ],[
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_ONCE_H, 1,
+ [include/linux/once.h exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if blk-mq.h has BLK_MQ_F_NO_SCHED])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ ],[
+ int x = BLK_MQ_F_NO_SCHED;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_F_NO_SCHED, 1,
+ [BLK_MQ_F_NO_SCHED is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h blk_mq_ops init_request has 4 parameters])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+
+ int init_request(struct blk_mq_tag_set *set, struct request * req,
+ unsigned int i, unsigned int k) {
+ return 0;
+ }
+ ],[
+ struct blk_mq_ops ops = {
+ .init_request = init_request,
+ };
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_OPS_INIT_REQUEST_HAS_4_PARAMS, 1,
+ [linux/blk-mq.h blk_mq_ops init_request has 4 parameters])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if blkdev.h has blk_mq_poll])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ blk_mq_poll(NULL, 0);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_POLL, 1,
+ [blk_mq_poll exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h blk_mq_tag_set member ops is const])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ static const struct blk_mq_ops xmq = {0};
+
+ ],[
+ struct blk_mq_tag_set x = {
+ .ops = &xmq,
+ };
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_TAG_SET_HAS_CONST_POS, 1,
+ [blk_mq_tag_set member ops is const])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h has blk_mq_update_nr_hw_queues])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ ],[
+ blk_mq_update_nr_hw_queues(NULL, 0);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_UPDATE_NR_HW_QUEUES, 1,
+ [blk_mq_update_nr_hw_queues is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if blkdev.h has blk_rq_payload_bytes])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ blk_rq_payload_bytes(NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_RQ_NR_PAYLOAD_BYTES, 1,
+ [blk_rq_payload_bytes exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if blkdev.h has blk_rq_nr_phys_segments])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ blk_rq_nr_phys_segments(NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_RQ_NR_PHYS_SEGMENTS, 1,
+ [blk_rq_nr_phys_segments exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if dma-mapping.h has DMA_ATTR_NO_WARN])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/dma-mapping.h>
+ ],[
+ int x = DMA_ATTR_NO_WARN;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_DMA_ATTR_NO_WARN, 1,
+ [DMA_ATTR_NO_WARN is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if dma-mapping.h dma_alloc_attrs takes unsigned long attrs])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/dma-mapping.h>
+ ],[
+ dma_alloc_attrs(NULL, 0, NULL, GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_DMA_SET_ATTR_TAKES_UNSIGNED_LONG_ATTRS, 1,
+ [dma_alloc_attrs takes unsigned long attrs])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if interrupt.h has irq_calc_affinity_vectors with 3 args])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/interrupt.h>
+ ],[
+ int x = irq_calc_affinity_vectors(0, 0, NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_IRQ_CALC_AFFINITY_VECTORS_3_ARGS, 1,
+ [irq_calc_affinity_vectors is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if pci.h has pci_bus_addr_t])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pci.h>
+ ],[
+ pci_bus_addr_t x = 0;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_PCI_BUS_ADDR_T, 1,
+ [pci_bus_addr_t is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if pci.h struct pci_error_handlers has reset_done])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pci.h>
+
+ void reset_done(struct pci_dev *dev) {
+ return;
+ }
+ ],[
+ struct pci_error_handlers x = {
+ .reset_done = reset_done,
+ };
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_PCI_ERROR_HANDLERS_RESET_DONE, 1,
+ [pci.h struct pci_error_handlers has reset_done])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if pci.h struct pci_error_handlers has reset_notify])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pci.h>
+
+ void reset(struct pci_dev *dev, bool prepare) {
+ return;
+ }
+ ],[
+ struct pci_error_handlers x = {
+ .reset_notify = reset,
+ };
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_PCI_ERROR_HANDLERS_RESET_NOTIFY, 1,
+ [pci.h struct pci_error_handlers has reset_notify])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if pci.h struct pci_error_handlers has reset_prepare])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pci.h>
+
+ void reset_prepare(struct pci_dev *dev) {
+ return;
+ }
+ ],[
+ struct pci_error_handlers x = {
+ .reset_prepare = reset_prepare,
+ };
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_PCI_ERROR_HANDLERS_RESET_PREPARE, 1,
+ [pci.h struct pci_error_handlers has reset_prepare])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/pci.h has pci_free_irq])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pci.h>
+ ],[
+ pci_free_irq(NULL, 0, NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_PCI_FREE_IRQ, 1,
+ [linux/pci.h has pci_free_irq])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if blkdev.h has req_op])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blkdev.h>
+ ],[
+ struct request *req;
+ req_op(req);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_REQ_OP, 1,
+ [req_op exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h has blk_mq_alloc_request_hctx])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ ],[
+ blk_mq_alloc_request_hctx(NULL, 0, 0, 0);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_ALLOC_REQUEST_HCTX, 1,
+ [linux/blk-mq.h has blk_mq_alloc_request_hctx])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h has blk_mq_map_queues])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+ ],[
+ blk_mq_map_queues(NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_MAP_QUEUES, 1,
+ [blk_mq_map_queues is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h blk_mq_ops exit_request has 3 parameters])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+
+ void exit_request(struct blk_mq_tag_set *set, struct request * req,
+ unsigned int i) {
+ return;
+ }
+ ],[
+ struct blk_mq_ops ops = {
+ .exit_request = exit_request,
+ };
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_OPS_EXIT_REQUEST_HAS_3_PARAMS, 1,
+ [linux/blk-mq.h blk_mq_ops exit_request has 3 parameters])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if scsi.h has SCSI_MAX_SG_SEGMENTS])
+ LB_LINUX_TRY_COMPILE([
+ #include <scsi/scsi.h>
+ ],[
+ int x = SCSI_MAX_SG_SEGMENTS;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_SCSI_MAX_SG_SEGMENTS, 1,
+ [SCSI_MAX_SG_SEGMENTS is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/scatterlist.h sg_alloc_table_chained has 4 parameters])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/scatterlist.h>
+ ],[
+ gfp_t gfp_mask;
+ sg_alloc_table_chained(NULL, 0, gfp_mask, NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_SG_ALLOC_TABLE_CHAINED_4_PARAMS, 1,
+ [sg_alloc_table_chained has 4 parameters])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/blk-mq.h struct blk_mq_ops has field reinit_request])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/blk-mq.h>
+
+ static int
+ nvme_fc_reinit_request(void *data, struct request *rq)
+ {
+ return 0;
+ }
+ ],[
+ static struct blk_mq_ops nvme_fc_mq_ops = {
+ .reinit_request = nvme_fc_reinit_request,
+ };
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_MQ_OPS_REINIT_REQUEST, 1,
+ [struct blk_mq_ops has field reinit_request])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/nvme-fc-driver.h exists])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/scatterlist.h>
+ #include <uapi/scsi/fc/fc_fs.h>
+ #include <linux/nvme-fc-driver.h>
+ ],[
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_LINUX_NVME_FC_DRIVER_H, 1,
+ [linux/nvme-fc-driver.h exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if pci.h has pci_irq_get_affinity])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pci.h>
+ ],[
+ pci_irq_get_affinity(NULL, 0);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_PCI_IRQ_GET_AFFINITY, 1,
+ [pci_irq_get_affinity is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ LB_CHECK_SYMBOL_EXPORT([elfcorehdr_addr],
+ [kernel/crash_dump.c],
+ [AC_DEFINE(HAVE_ELFCOREHDR_ADDR_EXPORTED, 1,
+ [elfcorehdr_addr is exported by the kernel])],
+ [])
+
+ LB_CHECK_SYMBOL_EXPORT([fib_lookup],
+ [net/ipv4/fib_rules.c],
+ [AC_DEFINE(HAVE_FIB_LOOKUP_EXPORTED, 1,
+ [fib_lookup is exported by the kernel])],
+ [])
+
+ AC_MSG_CHECKING([if pci.h has pci_irq_get_node])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pci.h>
+ ],[
+ pci_irq_get_node(NULL, 0);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_PCI_IRQ_GET_NODE, 1,
+ [pci_irq_get_node is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if pci.h has pci_num_vf])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pci.h>
+ ],[
+ struct pci_dev x;
+ pci_num_vf(&x);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_PCI_NUM_VF, 1,
+ [pci_num_vf is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if pci.h has pci_physfn])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pci.h>
+ ],[
+ struct pci_dev x;
+ pci_physfn(&x);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_PCI_PHYSFN, 1,
+ [pci_physfn is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if pci.h has pci_release_mem_regions])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pci.h>
+ ],[
+ pci_release_mem_regions(NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_PCI_RELEASE_MEM_REGIONS, 1,
+ [pci_release_mem_regions is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if pci.h has pci_request_mem_regions])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pci.h>
+ ],[
+ pci_request_mem_regions(NULL, NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_PCI_REQUEST_MEM_REGIONS, 1,
+ [pci_request_mem_regions is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if pci.h has pci_sriov_get_totalvfs])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pci.h>
+ ],[
+ int x = pci_sriov_get_totalvfs(NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_PCI_SRIOV_GET_TOTALVFS, 1,
+ [pci_sriov_get_totalvfs is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if pci.h has pci_vfs_assigned])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pci.h>
+ ],[
+ struct pci_dev pdev;
+ pci_vfs_assigned(&pdev);
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_PCI_VFS_ASSIGNED, 1,
+ [pci_vfs_assigned is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if include/linux/sizes.h exists])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/sizes.h>
+ ],[
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_SIZES_H, 1,
+ [include/linux/sizes.h exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/uuid.h has uuid_be_to_bin])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/uuid.h>
+ ],[
+ uuid_be_to_bin(NULL, NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_UUID_BE_TO_BIN, 1,
+ [uuid_be_to_bin is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/uuid.h has uuid_equal])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/uuid.h>
+ ],[
+ uuid_equal(NULL, NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_UUID_EQUAL, 1,
+ [uuid_equal is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if linux/uuid.h has uuid_gen])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/uuid.h>
+ ],[
+ uuid_t id;
+ uuid_gen(&id);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_UUID_GEN, 1,
+ [uuid_gen is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if pm_qos.h has DEV_PM_QOS_LATENCY_TOLERANCE])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pm_qos.h>
+ ],[
+ enum dev_pm_qos_req_type type = DEV_PM_QOS_LATENCY_TOLERANCE;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_DEV_PM_QOS_LATENCY_TOLERANCE, 1,
+ [DEV_PM_QOS_LATENCY_TOLERANCE is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if pm_qos.h has DEV_PM_QOS_RESUME_LATENCY])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pm_qos.h>
+ ],[
+ enum dev_pm_qos_req_type type = DEV_PM_QOS_RESUME_LATENCY;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_DEV_PM_QOS_RESUME_LATENCY, 1,
+ [DEV_PM_QOS_RESUME_LATENCY is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if pm_qos.h has PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT])
+ LB_LINUX_TRY_COMPILE([
+ #include <linux/pm_qos.h>
+ ],[
+ int x = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT, 1,
+ [PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ AC_MSG_CHECKING([if route.h has ip4_dst_hoplimit])
+ LB_LINUX_TRY_COMPILE([
+ #include <net/route.h>
+ ],[
+ ip4_dst_hoplimit(NULL);
+
+ return 0;
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_IP4_DST_HOPLIMIT, 1,
+ [ip4_dst_hoplimit is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+
+ LB_CHECK_SYMBOL_EXPORT([irq_to_desc],
+ [kernel/irq/irqdesc.c],
+ [AC_DEFINE(HAVE_IRQ_TO_DESC_EXPORTED, 1,
+ [irq_to_desc is exported by the kernel])],
+ [])
+
+ LB_CHECK_SYMBOL_EXPORT([dev_pm_qos_update_user_latency_tolerance],
+ [drivers/base/power/qos.c],
+ [AC_DEFINE(HAVE_PM_QOS_UPDATE_USER_LATENCY_TOLERANCE_EXPORTED, 1,
+ [dev_pm_qos_update_user_latency_tolerance is exported by the kernel])],
+ [])
+
])
#
# COMPAT_CONFIG_HEADERS
--- /dev/null
+#ifndef _COMPAT_LINUX_BLK_MQ_PCI_H
+#define _COMPAT_LINUX_BLK_MQ_PCI_H 1
+
+#include "../../compat/config.h"
+
+#ifdef HAVE_BLK_MQ_PCI_H
+#include_next <linux/blk-mq-pci.h>
+#endif
+
+#if defined(HAVE_BLK_MQ_OPS_MAP_QUEUES) && \
+ !defined(HAVE_BLK_MQ_PCI_MAP_QUEUES_3_ARGS)
+
+#include <linux/blk-mq.h>
+#include <linux/pci.h>
+
+static inline
+int __blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+ int offset)
+{
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
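+ /*
+  * For each hw queue, take the affinity mask of its interrupt vector and
+  * map those CPUs to the queue; if any vector lacks affinity info, fall
+  * back to routing every CPU to queue 0.
+  */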
+ for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ mask = pci_irq_get_affinity(pdev, queue + offset);
+ if (!mask)
+ goto fallback;
+
+ for_each_cpu(cpu, mask)
+ set->mq_map[cpu] = queue;
+ }
+
+ return 0;
+
+fallback:
+ WARN_ON_ONCE(set->nr_hw_queues > 1);
+ for_each_possible_cpu(cpu)
+ set->mq_map[cpu] = 0;
+ return 0;
+}
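+
+/*
+ * Typical use (sketch): a driver's blk_mq_ops.map_queues callback can call
+ * __blk_mq_pci_map_queues(set, pdev, 0) to spread hw queues according to
+ * MSI-X vector affinity, mirroring the upstream blk_mq_pci_map_queues().
+ */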
+#endif
+
+#endif /* _COMPAT_LINUX_BLK_MQ_PCI_H */
--- /dev/null
+#ifndef _COMPAT_LINUX_BLK_MQ_H
+#define _COMPAT_LINUX_BLK_MQ_H
+
+#include "../../compat/config.h"
+#include <linux/version.h>
+
+#include_next <linux/blk-mq.h>
+
+#ifndef HAVE_BLK_MQ_FREEZE_QUEUE_WAIT_TIMEOUT
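+/*
+ * Older kernels lack blk_mq_freeze_queue_wait_timeout(); emulate it by
+ * waiting on mq_freeze_wq until the queue usage refcount (q_usage_counter
+ * or, on older kernels, mq_usage_counter) drops to zero.
+ */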
+static inline int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
+ unsigned long timeout)
+{
+ return wait_event_timeout(q->mq_freeze_wq,
+#ifdef HAVE_REQUEST_QUEUE_Q_USAGE_COUNTER
+ percpu_ref_is_zero(&q->q_usage_counter),
+#else
+ percpu_ref_is_zero(&q->mq_usage_counter),
+#endif
+ timeout);
+}
+#endif
+
+#ifndef HAVE_BLK_MQ_FREEZE_QUEUE_WAIT
+static inline void blk_mq_freeze_queue_wait(struct request_queue *q)
+{
+#ifdef HAVE_REQUEST_QUEUE_Q_USAGE_COUNTER
+ wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
+#else
+ wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+#endif
+}
+#endif
+
+#if !defined(HAVE_BLK_MQ_TAGSET_BUSY_ITER) && \
+ defined(HAVE_BLK_MQ_ALL_TAG_BUSY_ITER)
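+/*
+ * Emulate the per-tagset iterator on kernels that only provide the
+ * per-tags blk_mq_all_tag_busy_iter(): walk every hw queue's tags.
+ */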
+static inline void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+ busy_tag_iter_fn *fn, void *priv)
+{
+ int i;
+
+ for (i = 0; i < tagset->nr_hw_queues; i++) {
+ if (tagset->tags && tagset->tags[i])
+ blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
+ }
+}
+#endif
+
+#ifndef HAVE_BLK_STATUS_T
+
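+/*
+ * Kernels without blk_status_t report completion status via the
+ * BLK_MQ_RQ_QUEUE_* codes and negative errnos; alias the newer names
+ * onto those values.
+ */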
+typedef int blk_status_t;
+#define BLK_STS_OK BLK_MQ_RQ_QUEUE_OK
+#define BLK_STS_RESOURCE BLK_MQ_RQ_QUEUE_BUSY
+#define BLK_STS_IOERR BLK_MQ_RQ_QUEUE_ERROR
+
+#define BLK_STS_NOSPC -ENOSPC
+#define BLK_STS_NOTSUPP -EOPNOTSUPP
+#define BLK_STS_MEDIUM -ENODATA
+#define BLK_STS_TIMEOUT -ETIMEDOUT
+#define BLK_STS_TRANSPORT -ENOLINK
+#define BLK_STS_TARGET -EREMOTEIO
+#define BLK_STS_NEXUS -EBADE
+#define BLK_STS_PROTECTION -EILSEQ
+
+#endif /* HAVE_BLK_STATUS_T */
+
+#ifndef HAVE_BLK_PATH_ERROR
+static inline bool blk_path_error(blk_status_t error)
+{
+ switch (error) {
+ case BLK_STS_NOTSUPP:
+ case BLK_STS_NOSPC:
+ case BLK_STS_TARGET:
+ case BLK_STS_NEXUS:
+ case BLK_STS_MEDIUM:
+ case BLK_STS_PROTECTION:
+ return false;
+ }
+
+ /* Anything else could be a path failure, so should be retried */
+ return true;
+}
+#endif
+
+#endif /* _COMPAT_LINUX_BLK_MQ_H */
--- /dev/null
+#ifndef __COMPAT_LINUX_BLK_TYPES_H
+#define __COMPAT_LINUX_BLK_TYPES_H
+
+#include "../../compat/config.h"
+
+#include_next <linux/blk_types.h>
+
+#ifndef HAVE_BLK_MQ_REQ_FLAGS_T
+typedef __u32 __bitwise blk_mq_req_flags_t;
+#endif
+
+#endif /* __COMPAT_LINUX_BLK_TYPES_H */
--- /dev/null
+#ifndef _COMPAT_LINUX_BLKDEV_H
+#define _COMPAT_LINUX_BLKDEV_H
+
+#include "../../compat/config.h"
+
+#include_next <linux/blkdev.h>
+
+#ifndef HAVE_BLK_RQ_IS_PASSTHROUGH
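+/* Older kernels mark passthrough requests via rq->cmd_type, not a request op. */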
+static inline bool blk_rq_is_passthrough(struct request *rq)
+{
+ return rq->cmd_type != REQ_TYPE_FS;
+}
+#endif
+
+#ifndef HAVE_BLK_QUEUE_WRITE_CACHE
+#ifdef HAVE_QUEUE_FLAG_WC_FUA
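+/*
+ * Fallback for kernels that have the WC/FUA queue flags but not the
+ * blk_queue_write_cache() helper: toggle the flags under the queue lock,
+ * as the upstream helper does.
+ */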
+static inline void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+ spin_lock_irq(q->queue_lock);
+ if (wc)
+ queue_flag_set(QUEUE_FLAG_WC, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_WC, q);
+ if (fua)
+ queue_flag_set(QUEUE_FLAG_FUA, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_FUA, q);
+ spin_unlock_irq(q->queue_lock);
+
+ wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+}
+#else
+static inline void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+}
+#endif
+#endif
+
+#ifndef HAVE_BLK_QUEUE_FLAG_SET
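+/*
+ * On kernels without blk_queue_flag_set() and friends, wrap the older
+ * queue_flag_* helpers, which require the queue lock to be held.
+ */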
+static inline void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ queue_flag_set(flag, q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static inline void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ queue_flag_clear(flag, q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static inline bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
+{
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ res = queue_flag_test_and_set(flag, q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return res;
+}
+#endif
+
+#endif /* _COMPAT_LINUX_BLKDEV_H */
--- /dev/null
+/*
+ * Copyright (c) 2016, Avago Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _NVME_FC_DRIVER_H
+#define _NVME_FC_DRIVER_H 1
+
+
+/*
+ * ********************** LLDD FC-NVME Host API ********************
+ *
+ * For FC LLDD's that are the NVME Host role.
+ *
+ * ******************************************************************
+ */
+
+
+
+/* FC Port role bitmask - can merge with FC Port Roles in fc transport */
+#define FC_PORT_ROLE_NVME_INITIATOR 0x10
+#define FC_PORT_ROLE_NVME_TARGET 0x20
+#define FC_PORT_ROLE_NVME_DISCOVERY 0x40
+
+
+/**
+ * struct nvme_fc_port_info - port-specific ids and FC connection-specific
+ * data element used during NVME Host role
+ * registrations
+ *
+ * Static fields describing the port being registered:
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx)
+ * @dev_loss_tmo: maximum delay for reconnects to an association on
+ * this device. Used only on a remoteport.
+ *
+ * Initialization values for dynamic port fields:
+ * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
+ * be set to 0.
+ */
+struct nvme_fc_port_info {
+ u64 node_name;
+ u64 port_name;
+ u32 port_role;
+ u32 port_id;
+ u32 dev_loss_tmo;
+};
+
+
+/**
+ * struct nvmefc_ls_req - Request structure passed from NVME-FC transport
+ * to LLDD in order to perform a NVME FC-4 LS
+ * request and obtain a response.
+ *
+ * Values set by the NVME-FC layer prior to calling the LLDD ls_req
+ * entrypoint.
+ * @rqstaddr: pointer to request buffer
+ * @rqstdma: PCI DMA address of request buffer
+ * @rqstlen: Length, in bytes, of request buffer
+ * @rspaddr: pointer to response buffer
+ * @rspdma: PCI DMA address of response buffer
+ * @rsplen: Length, in bytes, of response buffer
+ * @timeout: Maximum amount of time, in seconds, to wait for the LS response.
+ * If timeout exceeded, LLDD to abort LS exchange and complete
+ * LS request with error status.
+ * @private: pointer to memory allocated alongside the ls request structure
+ * that is specifically for the LLDD to use while processing the
+ * request. The length of the buffer corresponds to the
+ * lsrqst_priv_sz value specified in the nvme_fc_port_template
+ * supplied by the LLDD.
+ * @done: The callback routine the LLDD is to invoke upon completion of
+ * the LS request. req argument is the pointer to the original LS
+ * request structure. Status argument must be 0 upon success, a
+ * negative errno on failure (example: -ENXIO).
+ */
+struct nvmefc_ls_req {
+ void *rqstaddr;
+ dma_addr_t rqstdma;
+ u32 rqstlen;
+ void *rspaddr;
+ dma_addr_t rspdma;
+ u32 rsplen;
+ u32 timeout;
+
+ void *private;
+
+ void (*done)(struct nvmefc_ls_req *req, int status);
+
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+
+enum nvmefc_fcp_datadir {
+ NVMEFC_FCP_NODATA, /* payload_length and sg_cnt will be zero */
+ NVMEFC_FCP_WRITE,
+ NVMEFC_FCP_READ,
+};
+
+
+/**
+ * struct nvmefc_fcp_req - Request structure passed from NVME-FC transport
+ * to LLDD in order to perform a NVME FCP IO operation.
+ *
+ * Values set by the NVME-FC layer prior to calling the LLDD fcp_io
+ * entrypoint.
+ * @cmdaddr: pointer to the FCP CMD IU buffer
+ * @rspaddr: pointer to the FCP RSP IU buffer
+ * @cmddma: PCI DMA address of the FCP CMD IU buffer
+ * @rspdma: PCI DMA address of the FCP RSP IU buffer
+ * @cmdlen: Length, in bytes, of the FCP CMD IU buffer
+ * @rsplen: Length, in bytes, of the FCP RSP IU buffer
+ * @payload_length: Length of DATA_IN or DATA_OUT payload data to transfer
+ * @sg_table: scatter/gather structure for payload data
+ * @first_sgl: memory for 1st scatter/gather list segment for payload data
+ * @sg_cnt: number of elements in the scatter/gather list
+ * @io_dir: direction of the FCP request (see NVMEFC_FCP_xxx)
+ * @sqid: The nvme SQID the command is being issued on
+ * @done: The callback routine the LLDD is to invoke upon completion of
+ * the FCP operation. req argument is the pointer to the original
+ * FCP IO operation.
+ * @private: pointer to memory allocated alongside the FCP operation
+ * request structure that is specifically for the LLDD to use
+ * while processing the operation. The length of the buffer
+ * corresponds to the fcprqst_priv_sz value specified in the
+ * nvme_fc_port_template supplied by the LLDD.
+ *
+ * Values set by the LLDD indicating completion status of the FCP operation.
+ * Must be set prior to calling the done() callback.
+ * @transferred_length: amount of payload data, in bytes, that were
+ * transferred. Should equal payload_length on success.
+ * @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
+ * @status: Completion status of the FCP operation. must be 0 upon success,
+ * negative errno value upon failure (ex: -EIO). Note: this is
+ * NOT a reflection of the NVME CQE completion status. Only the
+ * status of the FCP operation at the NVME-FC level.
+ */
+struct nvmefc_fcp_req {
+ void *cmdaddr;
+ void *rspaddr;
+ dma_addr_t cmddma;
+ dma_addr_t rspdma;
+ u16 cmdlen;
+ u16 rsplen;
+
+ u32 payload_length;
+ struct sg_table sg_table;
+ struct scatterlist *first_sgl;
+ int sg_cnt;
+ enum nvmefc_fcp_datadir io_dir;
+
+ __le16 sqid;
+
+ void (*done)(struct nvmefc_fcp_req *req);
+
+ void *private;
+
+ u32 transferred_length;
+ u16 rcv_rsplen;
+ u32 status;
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+
+/*
+ * Direct copy of fc_port_state enum. For later merging
+ */
+enum nvme_fc_obj_state {
+ FC_OBJSTATE_UNKNOWN,
+ FC_OBJSTATE_NOTPRESENT,
+ FC_OBJSTATE_ONLINE,
+ FC_OBJSTATE_OFFLINE, /* User has taken Port Offline */
+ FC_OBJSTATE_BLOCKED,
+ FC_OBJSTATE_BYPASSED,
+ FC_OBJSTATE_DIAGNOSTICS,
+ FC_OBJSTATE_LINKDOWN,
+ FC_OBJSTATE_ERROR,
+ FC_OBJSTATE_LOOPBACK,
+ FC_OBJSTATE_DELETED,
+};
+
+
+/**
+ * struct nvme_fc_local_port - structure used between NVME-FC transport and
+ * a LLDD to reference a local NVME host port.
+ * Allocated/created by the nvme_fc_register_localport()
+ * transport interface.
+ *
+ * Fields with static values for the port. Initialized by the
+ * port_info struct supplied to the registration call.
+ * @port_num: NVME-FC transport host port number
+ * @port_role: What NVME roles are supported on the port (see FC_PORT_ROLE_xxx)
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ * @private: pointer to memory allocated alongside the local port
+ * structure that is specifically for the LLDD to use.
+ * The length of the buffer corresponds to the local_priv_sz
+ * value specified in the nvme_fc_port_template supplied by
+ * the LLDD.
+ * @dev_loss_tmo: maximum delay for reconnects to an association on
+ * this device. To modify, lldd must call
+ * nvme_fc_set_remoteport_devloss().
+ *
+ * Fields with dynamic values. Values may change based on link state. LLDD
+ * may reference fields directly to change them. Initialized by the
+ * port_info struct supplied to the registration call.
+ * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
+ * be set to 0.
+ * @port_state: Operational state of the port.
+ */
+struct nvme_fc_local_port {
+ /* static/read-only fields */
+ u32 port_num;
+ u32 port_role;
+ u64 node_name;
+ u64 port_name;
+
+ void *private;
+
+ /* dynamic fields */
+ u32 port_id;
+ enum nvme_fc_obj_state port_state;
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+
+/**
+ * struct nvme_fc_remote_port - structure used between NVME-FC transport and
+ * a LLDD to reference a remote NVME subsystem port.
+ * Allocated/created by the nvme_fc_register_remoteport()
+ * transport interface.
+ *
+ * Fields with static values for the port. Initialized by the
+ * port_info struct supplied to the registration call.
+ * @port_num: NVME-FC transport remote subsystem port number
+ * @port_role: What NVME roles are supported on the port (see FC_PORT_ROLE_xxx)
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ * @localport: pointer to the NVME-FC local host port the subsystem is
+ * connected to.
+ * @private: pointer to memory allocated alongside the remote port
+ * structure that is specifically for the LLDD to use.
+ * The length of the buffer corresponds to the remote_priv_sz
+ * value specified in the nvme_fc_port_template supplied by
+ * the LLDD.
+ *
+ * Fields with dynamic values. Values may change based on link or login
+ * state. LLDD may reference fields directly to change them. Initialized by
+ * the port_info struct supplied to the registration call.
+ * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
+ * be set to 0.
+ * @port_state: Operational state of the remote port. Valid values are
+ * ONLINE or UNKNOWN.
+ */
+struct nvme_fc_remote_port {
+ /* static fields */
+ u32 port_num;
+ u32 port_role;
+ u64 node_name;
+ u64 port_name;
+ struct nvme_fc_local_port *localport;
+ void *private;
+ u32 dev_loss_tmo;
+
+ /* dynamic fields */
+ u32 port_id;
+ enum nvme_fc_obj_state port_state;
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
+
+/**
+ * struct nvme_fc_port_template - structure containing static entrypoints and
+ * operational parameters for an LLDD that supports NVME host
+ * behavior. Passed by reference in port registrations.
+ * NVME-FC transport remembers template reference and may
+ * access it during runtime operation.
+ *
+ * Host/Initiator Transport Entrypoints/Parameters:
+ *
+ * @localport_delete: The LLDD initiates deletion of a localport via
+ * nvme_fc_deregister_localport(). However, the teardown is
+ * asynchronous. This routine is called upon the completion of the
+ * teardown to inform the LLDD that the localport has been deleted.
+ * Entrypoint is Mandatory.
+ *
+ * @remoteport_delete: The LLDD initiates deletion of a remoteport via
+ * nvme_fc_deregister_remoteport(). However, the teardown is
+ * asynchronous. This routine is called upon the completion of the
+ * teardown to inform the LLDD that the remoteport has been deleted.
+ * Entrypoint is Mandatory.
+ *
+ * @create_queue: Upon creating a host<->controller association, queues are
+ * created such that they can be affinitized to cpus/cores. This is a
+ * callback into the LLDD to notify it that a controller queue is being
+ * created. The LLDD may choose to allocate an associated hw queue
+ * or map it onto a shared hw queue. Upon return from the call, the
+ * LLDD specifies a handle that will be given back to it for any
+ * command that is posted to the controller queue. The handle can
+ * be used by the LLDD to map quickly to the proper hw queue for
+ * command execution. The mask of cpu's that will map to this queue
+ * at the block-level is also passed in. The LLDD should use the
+ * queue id and/or cpu masks to ensure proper affinitization of the
+ * controller queue to the hw queue.
+ * Entrypoint is Optional.
+ *
+ * @delete_queue: This is the inverse of the create_queue. During
+ * host<->controller association teardown, this routine is called
+ * when a controller queue is being terminated. Any association with
+ * a hw queue should be terminated. If there is a unique hw queue, the
+ * hw queue should be torn down.
+ * Entrypoint is Optional.
+ *
+ * @poll_queue: Called to poll for the completion of an io on a blk queue.
+ * Entrypoint is Optional.
+ *
+ * @ls_req: Called to issue a FC-NVME FC-4 LS service request.
+ * The nvme_fc_ls_req structure will fully describe the buffers for
+ * the request payload and where to place the response payload. The
+ * LLDD is to allocate an exchange, issue the LS request, obtain the
+ * LS response, and call the "done" routine specified in the request
+ * structure (argument to done is the ls request structure itself).
+ * Entrypoint is Mandatory.
+ *
+ * @fcp_io: called to issue a FC-NVME I/O request. The I/O may be for
+ * an admin queue or an i/o queue. The nvmefc_fcp_req structure will
+ * fully describe the io: the buffer containing the FC-NVME CMD IU
+ * (which contains the SQE), the sg list for the payload if applicable,
+ * and the buffer to place the FC-NVME RSP IU into. The LLDD will
+ * complete the i/o, indicating the amount of data transferred or
+ * any transport error, and call the "done" routine specified in the
+ * request structure (argument to done is the fcp request structure
+ * itself).
+ * Entrypoint is Mandatory.
+ *
+ * @ls_abort: called to request the LLDD to abort the indicated ls request.
+ * The call may return before the abort has completed. After aborting
+ * the request, the LLDD must still call the ls request done routine
+ * indicating an FC transport Aborted status.
+ * Entrypoint is Mandatory.
+ *
+ * @fcp_abort: called to request the LLDD to abort the indicated fcp request.
+ * The call may return before the abort has completed. After aborting
+ * the request, the LLDD must still call the fcp request done routine
+ * indicating an FC transport Aborted status.
+ * Entrypoint is Mandatory.
+ *
+ * @max_hw_queues: indicates the maximum number of hw queues the LLDD
+ * supports for cpu affinitization.
+ * Value is Mandatory. Must be at least 1.
+ *
+ * @max_sgl_segments: indicates the maximum number of sgl segments supported
+ * by the LLDD
+ * Value is Mandatory. Must be at least 1. Recommend at least 256.
+ *
+ * @max_dif_sgl_segments: indicates the maximum number of sgl segments
+ * supported by the LLDD for DIF operations.
+ * Value is Mandatory. Must be at least 1. Recommend at least 256.
+ *
+ * @dma_boundary: indicates the dma address boundary where dma mappings
+ * will be split across.
+ * Value is Mandatory. Typical value is 0xFFFFFFFF to split across
+ * 4Gig address boundaries
+ *
+ * @local_priv_sz: The LLDD sets this field to the amount of additional
+ * memory that it would like fc nvme layer to allocate on the LLDD's
+ * behalf whenever a localport is allocated. The additional memory
+ * area is solely for use by the LLDD and its location is specified by
+ * the localport->private pointer.
+ * Value is Mandatory. Allowed to be zero.
+ *
+ * @remote_priv_sz: The LLDD sets this field to the amount of additional
+ * memory that it would like fc nvme layer to allocate on the LLDD's
+ * behalf whenever a remoteport is allocated. The additional memory
+ * area is solely for use by the LLDD and its location is specified by
+ * the remoteport->private pointer.
+ * Value is Mandatory. Allowed to be zero.
+ *
+ * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional
+ * memory that it would like fc nvme layer to allocate on the LLDD's
+ * behalf whenever a ls request structure is allocated. The additional
+ * memory area is solely for use by the LLDD and its location is
+ * specified by the ls_request->private pointer.
+ * Value is Mandatory. Allowed to be zero.
+ *
+ * @fcprqst_priv_sz: The LLDD sets this field to the amount of additional
+ * memory that it would like fc nvme layer to allocate on the LLDD's
+ * behalf whenever a fcp request structure is allocated. The additional
+ * memory area is solely for use by the LLDD and its location is
+ * specified by the fcp_request->private pointer.
+ * Value is Mandatory. Allowed to be zero.
+ */
+struct nvme_fc_port_template {
+ /* initiator-based functions */
+ void (*localport_delete)(struct nvme_fc_local_port *);
+ void (*remoteport_delete)(struct nvme_fc_remote_port *);
+ int (*create_queue)(struct nvme_fc_local_port *,
+ unsigned int qidx, u16 qsize,
+ void **handle);
+ void (*delete_queue)(struct nvme_fc_local_port *,
+ unsigned int qidx, void *handle);
+ void (*poll_queue)(struct nvme_fc_local_port *, void *handle);
+ int (*ls_req)(struct nvme_fc_local_port *,
+ struct nvme_fc_remote_port *,
+ struct nvmefc_ls_req *);
+ int (*fcp_io)(struct nvme_fc_local_port *,
+ struct nvme_fc_remote_port *,
+ void *hw_queue_handle,
+ struct nvmefc_fcp_req *);
+ void (*ls_abort)(struct nvme_fc_local_port *,
+ struct nvme_fc_remote_port *,
+ struct nvmefc_ls_req *);
+ void (*fcp_abort)(struct nvme_fc_local_port *,
+ struct nvme_fc_remote_port *,
+ void *hw_queue_handle,
+ struct nvmefc_fcp_req *);
+
+ u32 max_hw_queues;
+ u16 max_sgl_segments;
+ u16 max_dif_sgl_segments;
+ u64 dma_boundary;
+
+ /* sizes of additional private data for data structures */
+ u32 local_priv_sz;
+ u32 remote_priv_sz;
+ u32 lsrqst_priv_sz;
+ u32 fcprqst_priv_sz;
+};
+
+
+/*
+ * Initiator/Host functions
+ */
+
+int nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
+ struct nvme_fc_port_template *template,
+ struct device *dev,
+ struct nvme_fc_local_port **lport_p);
+
+int nvme_fc_unregister_localport(struct nvme_fc_local_port *localport);
+
+int nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
+ struct nvme_fc_port_info *pinfo,
+ struct nvme_fc_remote_port **rport_p);
+
+int nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *remoteport);
+
+void nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport);
+
+int nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *remoteport,
+ u32 dev_loss_tmo);
+
+
+/*
+ * *************** LLDD FC-NVME Target/Subsystem API ***************
+ *
+ * For FC LLDD's that are the NVME Subsystem role
+ *
+ * ******************************************************************
+ */
+
+/**
+ * struct nvmet_fc_port_info - port-specific ids and FC connection-specific
+ * data element used during NVME Subsystem role
+ * registrations
+ *
+ * Static fields describing the port being registered:
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ *
+ * Initialization values for dynamic port fields:
+ * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
+ * be set to 0.
+ */
+struct nvmet_fc_port_info {
+ u64 node_name;
+ u64 port_name;
+ u32 port_id;
+};
+
+
+/**
+ * struct nvmefc_tgt_ls_req - Structure used between LLDD and NVMET-FC
+ * layer to represent the exchange context for
+ * a FC-NVME Link Service (LS).
+ *
+ * The structure is allocated by the LLDD whenever a LS Request is received
+ * from the FC link. The address of the structure is passed to the nvmet-fc
+ * layer via the nvmet_fc_rcv_ls_req() call. The address of the structure
+ * will be passed back to the LLDD when the response is to be transmitted.
+ * The LLDD is to use the address to map back to the LLDD exchange structure
+ * which maintains information such as the targetport the LS was received
+ * on, the remote FC NVME initiator that sent the LS, and any FC exchange
+ * context. Upon completion of the LS response transmit, the address of the
+ * structure will be passed back to the LS rsp done() routine, allowing the
+ * nvmet-fc layer to release dma resources. Upon completion of the done()
+ * routine, no further access will be made by the nvmet-fc layer and the
+ * LLDD can de-allocate the structure.
+ *
+ * Field initialization:
+ * At the time of the nvmet_fc_rcv_ls_req() call, there is no content that
+ * is valid in the structure.
+ *
+ * When the structure is used for the LLDD->xmt_ls_rsp() call, the nvmet-fc
+ * layer will fully set the fields in order to specify the response
+ * payload buffer and its length as well as the done routine to be called
+ * upon completion of the transmit. The nvmet-fc layer will also set a
+ * private pointer for its own use in the done routine.
+ *
+ * Values set by the NVMET-FC layer prior to calling the LLDD xmt_ls_rsp
+ * entrypoint.
+ * @rspbuf: pointer to the LS response buffer
+ * @rspdma: PCI DMA address of the LS response buffer
+ * @rsplen: Length, in bytes, of the LS response buffer
+ * @done: The callback routine the LLDD is to invoke upon completion of
+ * transmitting the LS response. req argument is the pointer to
+ * the original ls request.
+ * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used
+ * as part of the NVMET-FC processing. The LLDD is not to access
+ * this pointer.
+ */
+struct nvmefc_tgt_ls_req {
+ void *rspbuf;
+ dma_addr_t rspdma;
+ u16 rsplen;
+
+ void (*done)(struct nvmefc_tgt_ls_req *req);
+ void *nvmet_fc_private; /* LLDD is not to access !! */
+};
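+
+/*
+ * Illustrative sketch only (not part of this patch): the LS exchange
+ * flow described above, seen from a hypothetical LLDD. All "example_"
+ * names are assumptions.
+ *
+ *	struct example_ls_exchange {
+ *		struct nvmefc_tgt_ls_req ls_req; // handed to nvmet-fc
+ *		void *fc_exchange_ctx;           // LLDD hw context
+ *	};
+ *
+ *	// rx path: an LS request arrived from the FC link
+ *	static void example_ls_rcv(struct nvmet_fc_target_port *tgtport,
+ *			struct example_ls_exchange *ex, void *buf, u32 len)
+ *	{
+ *		// no ls_req fields are valid at this point
+ *		if (nvmet_fc_rcv_ls_req(tgtport, &ex->ls_req, buf, len))
+ *			example_free_exchange(ex);
+ *	}
+ *
+ *	// tx path: nvmet-fc filled rspbuf/rspdma/rsplen/done and called
+ *	// the LLDD's xmt_ls_rsp(); once the transmit has completed:
+ *	static void example_ls_xmt_done(struct example_ls_exchange *ex)
+ *	{
+ *		ex->ls_req.done(&ex->ls_req); // nvmet-fc releases dma
+ *		example_free_exchange(ex);    // now safe to free
+ *	}
+ */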
+
+/* Operations that NVME-FC layer may request the LLDD to perform for FCP */
+enum {
+ NVMET_FCOP_READDATA = 1, /* xmt data to initiator */
+ NVMET_FCOP_WRITEDATA = 2, /* xmt data from initiator */
+ NVMET_FCOP_READDATA_RSP = 3, /* xmt data to initiator and send
+ * rsp as well
+ */
+ NVMET_FCOP_RSP = 4, /* send rsp frame */
+};
+
+/**
+ * struct nvmefc_tgt_fcp_req - Structure used between LLDD and NVMET-FC
+ * layer to represent the exchange context and
+ * the specific FC-NVME IU operation(s) to perform
+ * for a FC-NVME FCP IO.
+ *
+ * Structure used between LLDD and nvmet-fc layer to represent the exchange
+ * context for a FC-NVME FCP I/O operation (e.g. an nvme sqe, the
+ * sqe-related memory transfers, and its associated cqe transfer).
+ *
+ * The structure is allocated by the LLDD whenever a FCP CMD IU is received
+ * from the FC link. The address of the structure is passed to the nvmet-fc
+ * layer via the nvmet_fc_rcv_fcp_req() call. The address of the structure
+ * will be passed back to the LLDD for the data operations and transmit of
+ * the response. The LLDD is to use the address to map back to the LLDD
+ * exchange structure which maintains information such as the targetport
+ * the FCP I/O was received on, the remote FC NVME initiator that sent the
+ * FCP I/O, and any FC exchange context. Upon completion of the FCP target
+ * operation, the address of the structure will be passed back to the FCP
+ * op done() routine, allowing the nvmet-fc layer to release dma resources.
+ * Upon completion of the done() routine for either RSP or ABORT ops, no
+ * further access will be made by the nvmet-fc layer and the LLDD can
+ * de-allocate the structure.
+ *
+ * Field initialization:
+ * At the time of the nvmet_fc_rcv_fcp_req() call, there is no content that
+ * is valid in the structure.
+ *
+ * When the structure is used for an FCP target operation, the nvmet-fc
+ * layer will fully set the fields in order to specify the scattergather
+ * list, the transfer length, as well as the done routine to be called
+ * upon completion of the operation. The nvmet-fc layer will also set a
+ * private pointer for its own use in the done routine.
+ *
+ * Values set by the NVMET-FC layer prior to calling the LLDD fcp_op
+ * entrypoint.
+ * @op: Indicates the FCP IU operation to perform (see NVMET_FCOP_xxx)
+ * @hwqid: Specifies the hw queue index (0..N-1, where N is the
+ * max_hw_queues value from the LLDD's nvmet_fc_target_template)
+ * that the operation is to use.
+ * @offset: Indicates the DATA_OUT/DATA_IN payload offset to be transferred.
+ * Field is only valid on WRITEDATA, READDATA, or READDATA_RSP ops.
+ * @timeout: amount of time, in seconds, to wait for a response from the NVME
+ * host. A value of 0 is an infinite wait.
+ * Valid only for the following ops:
+ * WRITEDATA: caps the wait for data reception
+ * READDATA_RSP & RSP: caps wait for FCP_CONF reception (if used)
+ * @transfer_length: the length, in bytes, of the DATA_OUT or DATA_IN payload
+ * that is to be transferred.
+ * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops.
+ * @ba_rjt: Contains the BA_RJT payload that is to be transferred.
+ * Valid only for the NVMET_FCOP_BA_RJT op.
+ * @sg: Scatter/gather list for the DATA_OUT/DATA_IN payload data.
+ * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops.
+ * @sg_cnt: Number of valid entries in the scatter/gather list.
+ * Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops.
+ * @rspaddr: pointer to the FCP RSP IU buffer to be transmit
+ * Used by RSP and READDATA_RSP ops
+ * @rspdma: PCI DMA address of the FCP RSP IU buffer
+ * Used by RSP and READDATA_RSP ops
+ * @rsplen: Length, in bytes, of the FCP RSP IU buffer
+ * Used by RSP and READDATA_RSP ops
+ * @done: The callback routine the LLDD is to invoke upon completion of
+ * the operation. req argument is the pointer to the original
+ * FCP subsystem op request.
+ * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used
+ * as part of the NVMET-FC processing. The LLDD is not to
+ * reference this field.
+ *
+ * Values set by the LLDD indicating completion status of the FCP operation.
+ * Must be set prior to calling the done() callback.
+ * @transferred_length: amount of DATA_OUT payload data received by a
+ * WRITEDATA operation. If not a WRITEDATA operation, value must
+ * be set to 0. Should equal transfer_length on success.
+ * @fcp_error: status of the FCP operation. Must be 0 on success; on failure
+ * must be a NVME_SC_FC_xxxx value.
+ */
+struct nvmefc_tgt_fcp_req {
+ u8 op;
+ u16 hwqid;
+ u32 offset;
+ u32 timeout;
+ u32 transfer_length;
+ struct fc_ba_rjt ba_rjt;
+ struct scatterlist *sg;
+ int sg_cnt;
+ void *rspaddr;
+ dma_addr_t rspdma;
+ u16 rsplen;
+
+ void (*done)(struct nvmefc_tgt_fcp_req *);
+
+ void *nvmet_fc_private; /* LLDD is not to access !! */
+
+ u32 transferred_length;
+ int fcp_error;
+};
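+
+/*
+ * Illustrative sketch only (not part of this patch): completing a
+ * WRITEDATA operation in a hypothetical LLDD. Per the rules above,
+ * transferred_length and fcp_error must be set before calling done().
+ *
+ *	static void example_writedata_complete(struct nvmefc_tgt_fcp_req *req,
+ *			u32 bytes_received, int error)
+ *	{
+ *		req->transferred_length = bytes_received;
+ *		req->fcp_error = error; // 0 or a NVME_SC_FC_xxxx value
+ *		req->done(req);
+ *		// nvmet-fc may immediately issue the next op for this io
+ *		// from this same thread, so req fields must not be
+ *		// referenced past this point
+ *	}
+ */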
+
+
+/* Target Features (Bit fields) LLDD supports */
+enum {
+ NVMET_FCTGTFEAT_READDATA_RSP = (1 << 0),
+ /* Bit 0: supports the NVMET_FCOP_READDATA_RSP op, which
+ * sends (the last) Read Data sequence followed by the RSP
+ * sequence in one LLDD operation. Errors during Data
+ * sequence transmit must not allow RSP sequence to be sent.
+ */
+ NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 1),
+ /* Bit 1: When 0, the LLDD is calling the cmd rcv handler
+ * in a non-isr context, allowing the transport to finish
+ * op completion in the calling context. When 1, the LLDD
+ * is calling the cmd rcv handler in an ISR context,
+ * requiring the transport to transition to a workqueue
+ * for op completion.
+ */
+ NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 2),
+ /* Bit 2: When 0, the LLDD is calling the op done handler
+ * in a non-isr context, allowing the transport to finish
+ * op completion in the calling context. When 1, the LLDD
+ * is calling the op done handler in an ISR context,
+ * requiring the transport to transition to a workqueue
+ * for op completion.
+ */
+};
+
+
+/**
+ * struct nvmet_fc_target_port - structure used between NVME-FC transport and
+ * a LLDD to reference a local NVME subsystem port.
+ * Allocated/created by the nvmet_fc_register_targetport()
+ * transport interface.
+ *
+ * Fields with static values for the port. Initialized by the
+ * port_info struct supplied to the registration call.
+ * @port_num: NVME-FC transport subsystem port number
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ * @private: pointer to memory allocated alongside the local port
+ * structure that is specifically for the LLDD to use.
+ * The length of the buffer corresponds to the target_priv_sz
+ * value specified in the nvmet_fc_target_template supplied by
+ * the LLDD.
+ *
+ * Fields with dynamic values. Values may change based on link state. LLDD
+ * may reference fields directly to change them. Initialized by the
+ * port_info struct supplied to the registration call.
+ * @port_id: FC N_Port_ID currently assigned to the port. The upper
+ * 8 bits must be set to 0.
+ * @port_state: Operational state of the port.
+ */
+struct nvmet_fc_target_port {
+ /* static/read-only fields */
+ u32 port_num;
+ u64 node_name;
+ u64 port_name;
+
+ void *private;
+
+ /* dynamic fields */
+ u32 port_id;
+ enum nvme_fc_obj_state port_state;
+} __aligned(sizeof(u64)); /* alignment for other structs alloc'd with it */
+
+
+/**
+ * struct nvmet_fc_target_template - structure containing static entrypoints
+ * and operational parameters for an LLDD that supports NVME
+ * subsystem behavior. Passed by reference in port
+ * registrations. NVME-FC transport remembers template
+ * reference and may access it during runtime operation.
+ *
+ * Subsystem/Target Transport Entrypoints/Parameters:
+ *
+ * @targetport_delete: The LLDD initiates deletion of a targetport via
+ * nvmet_fc_unregister_targetport(). However, the teardown is
+ * asynchronous. This routine is called upon the completion of the
+ * teardown to inform the LLDD that the targetport has been deleted.
+ * Entrypoint is Mandatory.
+ *
+ * @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service.
+ * The nvmefc_tgt_ls_req structure is the same LLDD-supplied exchange
+ * structure specified in the nvmet_fc_rcv_ls_req() call made when
+ * the LS request was received. The structure will fully describe
+ * the buffers for the response payload and the dma address of the
+ * payload. The LLDD is to transmit the response (or return a non-zero
+ * errno status), and upon completion of the transmit, call the
+ * "done" routine specified in the nvmefc_tgt_ls_req structure
+ * (argument to done is the ls request structure itself).
+ * After calling the done routine, the LLDD shall consider the
+ * LS handling complete and the nvmefc_tgt_ls_req structure may
+ * be freed/released.
+ * Entrypoint is Mandatory.
+ *
+ * @fcp_op: Called to perform a data transfer or transmit a response.
+ * The nvmefc_tgt_fcp_req structure is the same LLDD-supplied
+ * exchange structure specified in the nvmet_fc_rcv_fcp_req() call
+ * made when the FCP CMD IU was received. The op field in the
+ * structure shall indicate the operation for the LLDD to perform
+ * relative to the io.
+ * NVMET_FCOP_READDATA operation: the LLDD is to send the
+ * payload data (described by sglist) to the host in 1 or
+ * more FC sequences (preferably 1). Note: the fc-nvme layer
+ * may call the READDATA operation multiple times for longer
+ * payloads.
+ * NVMET_FCOP_WRITEDATA operation: the LLDD is to receive the
+ * payload data (described by sglist) from the host via 1 or
+ * more FC sequences (preferably 1). The LLDD is to generate
+ * the XFER_RDY IU(s) corresponding to the data being requested.
+ * Note: the FC-NVME layer may call the WRITEDATA operation
+ * multiple times for longer payloads.
+ * NVMET_FCOP_READDATA_RSP operation: the LLDD is to send the
+ * payload data (described by sglist) to the host in 1 or
+ * more FC sequences (preferably 1). If an error occurs during
+ * payload data transmission, the LLDD is to set the
+ * nvmefc_tgt_fcp_req fcp_error and transferred_length fields, then
+ * consider the operation complete. On error, the LLDD must not
+ * transmit the FCP_RSP iu. If all payload data is transferred
+ * successfully, the LLDD is to update the nvmefc_tgt_fcp_req
+ * transferred_length field and may subsequently transmit the
+ * FCP_RSP iu payload (described by rspbuf, rspdma, rsplen).
+ * If FCP_CONF is supported, the LLDD is to await FCP_CONF
+ * reception to confirm the RSP reception by the host. The LLDD
+ * may retransmit the FCP_RSP iu if necessary per FC-NVME. Upon
+ * transmission of the FCP_RSP iu if FCP_CONF is not supported,
+ * or upon success/failure of FCP_CONF if it is supported, the
+ * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
+ * consider the operation complete.
+ * NVMET_FCOP_RSP: the LLDD is to transmit the FCP_RSP iu payload
+ * (described by rspbuf, rspdma, rsplen). If FCP_CONF is
+ * supported, the LLDD is to await FCP_CONF reception to confirm
+ * the RSP reception by the host. The LLDD may retransmit the
+ * FCP_RSP iu if FCP_CONF is not received per FC-NVME. Upon
+ * transmission of the FCP_RSP iu if FCP_CONF is not supported,
+ * or upon success/failure of FCP_CONF if it is supported, the
+ * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
+ * consider the operation complete.
+ * Upon completing the indicated operation, the LLDD is to set the
+ * status fields for the operation (transferred_length and fcp_error
+ * status) in the request, then call the "done" routine
+ * indicated in the fcp request. After the operation completes,
+ * regardless of whether the FCP_RSP iu was successfully transmitted,
+ * the LLDD-supplied exchange structure must remain valid until the
+ * transport calls the fcp_req_release() callback to return ownership
+ * of the exchange structure back to the LLDD so that it may be used
+ * for another fcp command.
+ * Note: when calling the done routine for READDATA or WRITEDATA
+ * operations, the fc-nvme layer may immediately convert, in the same
+ * thread and before returning to the LLDD, the fcp operation to
+ * the next operation for the fcp io and call the LLDD's fcp_op
+ * entrypoint again. If fields in the fcp request are to be accessed
+ * after the done call, the LLDD should save their values prior to
+ * calling the done routine, and inspect the saved values after the
+ * done routine returns.
+ * Returns 0 on success, -<errno> on failure (Ex: -EIO)
+ * Entrypoint is Mandatory.
+ *
+ * @fcp_abort: Called by the transport to abort an active command.
+ * The command may be in-between operations (nothing active in LLDD)
+ * or may have an active WRITEDATA operation pending. The LLDD is to
+ * initiate the ABTS process for the command and return from the
+ * callback. The ABTS does not need to be complete on the command.
+ * The fcp_abort callback inherently cannot fail. After the
+ * fcp_abort() callback completes, the transport will wait for any
+ * outstanding operation (if there was one) to complete, then will
+ * call the fcp_req_release() callback to return the command's
+ * exchange context back to the LLDD.
+ * Entrypoint is Mandatory.
+ *
+ * @fcp_req_release: Called by the transport to return a nvmefc_tgt_fcp_req
+ * to the LLDD after all operations on the fcp operation are complete.
+ * This may be due to the command completing or upon completion of
+ * abort cleanup.
+ * Entrypoint is Mandatory.
+ *
+ * @defer_rcv: Called by the transport to signal the LLDD that it has
+ * begun processing a previously received NVME CMD IU. The LLDD
+ * is now free to re-use the rcv buffer associated with the
+ * nvmefc_tgt_fcp_req.
+ * Entrypoint is Optional.
+ *
+ * @max_hw_queues: indicates the maximum number of hw queues the LLDD
+ * supports for cpu affinitization.
+ * Value is Mandatory. Must be at least 1.
+ *
+ * @max_sgl_segments: indicates the maximum number of sgl segments supported
+ * by the LLDD
+ * Value is Mandatory. Must be at least 1. Recommend at least 256.
+ *
+ * @max_dif_sgl_segments: indicates the maximum number of sgl segments
+ * supported by the LLDD for DIF operations.
+ * Value is Mandatory. Must be at least 1. Recommend at least 256.
+ *
+ * @dma_boundary: indicates the dma address boundary where dma mappings
+ * will be split across.
+ * Value is Mandatory. Typical value is 0xFFFFFFFF to split across
+ * 4Gig address boundaries.
+ *
+ * @target_features: The LLDD sets bits in this field to correspond to
+ * optional features that are supported by the LLDD.
+ * Refer to the NVMET_FCTGTFEAT_xxx values.
+ * Value is Mandatory. Allowed to be zero.
+ *
+ * @target_priv_sz: The LLDD sets this field to the amount of additional
+ * memory that it would like fc nvme layer to allocate on the LLDD's
+ * behalf whenever a targetport is allocated. The additional memory
+ * area is solely for the use of the LLDD, and its location is
+ * specified by the targetport->private pointer.
+ * Value is Mandatory. Allowed to be zero.
+ */
+struct nvmet_fc_target_template {
+ void (*targetport_delete)(struct nvmet_fc_target_port *tgtport);
+ int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_ls_req *tls_req);
+ int (*fcp_op)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*fcp_abort)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
+
+ u32 max_hw_queues;
+ u16 max_sgl_segments;
+ u16 max_dif_sgl_segments;
+ u64 dma_boundary;
+
+ u32 target_features;
+
+ u32 target_priv_sz;
+};
+
+
+int nvmet_fc_register_targetport(struct nvmet_fc_port_info *portinfo,
+ struct nvmet_fc_target_template *template,
+ struct device *dev,
+ struct nvmet_fc_target_port **tgtport_p);
+
+int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *tgtport);
+
+int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_ls_req *lsreq,
+ void *lsreqbuf, u32 lsreqbuf_len);
+
+int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq,
+ void *cmdiubuf, u32 cmdiubuf_len);
+
+void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
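+
+/*
+ * Illustrative sketch only (not part of this patch): a hypothetical
+ * subsystem-role LLDD registering a targetport. All "example_" names
+ * are assumptions.
+ *
+ *	static struct nvmet_fc_target_template example_tgt_template = {
+ *		.targetport_delete	= example_targetport_delete,
+ *		.xmt_ls_rsp		= example_xmt_ls_rsp,
+ *		.fcp_op			= example_fcp_op,
+ *		.fcp_abort		= example_fcp_abort,
+ *		.fcp_req_release	= example_fcp_req_release,
+ *		.max_hw_queues		= 4,
+ *		.max_sgl_segments	= 256,
+ *		.max_dif_sgl_segments	= 256,
+ *		.dma_boundary		= 0xFFFFFFFF,
+ *		.target_features	= NVMET_FCTGTFEAT_READDATA_RSP,
+ *		.target_priv_sz		= sizeof(struct example_tgt_priv),
+ *	};
+ *
+ *	static int example_tgt_attach(struct example_hba *hba)
+ *	{
+ *		struct nvmet_fc_port_info pinfo = {
+ *			.node_name = hba->wwnn,
+ *			.port_name = hba->wwpn,
+ *			.port_id   = hba->nport_id & 0x00FFFFFF,
+ *		};
+ *
+ *		return nvmet_fc_register_targetport(&pinfo,
+ *				&example_tgt_template, hba->dev,
+ *				&hba->tgtport);
+ *	}
+ */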
+
+#endif /* _NVME_FC_DRIVER_H */
--- /dev/null
+/*
+ * Copyright (c) 2016 Avago Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful.
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
+ * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
+ * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package
+ *
+ */
+
+/*
+ * This file contains definitions relative to FC-NVME r1.14 (16-020vB).
+ * The fcnvme_lsdesc_cr_assoc_cmd struct reflects expected r1.16 content.
+ */
+
+#ifndef _NVME_FC_H
+#define _NVME_FC_H 1
+
+
+#define NVME_CMD_SCSI_ID 0xFD
+#define NVME_CMD_FC_ID FC_TYPE_NVME
+
+/* FC-NVME Cmd IU Flags */
+#define FCNVME_CMD_FLAGS_DIRMASK 0x03
+#define FCNVME_CMD_FLAGS_WRITE 0x01
+#define FCNVME_CMD_FLAGS_READ 0x02
+
+struct nvme_fc_cmd_iu {
+ __u8 scsi_id;
+ __u8 fc_id;
+ __be16 iu_len;
+ __u8 rsvd4[3];
+ __u8 flags;
+ __be64 connection_id;
+ __be32 csn;
+ __be32 data_len;
+ struct nvme_command sqe;
+ __be32 rsvd88[2];
+};
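+
+/*
+ * Illustrative sketch only (not part of this patch): filling the fixed
+ * header of the Cmd IU above. iu_len is assumed to be counted in
+ * 32-bit words; buf and the data direction are placeholders.
+ *
+ *	struct nvme_fc_cmd_iu *iu = buf;
+ *
+ *	iu->scsi_id = NVME_CMD_SCSI_ID;
+ *	iu->fc_id   = NVME_CMD_FC_ID;
+ *	iu->iu_len  = cpu_to_be16(sizeof(*iu) / sizeof(u32));
+ *	iu->flags   = FCNVME_CMD_FLAGS_WRITE; // host-to-target data
+ */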
+
+#define NVME_FC_SIZEOF_ZEROS_RSP 12
+
+enum {
+ FCNVME_SC_SUCCESS = 0,
+ FCNVME_SC_INVALID_FIELD = 1,
+ FCNVME_SC_INVALID_CONNID = 2,
+};
+
+struct nvme_fc_ersp_iu {
+ __u8 status_code;
+ __u8 rsvd1;
+ __be16 iu_len;
+ __be32 rsn;
+ __be32 xfrd_len;
+ __be32 rsvd12;
+ struct nvme_completion cqe;
+ /* for now - no additional payload */
+};
+
+
+/* FC-NVME Link Services */
+enum {
+ FCNVME_LS_RSVD = 0,
+ FCNVME_LS_RJT = 1,
+ FCNVME_LS_ACC = 2,
+ FCNVME_LS_CREATE_ASSOCIATION = 3,
+ FCNVME_LS_CREATE_CONNECTION = 4,
+ FCNVME_LS_DISCONNECT = 5,
+};
+
+/* FC-NVME Link Service Descriptors */
+enum {
+ FCNVME_LSDESC_RSVD = 0x0,
+ FCNVME_LSDESC_RQST = 0x1,
+ FCNVME_LSDESC_RJT = 0x2,
+ FCNVME_LSDESC_CREATE_ASSOC_CMD = 0x3,
+ FCNVME_LSDESC_CREATE_CONN_CMD = 0x4,
+ FCNVME_LSDESC_DISCONN_CMD = 0x5,
+ FCNVME_LSDESC_CONN_ID = 0x6,
+ FCNVME_LSDESC_ASSOC_ID = 0x7,
+};
+
+
+/* ********** start of Link Service Descriptors ********** */
+
+
+/*
+ * Computes a descriptor's length field: structure size minus descriptor header
+ */
+static inline __be32 fcnvme_lsdesc_len(size_t sz)
+{
+ return cpu_to_be32(sz - (2 * sizeof(u32)));
+}
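+
+/*
+ * Illustrative sketch only (not part of this patch): typical use of
+ * the helper above when building a descriptor. For the 16-byte
+ * connection-id descriptor defined later in this file, desc_len
+ * excludes the 8-byte tag/len header:
+ *
+ *	struct fcnvme_lsdesc_conn_id *cid = buf;
+ *
+ *	cid->desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
+ *	cid->desc_len = fcnvme_lsdesc_len(sizeof(*cid)); // be32(8)
+ *	cid->connection_id = cpu_to_be64(conn_id);
+ */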
+
+struct fcnvme_ls_rqst_w0 {
+ u8 ls_cmd; /* FCNVME_LS_xxx */
+ u8 zeros[3];
+};
+
+/* FCNVME_LSDESC_RQST */
+struct fcnvme_lsdesc_rqst {
+ __be32 desc_tag; /* FCNVME_LSDESC_xxx */
+ __be32 desc_len;
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 rsvd12;
+};
+
+/* FC-NVME LS RJT reason_code values */
+enum fcnvme_ls_rjt_reason {
+ FCNVME_RJT_RC_NONE = 0,
+ /* no reason - not to be sent */
+
+ FCNVME_RJT_RC_INVAL = 0x01,
+ /* invalid NVMe_LS command code */
+
+ FCNVME_RJT_RC_LOGIC = 0x03,
+ /* logical error */
+
+ FCNVME_RJT_RC_UNAB = 0x09,
+ /* unable to perform command request */
+
+ FCNVME_RJT_RC_UNSUP = 0x0b,
+ /* command not supported */
+
+ FCNVME_RJT_RC_INPROG = 0x0e,
+ /* command already in progress */
+
+ FCNVME_RJT_RC_INV_ASSOC = 0x40,
+ /* Invalid Association ID*/
+
+ FCNVME_RJT_RC_INV_CONN = 0x41,
+ /* Invalid Connection ID*/
+
+ FCNVME_RJT_RC_VENDOR = 0xff,
+ /* vendor specific error */
+};
+
+/* FC-NVME LS RJT reason_explanation values */
+enum fcnvme_ls_rjt_explan {
+ FCNVME_RJT_EXP_NONE = 0x00,
+ /* No additional explanation */
+
+ FCNVME_RJT_EXP_OXID_RXID = 0x17,
+ /* invalid OX_ID-RX_ID combination */
+
+ FCNVME_RJT_EXP_INSUF_RES = 0x29,
+ /* insufficient resources */
+
+ FCNVME_RJT_EXP_UNAB_DATA = 0x2a,
+ /* unable to supply requested data */
+
+ FCNVME_RJT_EXP_INV_LEN = 0x2d,
+ /* Invalid payload length */
+};
+
+/* FCNVME_LSDESC_RJT */
+struct fcnvme_lsdesc_rjt {
+ __be32 desc_tag; /* FCNVME_LSDESC_xxx */
+ __be32 desc_len;
+ u8 rsvd8;
+
+ /*
+ * Reject reason and explanation codes are generic
+ * to ELS's from LS-3.
+ */
+ u8 reason_code; /* fcnvme_ls_rjt_reason */
+ u8 reason_explanation; /* fcnvme_ls_rjt_explan */
+
+ u8 vendor;
+ __be32 rsvd12;
+};
+
+
+#define FCNVME_ASSOC_HOSTNQN_LEN 256
+#define FCNVME_ASSOC_SUBNQN_LEN 256
+
+/* FCNVME_LSDESC_CREATE_ASSOC_CMD */
+struct fcnvme_lsdesc_cr_assoc_cmd {
+ __be32 desc_tag; /* FCNVME_LSDESC_xxx */
+ __be32 desc_len;
+ __be16 ersp_ratio;
+ __be16 rsvd10;
+ __be32 rsvd12[9];
+ __be16 cntlid;
+ __be16 sqsize;
+ __be32 rsvd52;
+ uuid_t hostid;
+ u8 hostnqn[FCNVME_ASSOC_HOSTNQN_LEN];
+ u8 subnqn[FCNVME_ASSOC_SUBNQN_LEN];
+ __be32 rsvd584[108]; /* pad to 1016 bytes,
+ * which makes overall LS rqst
+ * payload 1024 bytes
+ */
+};
+
+#define FCNVME_LSDESC_CRA_CMD_DESC_MINLEN \
+ offsetof(struct fcnvme_lsdesc_cr_assoc_cmd, rsvd584)
+
+#define FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN \
+ (FCNVME_LSDESC_CRA_CMD_DESC_MINLEN - \
+ offsetof(struct fcnvme_lsdesc_cr_assoc_cmd, ersp_ratio))
+
+
+
+/* FCNVME_LSDESC_CREATE_CONN_CMD */
+struct fcnvme_lsdesc_cr_conn_cmd {
+ __be32 desc_tag; /* FCNVME_LSDESC_xxx */
+ __be32 desc_len;
+ __be16 ersp_ratio;
+ __be16 rsvd10;
+ __be32 rsvd12[9];
+ __be16 qid;
+ __be16 sqsize;
+ __be32 rsvd52;
+};
+
+/* Disconnect Scope Values */
+enum {
+ FCNVME_DISCONN_ASSOCIATION = 0,
+ FCNVME_DISCONN_CONNECTION = 1,
+};
+
+/* FCNVME_LSDESC_DISCONN_CMD */
+struct fcnvme_lsdesc_disconn_cmd {
+ __be32 desc_tag; /* FCNVME_LSDESC_xxx */
+ __be32 desc_len;
+ u8 rsvd8[3];
+ /* note: scope is really a 1 bit field */
+ u8 scope; /* FCNVME_DISCONN_xxx */
+ __be32 rsvd12;
+ __be64 id;
+};
+
+/* FCNVME_LSDESC_CONN_ID */
+struct fcnvme_lsdesc_conn_id {
+ __be32 desc_tag; /* FCNVME_LSDESC_xxx */
+ __be32 desc_len;
+ __be64 connection_id;
+};
+
+/* FCNVME_LSDESC_ASSOC_ID */
+struct fcnvme_lsdesc_assoc_id {
+ __be32 desc_tag; /* FCNVME_LSDESC_xxx */
+ __be32 desc_len;
+ __be64 association_id;
+};
+
+/* r_ctl values */
+enum {
+ FCNVME_RS_RCTL_DATA = 1,
+ FCNVME_RS_RCTL_XFER_RDY = 5,
+ FCNVME_RS_RCTL_RSP = 8,
+};
+
+
+/* ********** start of Link Services ********** */
+
+
+/* FCNVME_LS_RJT */
+struct fcnvme_ls_rjt {
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 desc_list_len;
+ struct fcnvme_lsdesc_rqst rqst;
+ struct fcnvme_lsdesc_rjt rjt;
+};
+
+/* FCNVME_LS_ACC */
+struct fcnvme_ls_acc_hdr {
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 desc_list_len;
+ struct fcnvme_lsdesc_rqst rqst;
+ /* Followed by cmd-specific ACC descriptors, see next definitions */
+};
+
+/* FCNVME_LS_CREATE_ASSOCIATION */
+struct fcnvme_ls_cr_assoc_rqst {
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 desc_list_len;
+ struct fcnvme_lsdesc_cr_assoc_cmd assoc_cmd;
+};
+
+#define FCNVME_LSDESC_CRA_RQST_MINLEN \
+ (offsetof(struct fcnvme_ls_cr_assoc_rqst, assoc_cmd) + \
+ FCNVME_LSDESC_CRA_CMD_DESC_MINLEN)
+
+#define FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN \
+ FCNVME_LSDESC_CRA_CMD_DESC_MINLEN
+
+
+struct fcnvme_ls_cr_assoc_acc {
+ struct fcnvme_ls_acc_hdr hdr;
+ struct fcnvme_lsdesc_assoc_id associd;
+ struct fcnvme_lsdesc_conn_id connectid;
+};
+
+
+/* FCNVME_LS_CREATE_CONNECTION */
+struct fcnvme_ls_cr_conn_rqst {
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 desc_list_len;
+ struct fcnvme_lsdesc_assoc_id associd;
+ struct fcnvme_lsdesc_cr_conn_cmd connect_cmd;
+};
+
+struct fcnvme_ls_cr_conn_acc {
+ struct fcnvme_ls_acc_hdr hdr;
+ struct fcnvme_lsdesc_conn_id connectid;
+};
+
+/* FCNVME_LS_DISCONNECT */
+struct fcnvme_ls_disconnect_rqst {
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 desc_list_len;
+ struct fcnvme_lsdesc_assoc_id associd;
+ struct fcnvme_lsdesc_disconn_cmd discon_cmd;
+};
+
+struct fcnvme_ls_disconnect_acc {
+ struct fcnvme_ls_acc_hdr hdr;
+};
+
+
+/*
+ * Yet to be defined in FC-NVME:
+ */
+#define NVME_FC_CONNECT_TIMEOUT_SEC 2 /* 2 seconds */
+#define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */
+#define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */
+
+/*
+ * TRADDR string must be of the form "nn-<16hexdigits>:pn-<16hexdigits>".
+ * The string may be specified with or without a "0x" prefix in front
+ * of each <16hexdigits>. Without the prefix is considered the "min"
+ * string and with it the "max" string. The hex digits may be upper or
+ * lower case.
+ */
+#define NVME_FC_TRADDR_NNLEN 3 /* "?n-" */
+#define NVME_FC_TRADDR_OXNNLEN 5 /* "?n-0x" */
+#define NVME_FC_TRADDR_HEXNAMELEN 16
+#define NVME_FC_TRADDR_MINLENGTH \
+ (2 * (NVME_FC_TRADDR_NNLEN + NVME_FC_TRADDR_HEXNAMELEN) + 1)
+#define NVME_FC_TRADDR_MAXLENGTH \
+ (2 * (NVME_FC_TRADDR_OXNNLEN + NVME_FC_TRADDR_HEXNAMELEN) + 1)
+#define NVME_FC_TRADDR_MIN_PN_OFFSET \
+ (NVME_FC_TRADDR_NNLEN + NVME_FC_TRADDR_HEXNAMELEN + 1)
+#define NVME_FC_TRADDR_MAX_PN_OFFSET \
+ (NVME_FC_TRADDR_OXNNLEN + NVME_FC_TRADDR_HEXNAMELEN + 1)
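+
+/*
+ * Illustrative sketch only (not part of this patch): using the length
+ * macros above to sanity-check a traddr string before parsing it.
+ *
+ *	static bool example_traddr_len_ok(size_t blen)
+ *	{
+ *		return blen == NVME_FC_TRADDR_MINLENGTH || // no "0x"
+ *		       blen == NVME_FC_TRADDR_MAXLENGTH;   // with "0x"
+ *	}
+ */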
+
+
+#endif /* _NVME_FC_H */
--- /dev/null
+#ifndef _LINUX_PCI_H
+#define _LINUX_PCI_H
+
+#include "../../compat/config.h"
+
+#include <linux/version.h>
+#include_next <linux/pci.h>
+
+#ifndef HAVE_PCI_PHYSFN
+#define pci_physfn LINUX_BACKPORT(pci_physfn)
+static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCI_IOV
+ if (dev->is_virtfn)
+ dev = dev->physfn;
+#endif
+ return dev;
+}
+#endif /* HAVE_PCI_PHYSFN */
+
+#ifndef HAVE_PCI_NUM_VF
+#define pci_num_vf LINUX_BACKPORT(pci_num_vf)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+int pci_num_vf(struct pci_dev *pdev);
+#else
+static inline int pci_num_vf(struct pci_dev *pdev)
+{
+ return 0;
+}
+#endif
+#endif
+
+#ifndef HAVE_PCI_VFS_ASSIGNED
+#define pci_vfs_assigned LINUX_BACKPORT(pci_vfs_assigned)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+int pci_vfs_assigned(struct pci_dev *pdev);
+#else
+static inline int pci_vfs_assigned(struct pci_dev *pdev)
+{
+ return 0;
+}
+#endif
+#endif
+
+#ifndef HAVE_PCI_SRIOV_GET_TOTALVFS
+#define pci_sriov_get_totalvfs LINUX_BACKPORT(pci_sriov_get_totalvfs)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
+int pci_sriov_get_totalvfs(struct pci_dev *pdev);
+#else
+static inline int pci_sriov_get_totalvfs(struct pci_dev *pdev)
+{
+ return 0;
+}
+#endif
+#endif
+
+#ifndef HAVE_PCI_IRQ_GET_AFFINITY
+static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
+ int vec)
+{
+ return cpu_possible_mask;
+}
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || \
+ (defined(RHEL_MAJOR) && RHEL_MAJOR -0 == 7 && RHEL_MINOR -0 >= 2)
+#ifndef HAVE_PCI_IRQ_GET_NODE
+static inline int pci_irq_get_node(struct pci_dev *pdev, int vec)
+{
+#ifdef CONFIG_PCI_MSI
+ const struct cpumask *mask;
+
+ mask = pci_irq_get_affinity(pdev, vec);
+ if (mask)
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+ return local_memory_node(cpu_to_node(cpumask_first(mask)));
+#else
+ return cpu_to_node(cpumask_first(mask));
+#endif
+ return dev_to_node(&pdev->dev);
+#else /* CONFIG_PCI_MSI */
+ return first_online_node;
+#endif /* CONFIG_PCI_MSI */
+}
+#endif /* pci_irq_get_node */
+#endif
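+
+/*
+ * Illustrative sketch only (not part of this patch): per-vector,
+ * node-local allocation built on the fallbacks above.
+ *
+ *	int node = pci_irq_get_node(pdev, vec);
+ *	struct example_queue *q =
+ *		kzalloc_node(sizeof(*q), GFP_KERNEL, node);
+ */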
+
+#ifdef CONFIG_PCI
+#ifndef HAVE_PCI_REQUEST_MEM_REGIONS
+static inline int
+pci_request_mem_regions(struct pci_dev *pdev, const char *name)
+{
+ return pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM), name);
+}
+#endif
+
+#ifndef HAVE_PCI_RELEASE_MEM_REGIONS
+static inline void
+pci_release_mem_regions(struct pci_dev *pdev)
+{
+ return pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+}
+#endif
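+
+/*
+ * Illustrative sketch only (not part of this patch): typical
+ * probe-time use of the two helpers above.
+ *
+ *	static int example_probe(struct pci_dev *pdev)
+ *	{
+ *		void __iomem *regs;
+ *		int ret = pci_request_mem_regions(pdev, "example_drv");
+ *
+ *		if (ret)
+ *			return ret;
+ *		regs = pci_ioremap_bar(pdev, 0);
+ *		if (!regs) {
+ *			pci_release_mem_regions(pdev);
+ *			return -ENOMEM;
+ *		}
+ *		return 0;
+ *	}
+ */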
+#endif /* CONFIG_PCI */
+
+#endif /* _LINUX_PCI_H */
--- /dev/null
+#ifndef _COMPAT_LINUX_PM_QOS_H
+#define _COMPAT_LINUX_PM_QOS_H 1
+
+#include "../../compat/config.h"
+#include <linux/slab.h>
+
+#include_next <linux/pm_qos.h>
+
+#if !defined(HAVE_PM_QOS_UPDATE_USER_LATENCY_TOLERANCE_EXPORTED)
+#if defined(HAVE_DEV_PM_QOS_RESUME_LATENCY) && \
+ defined(HAVE_DEV_PM_QOS_LATENCY_TOLERANCE) && \
+ defined(HAVE_PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
+#define dev_pm_qos_drop_user_request LINUX_BACKPORT(dev_pm_qos_drop_user_request)
+static inline void dev_pm_qos_drop_user_request(struct device *dev,
+ enum dev_pm_qos_req_type type)
+{
+ struct dev_pm_qos_request *req = NULL;
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ req = dev->power.qos->resume_latency_req;
+ dev->power.qos->resume_latency_req = NULL;
+ break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ req = dev->power.qos->latency_tolerance_req;
+ dev->power.qos->latency_tolerance_req = NULL;
+ break;
+ case DEV_PM_QOS_FLAGS:
+ req = dev->power.qos->flags_req;
+ dev->power.qos->flags_req = NULL;
+ break;
+ }
+ dev_pm_qos_remove_request(req);
+ kfree(req);
+}
+
+#define dev_pm_qos_update_user_latency_tolerance LINUX_BACKPORT(dev_pm_qos_update_user_latency_tolerance)
+static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+{
+ int ret;
+
+ /*
+ * The kernel's private implementation is mirrored here: each
+ * private helper it calls is replaced by an exported version
+ * (which invokes the private one under a mutex lock).
+ */
+
+ if (IS_ERR_OR_NULL(dev->power.qos)
+ || !dev->power.qos->latency_tolerance_req) {
+ struct dev_pm_qos_request *req;
+
+ if (val < 0) {
+ if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
+ ret = 0;
+ else
+ ret = -EINVAL;
+ goto out;
+ }
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
+ if (ret < 0) {
+ kfree(req);
+ goto out;
+ }
+ dev->power.qos->latency_tolerance_req = req;
+ } else {
+ if (val < 0) {
+ dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
+ ret = 0;
+ } else {
+ ret = dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
+ }
+ }
+
+out:
+ return ret;
+}
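+
+/*
+ * Illustrative sketch only (not part of this patch): a driver setting
+ * and later removing its device's user latency-tolerance constraint
+ * through the backported helper above.
+ *
+ *	// set a 100 us tolerance
+ *	dev_pm_qos_update_user_latency_tolerance(dev, 100);
+ *	// later, remove the constraint
+ *	dev_pm_qos_update_user_latency_tolerance(dev,
+ *			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
+ */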
+#endif
+#endif
+#endif /* _COMPAT_LINUX_PM_QOS_H */
-#ifndef _MLNX_LINUX_REFCOUNT_H
-#define _MLNX_LINUX_REFCOUNT_H
+#ifndef __COMPAT_LINUX_REFCOUNT_H
+#define __COMPAT_LINUX_REFCOUNT_H
#include "../../compat/config.h"
#endif /* HAVE_REFCOUNT */
-#endif /* _MLNX_LINUX_REFCOUNT_H */
+#endif /* __COMPAT_LINUX_REFCOUNT_H */
--- /dev/null
+#ifndef _COMPAT_LINUX_SIZES_H
+#define _COMPAT_LINUX_SIZES_H 1
+
+#include "../../compat/config.h"
+
+#ifdef HAVE_SIZES_H
+
+#include_next <linux/sizes.h>
+
+#else
+
+#define SZ_1 0x00000001
+#define SZ_2 0x00000002
+#define SZ_4 0x00000004
+#define SZ_8 0x00000008
+#define SZ_16 0x00000010
+#define SZ_32 0x00000020
+#define SZ_64 0x00000040
+#define SZ_128 0x00000080
+#define SZ_256 0x00000100
+#define SZ_512 0x00000200
+
+#define SZ_1K 0x00000400
+#define SZ_2K 0x00000800
+#define SZ_4K 0x00001000
+#define SZ_8K 0x00002000
+#define SZ_16K 0x00004000
+#define SZ_32K 0x00008000
+#define SZ_64K 0x00010000
+#define SZ_128K 0x00020000
+#define SZ_256K 0x00040000
+#define SZ_512K 0x00080000
+
+#define SZ_1M 0x00100000
+#define SZ_2M 0x00200000
+#define SZ_4M 0x00400000
+#define SZ_8M 0x00800000
+#define SZ_16M 0x01000000
+#define SZ_32M 0x02000000
+#define SZ_64M 0x04000000
+#define SZ_128M 0x08000000
+#define SZ_256M 0x10000000
+#define SZ_512M 0x20000000
+
+#define SZ_1G 0x40000000
+#define SZ_2G 0x80000000
+
+#endif
+
+#endif /* _COMPAT_LINUX_SIZES_H */
--- /dev/null
+#ifndef COMPAT_LINUX_SLAB_H
+#define COMPAT_LINUX_SLAB_H
+
+#include "../../compat/config.h"
+
+#include_next <linux/slab.h>
+
+#ifndef HAVE_KMALLOC_ARRAY
+/**
+ * kmalloc_array - allocate memory for an array.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate (see kmalloc).
+ */
+static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+ if (size != 0 && n > SIZE_MAX / size)
+ return NULL;
+ return __kmalloc(n * size, flags);
+}
+#endif
+
+#ifndef HAVE_KMALLOC_ARRAY_NODE
+static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
+ int node)
+{
+ if (size != 0 && n > SIZE_MAX / size)
+ return NULL;
+ if (__builtin_constant_p(n) && __builtin_constant_p(size))
+ return kmalloc_node(n * size, flags, node);
+ return __kmalloc_node(n * size, flags, node);
+}
+#endif
+
+#ifndef HAVE_KCALLOC_NODE
+static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
+{
+ return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
+}
+#endif
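+
+/*
+ * Illustrative sketch only (not part of this patch): unlike a bare
+ * kmalloc(n * size, ...), the helpers above fail cleanly when the
+ * multiplication would overflow.
+ *
+ *	struct example_entry *tbl;
+ *
+ *	tbl = kmalloc_array(nr_entries, sizeof(*tbl), GFP_KERNEL);
+ *	if (!tbl)
+ *		return -ENOMEM; // covers the nr * size overflow case too
+ */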
+
+/*
+ * W/A for old kernels that do not have this fix.
+ *
+ * commit 3942d29918522ba6a393c19388301ec04df429cd
+ * Author: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+ * Date: Tue Sep 8 15:00:50 2015 -0700
+ *
+ * mm/slab_common: allow NULL cache pointer in kmem_cache_destroy()
+ *
+*/
+static inline void compat_kmem_cache_destroy(struct kmem_cache *s)
+{
+ if (unlikely(!s))
+ return;
+
+ kmem_cache_destroy(s);
+}
+#define kmem_cache_destroy compat_kmem_cache_destroy
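+
+/*
+ * Illustrative sketch only (not part of this patch): with the wrapper
+ * above, teardown paths may destroy unconditionally, exactly as on
+ * newer kernels.
+ *
+ *	kmem_cache_destroy(example_cache); // example_cache may be NULL
+ *	example_cache = NULL;
+ */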
+
+#endif /* COMPAT_LINUX_SLAB_H */
--- /dev/null
+#ifndef _COMPAT_LINUX_T10_PI_H
+#define _COMPAT_LINUX_T10_PI_H 1
+
+#include "../../compat/config.h"
+
+#ifdef HAVE_T10_PI_H
+
+#include_next <linux/t10-pi.h>
+
+#else
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+
+/*
+ * A T10 PI-capable target device can be formatted with different
+ * protection schemes. Currently 0 through 3 are defined:
+ *
+ * Type 0 is regular (unprotected) I/O
+ *
+ * Type 1 defines the contents of the guard and reference tags
+ *
+ * Type 2 defines the contents of the guard and reference tags and
+ * uses 32-byte commands to seed the latter
+ *
+ * Type 3 defines the contents of the guard tag only
+ */
+enum t10_dif_type {
+ T10_PI_TYPE0_PROTECTION = 0x0,
+ T10_PI_TYPE1_PROTECTION = 0x1,
+ T10_PI_TYPE2_PROTECTION = 0x2,
+ T10_PI_TYPE3_PROTECTION = 0x3,
+};
+
+/*
+ * T10 Protection Information tuple.
+ */
+struct t10_pi_tuple {
+ __be16 guard_tag; /* Checksum */
+ __be16 app_tag; /* Opaque storage */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
+#define T10_PI_APP_ESCAPE cpu_to_be16(0xffff)
+#define T10_PI_REF_ESCAPE cpu_to_be32(0xffffffff)
+
+extern const struct blk_integrity_profile t10_pi_type1_crc;
+extern const struct blk_integrity_profile t10_pi_type1_ip;
+extern const struct blk_integrity_profile t10_pi_type3_crc;
+extern const struct blk_integrity_profile t10_pi_type3_ip;
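+
+/*
+ * Illustrative sketch only (not part of this patch): hooking one of
+ * the profiles above into a disk's integrity registration, assuming
+ * the kernel's blk_integrity_register() API is available.
+ *
+ *	struct blk_integrity bi = {
+ *		.profile    = &t10_pi_type1_crc,
+ *		.tuple_size = sizeof(struct t10_pi_tuple),
+ *	};
+ *
+ *	blk_integrity_register(disk, &bi);
+ */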
+
+#endif
+
+#endif /* _COMPAT_LINUX_T10_PI_H */
--- /dev/null
+#ifndef _COMPAT_LINUX_UUID_H
+#define _COMPAT_LINUX_UUID_H
+
+#include "../../compat/config.h"
+#include <linux/version.h>
+
+#include_next <linux/uuid.h>
+
+#if (defined(RHEL_MAJOR) && RHEL_MAJOR -0 == 7 && RHEL_MINOR -0 >= 2) || \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+
+#ifndef HAVE_UUID_GEN
+
+#define uuid_t uuid_be
+#define uuid_gen uuid_be_gen
+#define uuid_parse uuid_be_to_bin
+
+static inline void uuid_copy(uuid_t *dst, const uuid_t *src)
+{
+ memcpy(dst, src, sizeof(uuid_be));
+}
+
+#ifndef HAVE_UUID_BE_TO_BIN
+/*
+ * The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
+ * not including the trailing NUL.
+ */
+#define UUID_STRING_LEN 36
+#define uuid_is_valid LINUX_BACKPORT(uuid_is_valid)
+bool __must_check uuid_is_valid(const char *uuid);
+
+#define uuid_le_index LINUX_BACKPORT(uuid_le_index)
+extern const u8 uuid_le_index[16];
+#define uuid_be_index LINUX_BACKPORT(uuid_be_index)
+extern const u8 uuid_be_index[16];
+
+#define uuid_le_to_bin LINUX_BACKPORT(uuid_le_to_bin)
+int uuid_le_to_bin(const char *uuid, uuid_le *u);
+#define uuid_be_to_bin LINUX_BACKPORT(uuid_be_to_bin)
+int uuid_be_to_bin(const char *uuid, uuid_be *u);
+
+#endif /* HAVE_UUID_BE_TO_BIN */
+
+#endif /* HAVE_UUID_GEN */
+
+#ifndef HAVE_UUID_EQUAL
+#define uuid_equal LINUX_BACKPORT(uuid_equal)
+static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2)
+{
+ return memcmp(u1, u2, sizeof(uuid_t)) == 0;
+}
+#endif
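+
+/*
+ * Illustrative sketch only (not part of this patch): parsing and
+ * comparing UUIDs through the backported wrappers above.
+ *
+ *	uuid_t a, b;
+ *
+ *	if (uuid_parse("12345678-1234-1234-1234-123456789abc", &a))
+ *		return -EINVAL;
+ *	uuid_copy(&b, &a);
+ *	WARN_ON(!uuid_equal(&a, &b));
+ */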
+
+#endif
+
+#endif /* _COMPAT_LINUX_UUID_H */