git.openfabrics.org - compat-rdma/compat.git/commitdiff
compat: Add kthread support for kernels <= 2.6.35 compat-2014-01-30
authorDennis Dalessandro <dennis.dalessandro@intel.com>
Tue, 24 Dec 2013 16:20:30 +0000 (11:20 -0500)
committerDennis Dalessandro <dennis.dalessandro@intel.com>
Thu, 2 Jan 2014 17:29:10 +0000 (12:29 -0500)
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>

compat/Makefile
compat/kthread.c [new file with mode: 0644]
include/linux/kthread.h [new file with mode: 0644]

index a68f966da07ad7a25aa78e9d0dc81002f7877a7f..c50562f9d5662627620f110f636d6965a7e6b854 100644 (file)
@@ -23,9 +23,13 @@ compat-$(CONFIG_COMPAT_KERNEL_2_6_27) += compat-2.6.27.o
 compat-$(CONFIG_COMPAT_KERNEL_2_6_28) += compat-2.6.28.o
 compat-$(CONFIG_COMPAT_KERNEL_2_6_29) += compat-2.6.29.o
 compat-$(CONFIG_COMPAT_KERNEL_2_6_32) += compat-2.6.32.o
+
 compat-$(CONFIG_COMPAT_KERNEL_2_6_33) += compat-2.6.33.o
 compat-$(CONFIG_COMPAT_KERNEL_2_6_34) += compat-2.6.34.o
-compat-$(CONFIG_COMPAT_KERNEL_2_6_35) += compat-2.6.35.o
+compat-$(CONFIG_COMPAT_KERNEL_2_6_35) += \
+       compat-2.6.35.o \
+       kthread.o
+
 compat-$(CONFIG_COMPAT_KERNEL_2_6_36) += compat-2.6.36.o
 
 compat-$(CONFIG_COMPAT_KFIFO) += kfifo.o
diff --git a/compat/kthread.c b/compat/kthread.c
new file mode 100644 (file)
index 0000000..f1ee747
--- /dev/null
@@ -0,0 +1,177 @@
+/* Kernel thread helper functions.
+ *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
+ *
+ * Creation is done via kthreadd, so that we get a clean environment
+ * even if we're invoked from userspace (think modprobe, hotplug cpu,
+ * etc.).
+ */
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/cpuset.h>
+#include <linux/unistd.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/freezer.h>
+#include <trace/events/sched.h>
+
+/**
+ * __init_kthread_worker - initialize a kthread_worker
+ * @worker: worker to initialize
+ * @name: lockdep name for @worker->lock
+ * @key: lockdep class key for @worker->lock
+ *
+ * Sets up the worker's lock and empty work list and clears the task
+ * pointer; the task is bound later by kthread_worker_fn().  Normally
+ * invoked through the init_kthread_worker() macro, which supplies a
+ * per-call-site static @key.
+ */
+void __init_kthread_worker(struct kthread_worker *worker,
+                               const char *name,
+                               struct lock_class_key *key)
+{
+       spin_lock_init(&worker->lock);
+       lockdep_set_class_and_name(&worker->lock, key, name);
+       INIT_LIST_HEAD(&worker->work_list);
+       worker->task = NULL;
+}
+EXPORT_SYMBOL_GPL(__init_kthread_worker);
+
+/**
+ * kthread_worker_fn - kthread function to process kthread_worker
+ * @worker_ptr: pointer to initialized kthread_worker
+ *
+ * This function can be used as @threadfn to kthread_create() or
+ * kthread_run() with @worker_ptr argument pointing to an initialized
+ * kthread_worker.  The started kthread will process work_list until
+ * it is stopped with kthread_stop().  A kthread can also call
+ * this function directly after extra initialization.
+ *
+ * Different kthreads can be used for the same kthread_worker as long
+ * as there's only one kthread attached to it at any given time.  A
+ * kthread_worker without an attached kthread simply collects queued
+ * kthread_works.
+ */
+int kthread_worker_fn(void *worker_ptr)
+{
+       struct kthread_worker *worker = worker_ptr;
+       struct kthread_work *work;
+
+       WARN_ON(worker->task);  /* only one kthread may attach at a time */
+       worker->task = current;
+repeat:
+       set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */
+
+       if (kthread_should_stop()) {
+               __set_current_state(TASK_RUNNING);
+               spin_lock_irq(&worker->lock);
+               worker->task = NULL;    /* detach under lock so queuers see it */
+               spin_unlock_irq(&worker->lock);
+               return 0;
+       }
+
+       work = NULL;    /* pop the first queued work, if any */
+       spin_lock_irq(&worker->lock);
+       if (!list_empty(&worker->work_list)) {
+               work = list_first_entry(&worker->work_list,
+                                       struct kthread_work, node);
+               list_del_init(&work->node);
+       }
+       spin_unlock_irq(&worker->lock);
+
+       if (work) {
+               __set_current_state(TASK_RUNNING);
+               work->func(work);
+               smp_wmb();      /* wmb worker-b0 paired with flush-b1 */
+               work->done_seq = work->queue_seq;
+               smp_mb();       /* mb worker-b1 paired with flush-b0 */
+               if (atomic_read(&work->flushing))
+                       wake_up_all(&work->done);
+       } else if (!freezing(current))
+               schedule();     /* nothing queued; sleep until woken */
+
+       try_to_freeze();
+       goto repeat;
+}
+EXPORT_SYMBOL_GPL(kthread_worker_fn);
+
+/**
+ * queue_kthread_work - queue a kthread_work
+ * @worker: target kthread_worker
+ * @work: kthread_work to queue
+ *
+ * Queue @work to @worker for async execution by the kthread running
+ * kthread_worker_fn() on it.  Returns %true if @work was successfully
+ * queued, %false if it was already pending.
+ */
+bool queue_kthread_work(struct kthread_worker *worker,
+                       struct kthread_work *work)
+{
+       bool ret = false;
+       unsigned long flags;
+
+       spin_lock_irqsave(&worker->lock, flags);
+       if (list_empty(&work->node)) {  /* not already pending */
+               list_add_tail(&work->node, &worker->work_list);
+               work->queue_seq++;      /* ticket observed by flush_kthread_work() */
+               if (likely(worker->task))
+                       wake_up_process(worker->task);
+               ret = true;
+       }
+       spin_unlock_irqrestore(&worker->lock, flags);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(queue_kthread_work);
+
+/**
+ * flush_kthread_work - flush a kthread_work
+ * @work: work to flush
+ *
+ * If @work is queued or executing, wait for it to finish execution.
+ * Uses the queue_seq/done_seq ticket pair: the wait completes once the
+ * worker has finished running the instance we snapshotted.
+ */
+void flush_kthread_work(struct kthread_work *work)
+{
+       int seq = work->queue_seq;      /* snapshot of the newest queued run */
+
+       atomic_inc(&work->flushing);
+
+       /*
+        * mb flush-b0 paired with worker-b1, to make sure either
+        * worker sees the above increment or we see done_seq update.
+        */
+       smp_mb__after_atomic_inc();
+
+       /* A - B <= 0 tests whether B is in front of A regardless of overflow */
+       wait_event(work->done, seq - work->done_seq <= 0);
+       atomic_dec(&work->flushing);
+
+       /*
+        * rmb flush-b1 paired with worker-b0, to make sure our caller
+        * sees every change made by work->func().
+        */
+       smp_mb__after_atomic_dec();
+}
+EXPORT_SYMBOL_GPL(flush_kthread_work);
+
+/* On-stack sentinel work that signals a completion; used to drain a worker. */
+struct kthread_flush_work {
+       struct kthread_work     work;
+       struct completion       done;
+};
+
+/* kthread_work callback: wake the waiter in flush_kthread_worker(). */
+static void kthread_flush_work_fn(struct kthread_work *work)
+{
+       struct kthread_flush_work *fwork =
+               container_of(work, struct kthread_flush_work, work);
+       complete(&fwork->done);
+}
+
+/**
+ * flush_kthread_worker - flush all current works on a kthread_worker
+ * @worker: worker to flush
+ *
+ * Wait until all currently executing or pending works on @worker are
+ * finished.  Works by queueing a sentinel work behind them and waiting
+ * for its completion callback to fire.
+ */
+void flush_kthread_worker(struct kthread_worker *worker)
+{
+       struct kthread_flush_work fwork = {
+               KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
+               COMPLETION_INITIALIZER_ONSTACK(fwork.done),
+       };
+
+       queue_kthread_work(worker, &fwork.work);
+       wait_for_completion(&fwork.done);
+}
+EXPORT_SYMBOL_GPL(flush_kthread_worker);
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
new file mode 100644 (file)
index 0000000..b9c7efa
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Simple work processor based on kthread.
+ *
+ * This provides easier way to make use of kthreads.  A kthread_work
+ * can be queued and flushed using queue/flush_kthread_work()
+ * respectively.  Queued kthread_works are processed by a kthread
+ * running kthread_worker_fn().
+ *
+ * A kthread_work can't be freed while it is executing.
+ */
+#ifndef BACKPORT_LINUX_KTHREAD_H
+#define BACKPORT_LINUX_KTHREAD_H
+
+#include <linux/version.h>
+
+#include_next <linux/kthread.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
+
+struct kthread_work;
+typedef void (*kthread_work_func_t)(struct kthread_work *work);
+
+/* Collects queued kthread_works and feeds them to one attached kthread. */
+struct kthread_worker {
+       spinlock_t              lock;           /* protects work_list and task */
+       struct list_head        work_list;      /* pending kthread_works */
+       struct task_struct      *task;          /* kthread running kthread_worker_fn() */
+};
+
+struct kthread_work {
+       struct list_head        node;           /* entry in worker->work_list */
+       kthread_work_func_t     func;           /* callback run by the worker */
+       wait_queue_head_t       done;           /* flushers wait here */
+       atomic_t                flushing;       /* number of in-flight flushers */
+       int                     queue_seq;      /* bumped on each queueing */
+       int                     done_seq;       /* queue_seq of last finished run */
+};
+
+/* Static initializer for a kthread_worker; omitted fields (.task) zeroed. */
+#define KTHREAD_WORKER_INIT(worker)    {                               \
+       .lock = __SPIN_LOCK_UNLOCKED((worker).lock),                    \
+       .work_list = LIST_HEAD_INIT((worker).work_list),                \
+       }
+
+/* Static initializer for a kthread_work bound to callback @fn. */
+#define KTHREAD_WORK_INIT(work, fn)    {                               \
+       .node = LIST_HEAD_INIT((work).node),                            \
+       .func = (fn),                                                   \
+       .done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done),             \
+       .flushing = ATOMIC_INIT(0),                                     \
+       }
+
+#define DEFINE_KTHREAD_WORKER(worker)                                  \
+       struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
+
+#define DEFINE_KTHREAD_WORK(work, fn)                                  \
+       struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
+
+/*
+ * kthread_worker.lock and kthread_work.done need their own lockdep class
+ * keys if they are defined on stack with lockdep enabled.  Use the
+ * following macros when defining them on stack.
+ */
+#ifdef CONFIG_LOCKDEP
+# define KTHREAD_WORKER_INIT_ONSTACK(worker)                           \
+       ({ init_kthread_worker(&worker); worker; })
+# define DEFINE_KTHREAD_WORKER_ONSTACK(worker)                         \
+       struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
+# define KTHREAD_WORK_INIT_ONSTACK(work, fn)                           \
+       ({ init_kthread_work((&work), fn); work; })
+# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn)                         \
+       struct kthread_work work = KTHREAD_WORK_INIT_ONSTACK(work, fn)
+#else
+# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
+# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) DEFINE_KTHREAD_WORK(work, fn)
+#endif
+
+extern void __init_kthread_worker(struct kthread_worker *worker,
+                       const char *name, struct lock_class_key *key);
+
+/* Runtime worker init; supplies a static lockdep class key per call site. */
+#define init_kthread_worker(worker)                                    \
+       do {                                                            \
+               static struct lock_class_key __key;                     \
+               __init_kthread_worker((worker), "("#worker")->lock", &__key); \
+       } while (0)
+
+/* Runtime work init: zero the struct, then set up list node, func, waitq. */
+#define init_kthread_work(work, fn)                                    \
+       do {                                                            \
+               memset((work), 0, sizeof(struct kthread_work));         \
+               INIT_LIST_HEAD(&(work)->node);                          \
+               (work)->func = (fn);                                    \
+               init_waitqueue_head(&(work)->done);                     \
+       } while (0)
+
+int kthread_worker_fn(void *worker_ptr);
+
+bool queue_kthread_work(struct kthread_worker *worker,
+                       struct kthread_work *work);
+void flush_kthread_work(struct kthread_work *work);
+void flush_kthread_worker(struct kthread_worker *worker);
+
+#endif /* LINUX_VERSION_CODE < 2.6.35 */
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38))
+/*
+ * Kernels between 2.6.36 and 2.6.38 have the above functions but still lack the
+ * following.  The NUMA @node hint is dropped; fall back to kthread_create().
+ */
+#define kthread_create_on_node(threadfn, data, node, namefmt, arg...) \
+       kthread_create(threadfn, data, namefmt, ##arg)
+
+#endif /* LINUX_VERSION_CODE <= 2.6.38 */
+
+#endif /* BACKPORT_LINUX_KTHREAD_H */
+