locking, timer_stats: Annotate table_lock as raw
author	Thomas Gleixner <tglx@linutronix.de>
Wed, 16 Jun 2010 14:58:34 +0000 (16:58 +0200)
committer	Ingo Molnar <mingo@elte.hu>
Tue, 13 Sep 2011 09:12:00 +0000 (11:12 +0200)
The table_lock spinlock can be taken in atomic context and therefore
must not become a preemptible sleeping lock on -rt - annotate it as raw.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Reported-by: Andreas Sundebo <kernel@sundebo.dk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Andreas Sundebo <kernel@sundebo.dk>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/time/timer_stats.c

index a5d0a3a85dd8233ddf515c50d73c4f1a585d6359..0b537f27b5591d5300427fffa4948619c0d0f4d3 100644 (file)
@@ -81,7 +81,7 @@ struct entry {
 /*
  * Spinlock protecting the tables - not taken during lookup:
  */
-static DEFINE_SPINLOCK(table_lock);
+static DEFINE_RAW_SPINLOCK(table_lock);
 
 /*
  * Per-CPU lookup locks for fast hash lookup:
@@ -188,7 +188,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
        prev = NULL;
        curr = *head;
 
-       spin_lock(&table_lock);
+       raw_spin_lock(&table_lock);
        /*
         * Make sure we have not raced with another CPU:
         */
@@ -215,7 +215,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
                        *head = curr;
        }
  out_unlock:
-       spin_unlock(&table_lock);
+       raw_spin_unlock(&table_lock);
 
        return curr;
 }
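
For context, a minimal editorial sketch of the pattern this patch applies (not
part of the commit or the kernel tree): on PREEMPT_RT, a plain spinlock_t is
substituted with a sleeping lock, so a lock that can be acquired from atomic
context must be declared raw_spinlock_t, which stays a true busy-waiting lock
in every configuration. The names example_lock, example_count and example_inc
below are illustrative only:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);	/* stays a spinning lock on -rt */
static unsigned long example_count;

/* May be called from atomic context, e.g. a timer callback. */
static void example_inc(void)
{
	/*
	 * raw_spin_lock() disables preemption even on -rt, so the
	 * critical section must stay short and must never sleep.
	 */
	raw_spin_lock(&example_lock);
	example_count++;
	raw_spin_unlock(&example_lock);
}

As the commit message notes, on mainline (non-rt) kernels spin_lock() is a thin
wrapper around raw_spin_lock(), so the conversion changes no behaviour there;
it only documents the low-level nature of the lock, and lockdep keeps tracking
it as before.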