perf_counter: Log irq_period changes
author	Peter Zijlstra <a.p.zijlstra@chello.nl>
Wed, 20 May 2009 10:21:20 +0000 (12:21 +0200)
committer	Ingo Molnar <mingo@elte.hu>
Wed, 20 May 2009 10:43:33 +0000 (12:43 +0200)
For the dynamic irq_period code, log whenever we change the period so that
analyzing code can normalize the event flow.

[ Impact: add new feature to allow more precise profiling ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090520102553.298769743@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/perf_counter.h
kernel/perf_counter.c

diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index c8c1dfc22c938a65171670ea50b92f692f6c1867..f612941ef46ed5aebc2fc86e40f343053d9afb3c 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -257,6 +257,15 @@ enum perf_event_type {
         */
        PERF_EVENT_COMM                 = 3,
 
+       /*
+        * struct {
+        *      struct perf_event_header        header;
+        *      u64                             time;
+        *      u64                             irq_period;
+        * };
+        */
+       PERF_EVENT_PERIOD               = 4,
+
        /*
         * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
         * will be PERF_RECORD_*
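
For reference, a userspace consumer reading the counter's mmap ring buffer would see records laid out exactly as documented above; struct perf_event_header in this file is a u32 type followed by u16 misc and u16 size. The mirror below is an illustrative sketch for such a reader, not a declaration from this tree:

#include <stdint.h>

/*
 * Illustrative userspace mirror of the PERF_EVENT_PERIOD record
 * documented above (header, timestamp, new period).
 */
struct u_perf_event_header {
	uint32_t type;		/* PERF_EVENT_PERIOD (4) for these records */
	uint16_t misc;
	uint16_t size;		/* total record size, header included */
};

struct u_period_event {
	struct u_perf_event_header header;
	uint64_t time;		/* sched_clock() at the moment of the change */
	uint64_t period;	/* the new irq_period */
};

A reader walking the buffer can switch on header.type and, on seeing a PERF_EVENT_PERIOD record, adopt the new period for all samples that follow.
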
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 64113e6d1942febe4400f2d87ebb4cd069473837..db02eb16c777fd273307b1d1eb26f9c2f78b5c66 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1046,7 +1046,9 @@ int perf_counter_task_enable(void)
        return 0;
 }
 
-void perf_adjust_freq(struct perf_counter_context *ctx)
+static void perf_log_period(struct perf_counter *counter, u64 period);
+
+static void perf_adjust_freq(struct perf_counter_context *ctx)
 {
        struct perf_counter *counter;
        u64 irq_period;
@@ -1072,6 +1074,8 @@ void perf_adjust_freq(struct perf_counter_context *ctx)
                if (!irq_period)
                        irq_period = 1;
 
+               perf_log_period(counter, irq_period);
+
                counter->hw.irq_period = irq_period;
                counter->hw.interrupts = 0;
        }
@@ -2406,6 +2410,40 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
        perf_counter_mmap_event(&mmap_event);
 }
 
+/*
+ * Log irq_period changes so analyzing tools can normalize the event flow.
+ */
+
+static void perf_log_period(struct perf_counter *counter, u64 period)
+{
+       struct perf_output_handle handle;
+       int ret;
+
+       struct {
+               struct perf_event_header        header;
+               u64                             time;
+               u64                             period;
+       } freq_event = {
+               .header = {
+                       .type = PERF_EVENT_PERIOD,
+                       .misc = 0,
+                       .size = sizeof(freq_event),
+               },
+               .time = sched_clock(),
+               .period = period,
+       };
+
+       if (counter->hw.irq_period == period)
+               return;
+
+       ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
+       if (ret)
+               return;
+
+       perf_output_put(&handle, freq_event);
+       perf_output_end(&handle);
+}
+
 /*
  * Generic counter overflow handling.
  */
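
To make "normalize the event flow" concrete: with these records interleaved in the stream, an analysis tool can carry the period currently in force and weight each overflow sample by it, approximating the raw event count even while the kernel retunes irq_period. A minimal sketch under that assumption; the record type and helper below are hypothetical, not perf API:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical pre-parsed view of the record stream. */
enum rec_kind { REC_PERIOD, REC_OVERFLOW };

struct record {
	enum rec_kind	kind;
	uint64_t	period;		/* valid for REC_PERIOD records */
};

/*
 * Each overflow sample stands for roughly 'current_period' raw events,
 * so summing the period in force at each sample approximates the total
 * count even while irq_period changes underneath the samples.
 */
static uint64_t estimate_total_events(const struct record *recs, size_t n,
				      uint64_t initial_period)
{
	uint64_t current_period = initial_period;
	uint64_t total = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		if (recs[i].kind == REC_PERIOD)
			current_period = recs[i].period;  /* period changed */
		else
			total += current_period;  /* one sample ~ period events */
	}
	return total;
}

Without the PERF_EVENT_PERIOD records, a tool would have to assume a single fixed period for the whole run and would systematically mis-weight samples taken while perf_adjust_freq() was still converging on the target frequency.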