From 9fcc15ec3c1c287a781a4620e52522b6186f26f6 Mon Sep 17 00:00:00 2001
From: Bjorn Steinbrink
Date: Fri, 1 Jun 2007 00:47:15 -0700
Subject: [PATCH] timer statistics: fix race

Fix two races in the timer stats lookup code. One by ensuring that the
initialization of a new entry is finished upon insertion of that entry.
The other by cleaning up the hash table when the entries array is
cleared, so that we don't have any "pre-inserted" entries.

Thanks to Eric Dumazet for reminding me of the memory barriers.

Signed-off-by: Bjorn Steinbrink
Signed-off-by: Ian Kumlien
Acked-by: Ingo Molnar
Cc: Eric Dumazet
Cc: Thomas Gleixner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/time/timer_stats.c | 37 +++++++++++++++++++++----------------
 1 file changed, 21 insertions(+), 16 deletions(-)

diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 868f1bceb07..fa3d380ca8c 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -117,21 +117,6 @@ static struct entry entries[MAX_ENTRIES];
 
 static atomic_t overflow_count;
 
-static void reset_entries(void)
-{
-	nr_entries = 0;
-	memset(entries, 0, sizeof(entries));
-	atomic_set(&overflow_count, 0);
-}
-
-static struct entry *alloc_entry(void)
-{
-	if (nr_entries >= MAX_ENTRIES)
-		return NULL;
-
-	return entries + nr_entries++;
-}
-
 /*
  * The entries are in a hash-table, for fast lookup:
  */
@@ -149,6 +134,22 @@ static struct entry *alloc_entry(void)
 
 static struct entry *tstat_hash_table[TSTAT_HASH_SIZE] __read_mostly;
 
+static void reset_entries(void)
+{
+	nr_entries = 0;
+	memset(entries, 0, sizeof(entries));
+	memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
+	atomic_set(&overflow_count, 0);
+}
+
+static struct entry *alloc_entry(void)
+{
+	if (nr_entries >= MAX_ENTRIES)
+		return NULL;
+
+	return entries + nr_entries++;
+}
+
 static int match_entries(struct entry *entry1, struct entry *entry2)
 {
 	return entry1->timer == entry2->timer &&
@@ -202,12 +203,15 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
 	if (curr) {
 		*curr = *entry;
 		curr->count = 0;
+		curr->next = NULL;
 		memcpy(curr->comm, comm, TASK_COMM_LEN);
+
+		smp_mb(); /* Ensure that curr is initialized before insert */
+
 		if (prev)
 			prev->next = curr;
 		else
 			*head = curr;
-		curr->next = NULL;
 	}
  out_unlock:
 	spin_unlock(&table_lock);
@@ -360,6 +364,7 @@ static ssize_t tstats_write(struct file *file, const char __user *buf,
 		if (!active) {
 			reset_entries();
 			time_start = ktime_get();
+			smp_mb();
 			active = 1;
 		}
 		break;
-- 
2.41.1
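
For readers unfamiliar with the ordering requirement, here is a minimal
sketch, not part of the patch, of the publish-after-init pattern that the
tstat_lookup() change follows, shown as a head-insert into a chain that
readers walk without taking table_lock. The names "node" and
"publish_node" are invented for this example; only the smp_mb() call
corresponds to what the patch adds (a write barrier such as smp_wmb()
would also do on the writer side), and it assumes a kernel build context
where the barrier primitives are available.

/*
 * Sketch only: fully initialize a new element, then order that
 * initialization before the store that makes it reachable.
 */
struct node {
	int		data;
	struct node	*next;
};

static void publish_node(struct node **head, struct node *n, int data)
{
	n->data = data;
	n->next = *head;	/* initialize every field first... */

	smp_mb();		/* ...and order that before publication */

	*head = n;		/* lockless readers may find n from here on */
}

Without the barrier, the store to *head can become visible before the
stores that initialize n, so a concurrent lockless reader can follow an
uninitialized ->next pointer or see stale fields; with it, a reader that
finds the new element no longer races with its initialization. The second
half of the fix is the same idea applied to reuse: reset_entries() must
clear tstat_hash_table[] along with entries[], otherwise the hash chains
still point at entries that have just been zeroed.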