diff -purN -X /home/mbligh/.diff.exclude 350-qlogic/arch/i386/Kconfig 360-schedstat/arch/i386/Kconfig
--- 350-qlogic/arch/i386/Kconfig	2003-10-29 13:58:49.000000000 -0800
+++ 360-schedstat/arch/i386/Kconfig	2003-10-29 13:59:00.000000000 -0800
@@ -1348,6 +1348,19 @@ config FRAME_POINTER
 	  If you don't debug the kernel, you can say N, but we may not be able
 	  to solve problems without frame pointers.
 
+config SCHEDSTATS
+	bool "Collect scheduler statistics"
+	depends on PROC_FS
+	default y
+	help
+	  If you say Y here, additional code will be inserted into the
+	  scheduler and related routines to collect statistics about
+	  scheduler behavior and provide them in /proc/schedstat.  These
+	  stats may be useful for both tuning and debugging the scheduler.
+	  If you aren't debugging the scheduler or trying to tune a specific
+	  application, you can say N to avoid the very slight overhead
+	  this adds.
+
 config X86_EXTRA_IRQS
 	bool
 	depends on X86_LOCAL_APIC || X86_VOYAGER
diff -purN -X /home/mbligh/.diff.exclude 350-qlogic/fs/proc/array.c 360-schedstat/fs/proc/array.c
--- 350-qlogic/fs/proc/array.c	2003-10-21 11:16:10.000000000 -0700
+++ 360-schedstat/fs/proc/array.c	2003-10-29 13:59:00.000000000 -0800
@@ -345,7 +345,7 @@ int proc_pid_stat(struct task_struct *ta
 	read_unlock(&tasklist_lock);
 	res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \
 %lu %lu %lu %lu %lu %ld %ld %ld %ld %d %ld %llu %lu %ld %lu %lu %lu %lu %lu \
-%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n",
+%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu %lu %lu %lu\n",
 		task->pid,
 		task->comm,
 		state,
@@ -391,7 +391,14 @@ int proc_pid_stat(struct task_struct *ta
 		task->exit_signal,
 		task_cpu(task),
 		task->rt_priority,
-		task->policy);
+#ifdef CONFIG_SCHEDSTATS
+		task->policy,
+		task->sched_info.cpu_time,
+		task->sched_info.run_delay,
+		task->sched_info.pcnt);
+#else
+		task->policy, 0UL, 0UL, 0UL);
+#endif /* CONFIG_SCHEDSTATS */
 	if(mm)
 		mmput(mm);
 	return res;
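
The three columns appended to /proc/<pid>/stat above are cpu_time, run_delay
and pcnt, in that order, and they land at the very end of the line.  As a
rough illustration only (not part of the patch), a userspace reader can take
the last three blank-separated fields of that line; this assumes a kernel
built with CONFIG_SCHEDSTATS=y and the format string above:

/*
 * Illustrative sketch, not part of the patch: print the three schedstat
 * fields this patch appends to /proc/<pid>/stat (values are in jiffies,
 * except pcnt, which counts timeslices).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
	char path[64], line[4096];
	unsigned long v[3] = { 0, 0, 0 };
	char *p;
	FILE *f;
	int i;

	snprintf(path, sizeof(path), "/proc/%s/stat",
		 argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (!fgets(line, sizeof(line), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* walk backwards over the last three blank-separated fields */
	p = line + strlen(line);
	for (i = 0; i < 3; i++) {
		while (p > line && (p[-1] == ' ' || p[-1] == '\n'))
			p--;
		while (p > line && p[-1] != ' ')
			p--;
		v[2 - i] = strtoul(p, NULL, 10);
	}
	printf("cpu_time=%lu run_delay=%lu pcnt=%lu\n", v[0], v[1], v[2]);
	return 0;
}
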
diff -purN -X /home/mbligh/.diff.exclude 350-qlogic/fs/proc/proc_misc.c 360-schedstat/fs/proc/proc_misc.c
--- 350-qlogic/fs/proc/proc_misc.c	2003-10-29 13:58:49.000000000 -0800
+++ 360-schedstat/fs/proc/proc_misc.c	2003-10-29 13:59:00.000000000 -0800
@@ -325,6 +325,10 @@ static struct file_operations proc_vmsta
 	.release	= seq_release,
 };
 
+#ifdef CONFIG_SCHEDSTATS
+extern struct file_operations proc_schedstat_operations;
+#endif
+
 #ifdef CONFIG_PROC_HARDWARE
 static int hardware_read_proc(char *page, char **start, off_t off,
 				 int count, int *eof, void *data)
@@ -820,6 +824,9 @@ void __init proc_misc_init(void)
 #ifdef CONFIG_MODULES
 	create_seq_entry("modules", 0, &proc_modules_operations);
 #endif
+#ifdef CONFIG_SCHEDSTATS
+	create_seq_entry("schedstat", 0, &proc_schedstat_operations);
+#endif
 #ifdef CONFIG_NUMA
 	create_seq_entry("meminfo.numa",0,&proc_meminfo_numa_operations);
 #endif
diff -purN -X /home/mbligh/.diff.exclude 350-qlogic/include/linux/sched.h 360-schedstat/include/linux/sched.h
--- 350-qlogic/include/linux/sched.h	2003-10-29 13:58:27.000000000 -0800
+++ 360-schedstat/include/linux/sched.h	2003-10-29 13:59:00.000000000 -0800
@@ -101,6 +101,16 @@ extern unsigned long nr_running_cpu(int 
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_iowait(void);
 
+#ifdef CONFIG_SCHEDSTATS
+struct sched_info;
+extern void cpu_sched_info(struct sched_info *, int);
+#define SCHEDSTAT_INC(cpu, field)	schedstats[cpu].field++
+#define SCHEDSTAT_ADD(cpu, field, amt)	schedstats[cpu].field += (amt)
+#else
+#define SCHEDSTAT_INC(cpu, field)	do { } while (0)
+#define SCHEDSTAT_ADD(cpu, field, amt)	do { } while (0)
+#endif
+
 #include <linux/time.h>
 #include <linux/param.h>
 #include <linux/resource.h>
@@ -335,6 +345,18 @@ struct k_itimer {
 	struct sigqueue *sigq;		/* signal queue entry. */
 };
 
+#ifdef CONFIG_SCHEDSTATS
+struct sched_info {
+	/* cumulative counters */
+	unsigned long	cpu_time,	/* time spent on the cpu */
+			run_delay,	/* time spent waiting on a runqueue */
+			pcnt;		/* # of timeslices run on this cpu */
+
+	/* timestamps */
+	unsigned long	last_arrival,	/* when we last ran on a cpu */
+			last_queued;	/* when we were last queued to run */
+};
+#endif /* CONFIG_SCHEDSTATS */
 
 struct io_context;			/* See blkdev.h */
 void exit_io_context(void);
@@ -361,6 +383,10 @@ struct task_struct {
 	cpumask_t cpus_allowed;
 	unsigned int time_slice, first_time_slice;
 
+#ifdef CONFIG_SCHEDSTATS
+	struct sched_info sched_info;
+#endif /* CONFIG_SCHEDSTATS */
+
 	struct list_head tasks;
 	struct list_head ptrace_children;
 	struct list_head ptrace_list;
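
All of the sched_info counters are cumulative: cpu_time and run_delay are in
jiffies, pcnt counts timeslices received.  They only become meaningful as
deltas between two snapshots, typically expressed per timeslice.  A minimal
sketch of that arithmetic follows; the helper below is made up for
illustration and is not part of the patch:

/*
 * Illustration only, not part of the patch: given two snapshots of a
 * task's (cpu_time, run_delay, pcnt) triple, report the average timeslice
 * length and the average delay before getting the cpu, in jiffies.
 */
#include <stdio.h>

static void report_sched_info_delta(unsigned long cpu_time0,
				    unsigned long run_delay0,
				    unsigned long pcnt0,
				    unsigned long cpu_time1,
				    unsigned long run_delay1,
				    unsigned long pcnt1)
{
	unsigned long slices = pcnt1 - pcnt0;

	if (!slices)	/* task never reached the cpu in between */
		return;
	printf("avg timeslice: %lu jiffies, avg run delay: %lu jiffies\n",
	       (cpu_time1 - cpu_time0) / slices,
	       (run_delay1 - run_delay0) / slices);
}

int main(void)
{
	/* e.g. two samples of the last three /proc/<pid>/stat fields */
	report_sched_info_delta(1000, 200, 50, 1600, 260, 80);
	return 0;
}
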
diff -purN -X /home/mbligh/.diff.exclude 350-qlogic/kernel/fork.c 360-schedstat/kernel/fork.c
--- 350-qlogic/kernel/fork.c	2003-10-29 13:58:27.000000000 -0800
+++ 360-schedstat/kernel/fork.c	2003-10-29 13:59:00.000000000 -0800
@@ -903,6 +903,9 @@ struct task_struct *copy_process(unsigne
 	p->start_time = get_jiffies_64();
 	p->security = NULL;
 	p->io_context = NULL;
+#ifdef CONFIG_SCHEDSTATS
+	memset(&p->sched_info, 0, sizeof(p->sched_info));
+#endif /* CONFIG_SCHEDSTATS */
 
 	retval = -ENOMEM;
 	if ((retval = security_task_alloc(p)))
diff -purN -X /home/mbligh/.diff.exclude 350-qlogic/kernel/sched.c 360-schedstat/kernel/sched.c
--- 350-qlogic/kernel/sched.c	2003-10-29 13:20:32.000000000 -0800
+++ 360-schedstat/kernel/sched.c	2003-10-29 13:59:00.000000000 -0800
@@ -37,6 +37,8 @@
 #include <linux/rcupdate.h>
 #include <linux/cpu.h>
 #include <linux/percpu.h>
+#include <linux/seq_file.h>
+#include <linux/times.h>
 
 #ifdef CONFIG_NUMA
 #define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu))
@@ -210,6 +212,9 @@ struct runqueue {
 	unsigned long nr_running, nr_switches, expired_timestamp,
 			nr_uninterruptible;
 	task_t *curr, *idle;
+#ifdef CONFIG_SCHEDSTATS
+	int cpu;  /* for easy reverse lookups with per-cpu runqueues */
+#endif
 	struct mm_struct *prev_mm;
 	prio_array_t *active, *expired, arrays[2];
 	int prev_cpu_load[NR_CPUS];
@@ -221,6 +226,10 @@ struct runqueue {
 	struct list_head migration_queue;
 
 	atomic_t nr_iowait;
+
+#ifdef CONFIG_SCHEDSTATS
+	struct sched_info info;
+#endif
 };
 
 static DEFINE_PER_CPU(struct runqueue, runqueues);
@@ -284,6 +293,140 @@ __init void node_nr_running_init(void)
 
 #endif /* CONFIG_NUMA */
 
+
+#ifdef CONFIG_SCHEDSTATS
+struct schedstat {
+	/* sys_sched_yield stats */
+	unsigned long yld_exp_empty;
+	unsigned long yld_act_empty;
+	unsigned long yld_both_empty;
+	unsigned long yld_cnt;
+
+	/* schedule stats */
+	unsigned long sched_noswitch;
+	unsigned long sched_switch;
+	unsigned long sched_cnt;
+
+	/* load_balance stats */
+	unsigned long lb_imbalance;
+	unsigned long lb_idle;
+	unsigned long lb_busy;
+	unsigned long lb_resched;
+	unsigned long lb_cnt;
+	unsigned long lb_nobusy;
+	unsigned long lb_bnode;
+
+	/* pull_task stats */
+	unsigned long pt_gained;
+	unsigned long pt_lost;
+	unsigned long pt_node_gained;
+	unsigned long pt_node_lost;
+
+	/* balance_node stats */
+	unsigned long bn_cnt;
+	unsigned long bn_idle;
+} ____cacheline_aligned;
+
+/*
+ * bump this up when changing the output format or the meaning of an existing
+ * format, so that tools can adapt (or abort)
+ */
+#define SCHEDSTAT_VERSION	4
+
+struct schedstat schedstats[NR_CPUS];
+
+static int show_schedstat(struct seq_file *seq, void *v)
+{
+	struct schedstat sums;
+	int i;
+
+	memset(&sums, 0, sizeof(sums));
+	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
+	seq_printf(seq, "timestamp %lu\n", jiffies);
+	for (i = 0; i < NR_CPUS; i++) {
+		struct sched_info info;
+
+		if (!cpu_online(i))
+			continue;
+
+		cpu_sched_info(&info, i);
+
+		sums.yld_exp_empty += schedstats[i].yld_exp_empty;
+		sums.yld_act_empty += schedstats[i].yld_act_empty;
+		sums.yld_both_empty += schedstats[i].yld_both_empty;
+		sums.yld_cnt += schedstats[i].yld_cnt;
+		sums.sched_noswitch += schedstats[i].sched_noswitch;
+		sums.sched_switch += schedstats[i].sched_switch;
+		sums.sched_cnt += schedstats[i].sched_cnt;
+		sums.lb_idle += schedstats[i].lb_idle;
+		sums.lb_busy += schedstats[i].lb_busy;
+		sums.lb_resched += schedstats[i].lb_resched;
+		sums.lb_cnt += schedstats[i].lb_cnt;
+		sums.lb_imbalance += schedstats[i].lb_imbalance;
+		sums.lb_nobusy += schedstats[i].lb_nobusy;
+		sums.lb_bnode += schedstats[i].lb_bnode;
+		sums.pt_node_gained += schedstats[i].pt_node_gained;
+		sums.pt_node_lost += schedstats[i].pt_node_lost;
+		sums.pt_gained += schedstats[i].pt_gained;
+		sums.pt_lost += schedstats[i].pt_lost;
+		sums.bn_cnt += schedstats[i].bn_cnt;
+		sums.bn_idle += schedstats[i].bn_idle;
+		seq_printf(seq, 
+		    "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu "
+		    "%lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
+		    i, schedstats[i].yld_both_empty,
+		    schedstats[i].yld_act_empty, schedstats[i].yld_exp_empty,
+		    schedstats[i].yld_cnt, schedstats[i].sched_noswitch,
+		    schedstats[i].sched_switch, schedstats[i].sched_cnt,
+		    schedstats[i].lb_idle, schedstats[i].lb_busy,
+		    schedstats[i].lb_resched,
+		    schedstats[i].lb_cnt, schedstats[i].lb_imbalance,
+		    schedstats[i].lb_nobusy, schedstats[i].lb_bnode,
+		    schedstats[i].pt_gained, schedstats[i].pt_lost,
+		    schedstats[i].pt_node_gained, schedstats[i].pt_node_lost,
+		    schedstats[i].bn_cnt, schedstats[i].bn_idle,
+		    info.cpu_time, info.run_delay, info.pcnt);
+	}
+	seq_printf(seq, 
+	    "totals %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu "
+	    "%lu %lu %lu %lu %lu %lu %lu\n",
+	    sums.yld_both_empty, sums.yld_act_empty, sums.yld_exp_empty,
+	    sums.yld_cnt, sums.sched_noswitch, sums.sched_switch,
+	    sums.sched_cnt, sums.lb_idle, sums.lb_busy, sums.lb_resched,
+	    sums.lb_cnt, sums.lb_imbalance, sums.lb_nobusy, sums.lb_bnode,
+	    sums.pt_gained, sums.pt_lost, sums.pt_node_gained,
+	    sums.pt_node_lost, sums.bn_cnt, sums.bn_idle);
+
+	return 0;
+}
+
+static int schedstat_open(struct inode *inode, struct file *file)
+{
+	unsigned size = 4096 * (1 + num_online_cpus() / 32);
+	char *buf = kmalloc(size, GFP_KERNEL);
+	struct seq_file *m;
+	int res;
+
+	if (!buf)
+		return -ENOMEM;
+	res = single_open(file, show_schedstat, NULL);
+	if (!res) {
+		m = file->private_data;
+		m->buf = buf;
+		m->size = size;
+	} else
+		kfree(buf);
+	return res;
+}
+
+struct file_operations proc_schedstat_operations = {
+	.open    = schedstat_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+#endif
+
 /*
  * task_rq_lock - lock the runqueue a given task resides on and disable
  * interrupts.  Note the ordering: we can safely lookup the task_rq without
@@ -328,6 +471,113 @@ static inline void rq_unlock(runqueue_t 
 	spin_unlock_irq(&rq->lock);
 }
 
+#ifdef CONFIG_SCHEDSTATS
+/*
+ * Called when a process is dequeued from the active array and given
+ * the cpu.  We should note that with the exception of interactive
+ * tasks, the expired queue will become the active queue after the active
+ * queue is empty, without explicitly dequeuing and requeuing tasks in the
+ * expired queue.  (Interactive tasks may be requeued directly to the
+ * active queue, thus delaying tasks in the expired queue from running;
+ * see scheduler_tick()).
+ *
+ * This function is only called from sched_info_arrive(), rather than
+ * dequeue_task(). Even though a task may be queued and dequeued multiple
+ * times as it is shuffled about, we're really interested in knowing how
+ * long it was from the *first* time it was queued to the time that it
+ * finally hit a cpu.
+ */
+static inline void sched_info_dequeued(task_t *t)
+{
+	t->sched_info.last_queued = 0;
+}
+
+/*
+ * Called when a task finally hits the cpu.  We can now calculate how
+ * long it was waiting to run.  We also note when it began so that we
+ * can keep stats on how long its timeslice is.
+ */
+static inline void sched_info_arrive(task_t *t)
+{
+	unsigned long now  = jiffies;
+	unsigned long diff = 0;
+	struct runqueue *rq = task_rq(t);
+
+	if (t->sched_info.last_queued)
+		diff = now - t->sched_info.last_queued;
+	sched_info_dequeued(t);
+	t->sched_info.run_delay += diff;
+	t->sched_info.last_arrival = now;
+	t->sched_info.pcnt++;
+
+	if (!rq)
+		return;
+
+	rq->info.run_delay += diff;
+	rq->info.pcnt++;
+}
+
+/*
+ * Called when a process is queued into either the active or expired
+ * array.  The time is noted and later used to determine how long the
+ * task had to wait to reach the cpu.  Since the expired queue becomes
+ * the active queue once the active queue is empty, without dequeuing
+ * and requeuing any tasks, we are interested in queuing to either. It
+ * is unusual but not impossible for tasks to be dequeued and immediately
+ * requeued in the same or another array: this can happen in sched_yield(),
+ * set_user_nice(), and even load_balance() as it moves tasks from runqueue
+ * to runqueue.
+ *
+ * This function is only called from enqueue_task(), but also only updates
+ * the timestamp if it is not already set.  It's assumed that
+ * sched_info_dequeued() will clear that stamp when appropriate.
+ */
+static inline void sched_info_queued(task_t *t)
+{
+	if (!t->sched_info.last_queued)
+		t->sched_info.last_queued = jiffies;
+}
+
+/*
+ * Called when a process ceases being the active-running process, either
+ * voluntarily or involuntarily.  Now we can calculate how long we ran.
+ */
+static inline void sched_info_depart(task_t *t)
+{
+	struct runqueue *rq = task_rq(t);
+	unsigned long diff = jiffies - t->sched_info.last_arrival;
+
+	t->sched_info.cpu_time += diff;
+
+	if (rq)
+		rq->info.cpu_time += diff;
+}
+
+/*
+ * Called when tasks are switched involuntarily due, typically, to expiring
+ * their time slice.  (This may also be called when switching to or from
+ * the idle task.)  We are only called when prev != next.
+ */
+static inline void sched_info_switch(task_t *prev, task_t *next)
+{
+	struct runqueue *rq = task_rq(prev);
+
+	/*
+	 * prev now departs the cpu.  It's not interesting to record
+	 * stats about how efficient we were at scheduling the idle
+	 * process, however.
+	 */
+	if (prev != rq->idle)
+		sched_info_depart(prev);
+
+	if (next != rq->idle)
+		sched_info_arrive(next);
+}
+#else
+#define sched_info_queued(t)		do { } while (0)
+#define sched_info_switch(t, next)	do { } while (0)
+#endif /* CONFIG_SCHEDSTATS */
+
 /*
  * Adding/removing a task to/from a priority array:
  */
@@ -341,6 +591,7 @@ static inline void dequeue_task(struct t
 
 static inline void enqueue_task(struct task_struct *p, prio_array_t *array)
 {
+	sched_info_queued(p);
 	list_add_tail(&p->run_list, array->queue + p->prio);
 	__set_bit(p->prio, array->bitmap);
 	array->nr_active++;
@@ -875,6 +1126,13 @@ unsigned long nr_iowait(void)
 	return sum;
 }
 
+#ifdef CONFIG_SCHEDSTATS
+void cpu_sched_info(struct sched_info *info, int cpu)
+{
+	memcpy(info, &cpu_rq(cpu)->info, sizeof(struct sched_info));
+}
+#endif /* CONFIG_SCHEDSTATS */
+
 /*
  * double_rq_lock - safely lock two runqueues
  *
@@ -1168,6 +1426,14 @@ out:
  */
 static inline void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, runqueue_t *this_rq, int this_cpu)
 {
+#if defined(CONFIG_NUMA) && defined(CONFIG_SCHEDSTATS)
+	if (cpu_to_node(this_cpu) != cpu_to_node(src_rq->cpu)) {
+		SCHEDSTAT_INC(this_cpu, pt_node_gained);
+		SCHEDSTAT_INC(src_rq->cpu, pt_node_lost);
+	}
+#endif
+	SCHEDSTAT_INC(this_cpu, pt_gained);
+	SCHEDSTAT_INC(src_rq->cpu, pt_lost);
 	dequeue_task(p, src_array);
 	nr_running_dec(src_rq);
 	set_task_cpu(p, this_cpu);
@@ -1220,9 +1486,14 @@ static void load_balance(runqueue_t *thi
 	struct list_head *head, *curr;
 	task_t *tmp;
 
+	SCHEDSTAT_INC(this_cpu, lb_cnt);
 	busiest = find_busiest_queue(this_rq, this_cpu, idle, &imbalance, cpumask);
-	if (!busiest)
-		goto out;
+	if (!busiest) {
+		SCHEDSTAT_INC(this_cpu, lb_nobusy);
+		goto out;
+	}
+
+	SCHEDSTAT_ADD(this_cpu, lb_imbalance, imbalance);
 
 	/*
 	 * We only want to steal a number of tasks equal to 1/2 the imbalance,
@@ -1311,8 +1582,10 @@ static void balance_node(runqueue_t *thi
 {
 	int node = find_busiest_node(cpu_to_node(this_cpu));
 
+	SCHEDSTAT_INC(this_cpu, bn_cnt);
 	if (node >= 0) {
 		cpumask_t cpumask = node_to_cpumask(node);
+		SCHEDSTAT_INC(this_cpu, lb_bnode);
 		cpu_set(this_cpu, cpumask);
 		spin_lock(&this_rq->lock);
 		load_balance(this_rq, idle, cpumask);
@@ -1323,9 +1596,7 @@ static void balance_node(runqueue_t *thi
 
 static void rebalance_tick(runqueue_t *this_rq, int idle)
 {
-#ifdef CONFIG_NUMA
 	int this_cpu = smp_processor_id();
-#endif
 	unsigned long j = jiffies;
 
 	/*
@@ -1338,11 +1609,14 @@ static void rebalance_tick(runqueue_t *t
 	 */
 	if (idle) {
 #ifdef CONFIG_NUMA
-		if (!(j % IDLE_NODE_REBALANCE_TICK))
+		if (!(j % IDLE_NODE_REBALANCE_TICK)) {
+			SCHEDSTAT_INC(this_cpu, bn_idle);
 			balance_node(this_rq, idle, this_cpu);
+		}
 #endif
 		if (!(j % IDLE_REBALANCE_TICK)) {
 			spin_lock(&this_rq->lock);
+			SCHEDSTAT_INC(this_cpu, lb_idle);
 			load_balance(this_rq, idle, cpu_to_node_mask(this_cpu));
 			spin_unlock(&this_rq->lock);
 		}
@@ -1354,6 +1628,7 @@ static void rebalance_tick(runqueue_t *t
 #endif
 	if (!(j % BUSY_REBALANCE_TICK)) {
 		spin_lock(&this_rq->lock);
+		SCHEDSTAT_INC(this_cpu, lb_busy);
 		load_balance(this_rq, idle, cpu_to_node_mask(this_cpu));
 		spin_unlock(&this_rq->lock);
 	}
@@ -1514,13 +1789,14 @@ asmlinkage void schedule(void)
 	struct list_head *queue;
 	unsigned long long now;
 	unsigned long run_time;
-	int idx;
+	int idx, this_cpu = smp_processor_id();
 
 	/*
 	 * Test if we are atomic.  Since do_exit() needs to call into
 	 * schedule() atomically, we ignore that path for now.
 	 * Otherwise, whine if we are scheduling when we should not be.
 	 */
+	SCHEDSTAT_INC(this_cpu, sched_cnt);
 	if (likely(!(current->state & (TASK_DEAD | TASK_ZOMBIE)))) {
 		if (unlikely(in_atomic())) {
 			printk(KERN_ERR "bad: scheduling while atomic!\n");
@@ -1573,6 +1849,7 @@ need_resched:
 pick_next_task:
 	if (unlikely(!rq->nr_running)) {
 #ifdef CONFIG_SMP
+		SCHEDSTAT_INC(this_cpu, lb_resched);
 		load_balance(rq, 1, cpu_to_node_mask(smp_processor_id()));
 		if (rq->nr_running)
 			goto pick_next_task;
@@ -1587,11 +1864,13 @@ pick_next_task:
 		/*
 		 * Switch the active and expired arrays.
 		 */
+		SCHEDSTAT_INC(this_cpu, sched_switch);
 		rq->active = rq->expired;
 		rq->expired = array;
 		array = rq->active;
 		rq->expired_timestamp = 0;
 	}
+	SCHEDSTAT_INC(this_cpu, sched_noswitch);
 
 	idx = sched_find_first_bit(array->bitmap);
 	queue = array->queue + idx;
@@ -1622,6 +1901,7 @@ switch_tasks:
 	}
 	prev->timestamp = now;
 
+	sched_info_switch(prev, next);
 	if (likely(prev != next)) {
 		next->timestamp = now;
 		rq->nr_switches++;
@@ -2311,6 +2591,9 @@ asmlinkage long sys_sched_yield(void)
 {
 	runqueue_t *rq = this_rq_lock();
 	prio_array_t *array = current->array;
+#ifdef CONFIG_SCHEDSTATS
+	int this_cpu = smp_processor_id();
+#endif /* CONFIG_SCHEDSTATS */
 
 	/*
 	 * We implement yielding by moving the task into the expired
@@ -2319,7 +2602,16 @@ asmlinkage long sys_sched_yield(void)
 	 * (special rule: RT tasks will just roundrobin in the active
 	 *  array.)
 	 */
+	SCHEDSTAT_INC(this_cpu, yld_cnt);
 	if (likely(!rt_task(current))) {
+		if (current->array->nr_active == 1) {
+			SCHEDSTAT_INC(this_cpu, yld_act_empty);
+			if (!rq->expired->nr_active) {
+				SCHEDSTAT_INC(this_cpu, yld_both_empty);
+			}
+		} else if (!rq->expired->nr_active) {
+			SCHEDSTAT_INC(this_cpu, yld_exp_empty);
+		}
 		dequeue_task(current, array);
 		enqueue_task(current, rq->expired);
 	} else {
@@ -2861,6 +3153,9 @@ void __init sched_init(void)
 		rq = cpu_rq(i);
 		rq->active = rq->arrays;
 		rq->expired = rq->arrays + 1;
+#ifdef CONFIG_SCHEDSTATS
+		rq->cpu = i;
+#endif /* CONFIG_SCHEDSTATS */
 		spin_lock_init(&rq->lock);
 		INIT_LIST_HEAD(&rq->migration_queue);
 		atomic_set(&rq->nr_iowait, 0);
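
For reference, the version-4 /proc/schedstat layout produced by
show_schedstat() above is: a "version" line, a "timestamp" line (in jiffies),
one "cpuN" line per online cpu carrying 23 counters (the twenty schedstat
fields in the order of the per-cpu seq_printf() above, followed by that
runqueue's cpu_time, run_delay and pcnt), and a "totals" line with the twenty
summed schedstat fields.  All counters are cumulative, so tools normally
sample twice and subtract.  The sketch below is illustrative only and not
part of the patch; the offsets simply mirror the seq_printf() order
(totals[3] = yld_cnt, totals[6] = sched_cnt, totals[10] = lb_cnt):

/*
 * Illustrative sketch, not part of the patch: sample the "totals" line of
 * /proc/schedstat twice and report how many schedule(), sys_sched_yield()
 * and load_balance() calls happened in between.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define NFIELDS 20

static int read_totals(unsigned long f[NFIELDS])
{
	char line[1024];
	FILE *fp = fopen("/proc/schedstat", "r");
	int n = -1;

	if (!fp)
		return -1;
	while (fgets(line, sizeof(line), fp)) {
		if (strncmp(line, "totals", 6))
			continue;
		n = sscanf(line + 6,
			   "%lu %lu %lu %lu %lu %lu %lu %lu %lu %lu "
			   "%lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
			   &f[0], &f[1], &f[2], &f[3], &f[4], &f[5], &f[6],
			   &f[7], &f[8], &f[9], &f[10], &f[11], &f[12],
			   &f[13], &f[14], &f[15], &f[16], &f[17], &f[18],
			   &f[19]);
		break;
	}
	fclose(fp);
	return n == NFIELDS ? 0 : -1;
}

int main(void)
{
	unsigned long a[NFIELDS], b[NFIELDS];

	if (read_totals(a))
		return 1;
	sleep(10);
	if (read_totals(b))
		return 1;
	printf("over 10s: %lu schedule() calls, %lu yields, "
	       "%lu load_balance() calls\n",
	       b[6] - a[6], b[3] - a[3], b[10] - a[10]);
	return 0;
}
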