struct rcu_head rcu;
unsigned long total_faults;
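+ /* Faults counted by the node of the CPU that triggered them */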
+ unsigned long *faults_cpu;
unsigned long faults[0];
};
int priv, i;
for (priv = 0; priv < 2; priv++) {
- long diff;
+ long diff, f_diff;
i = task_faults_idx(nid, priv);
diff = -p->numa_faults_memory[i];
+ f_diff = -p->numa_faults_cpu[i];
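+ /* Seeded with -old; adding the updated totals below yields the net change */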
/* Decay existing window, copy faults since last scan */
p->numa_faults_memory[i] >>= 1;
p->numa_faults_memory[i] += p->numa_faults_buffer_memory[i];
fault_types[priv] += p->numa_faults_buffer_memory[i];
p->numa_faults_buffer_memory[i] = 0;
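+ /* Decay and fold the CPU-side fault buffer the same way */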
+ p->numa_faults_cpu[i] >>= 1;
+ p->numa_faults_cpu[i] += p->numa_faults_buffer_cpu[i];
+ p->numa_faults_buffer_cpu[i] = 0;
+
faults += p->numa_faults_memory[i];
diff += p->numa_faults_memory[i];
+ f_diff += p->numa_faults_cpu[i];
p->total_numa_faults += diff;
if (p->numa_group) {
/* safe because we can only change our own group */
p->numa_group->faults[i] += diff;
+ p->numa_group->faults_cpu[i] += f_diff;
p->numa_group->total_faults += diff;
group_faults += p->numa_group->faults[i];
}
if (unlikely(!p->numa_group)) {
unsigned int size = sizeof(struct numa_group) +
- 2*nr_node_ids*sizeof(unsigned long);
+ 4*nr_node_ids*sizeof(unsigned long);
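+ /* 2*nr_node_ids longs for faults[], plus 2*nr_node_ids for faults_cpu */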
grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
if (!grp)
	return;
spin_lock_init(&grp->lock);
INIT_LIST_HEAD(&grp->task_list);
grp->gid = p->pid;
+ /* Second half of the array tracks nids where faults happen */
+ grp->faults_cpu = grp->faults + 2 * nr_node_ids;
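+ /* numa_faults_cpu directly follows numa_faults_memory, so one loop copies both halves */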
- for (i = 0; i < 2*nr_node_ids; i++)
+ for (i = 0; i < 4*nr_node_ids; i++)
grp->faults[i] = p->numa_faults_memory[i];
grp->total_faults = p->total_numa_faults;
double_lock(&my_grp->lock, &grp->lock);
- for (i = 0; i < 2*nr_node_ids; i++) {
+ for (i = 0; i < 4*nr_node_ids; i++) {
my_grp->faults[i] -= p->numa_faults_memory[i];
grp->faults[i] += p->numa_faults_memory[i];
}
if (grp) {
spin_lock(&grp->lock);
- for (i = 0; i < 2*nr_node_ids; i++)
+ for (i = 0; i < 4*nr_node_ids; i++)
grp->faults[i] -= p->numa_faults_memory[i];
grp->total_faults -= p->total_numa_faults;
p->numa_faults_memory = NULL;
p->numa_faults_buffer_memory = NULL;
+ p->numa_faults_cpu = NULL;
+ p->numa_faults_buffer_cpu = NULL;
kfree(numa_faults);
}
{
struct task_struct *p = current;
bool migrated = flags & TNF_MIGRATED;
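+ /* Node of the CPU the task is running on, as opposed to the node of the memory */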
+ int this_node = task_node(current);
int priv;
if (!numabalancing_enabled)
	return;
/* Allocate buffer to track faults on a per-node basis */
if (unlikely(!p->numa_faults_memory)) {
- int size = sizeof(*p->numa_faults_memory) * 2 * nr_node_ids;
+ int size = sizeof(*p->numa_faults_memory) * 4 * nr_node_ids;
/* numa_faults and numa_faults_buffer share the allocation */
p->numa_faults_memory = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN);
if (!p->numa_faults_memory)
	return;
BUG_ON(p->numa_faults_buffer_memory);
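+ /*
+  * The averaged statistics, shared & private, memory & CPU,
+  * occupy the first half of the array. The second half of the
+  * array is for current counters, which are averaged into the
+  * first set by task_numa_placement.
+  */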
- p->numa_faults_buffer_memory = p->numa_faults_memory + (2 * nr_node_ids);
+ p->numa_faults_cpu = p->numa_faults_memory + (2 * nr_node_ids);
+ p->numa_faults_buffer_memory = p->numa_faults_memory + (4 * nr_node_ids);
+ p->numa_faults_buffer_cpu = p->numa_faults_memory + (6 * nr_node_ids);
p->total_numa_faults = 0;
memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
}
if (migrated)
	p->numa_pages_migrated += pages;
p->numa_faults_buffer_memory[task_faults_idx(node, priv)] += pages;
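+ /* Also record which node the task was running on when it faulted */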
+ p->numa_faults_buffer_cpu[task_faults_idx(this_node, priv)] += pages;
p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages;
}