DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;
+void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
+{
+	/* This is safe as we are holding page_table_lock */
+	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
+	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+	if (atomic_read(&tlb->mm->mm_users) < 2 ||
+	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
+		pte_free(ptepage);
+		return;
+	}
+
+	if (*batchp == NULL) {
+		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
+		if (*batchp == NULL) {
+			pte_free_now(ptepage);
+			return;
+		}
+		(*batchp)->index = 0;
+	}
+	(*batchp)->pages[(*batchp)->index++] = ptepage;
+	if ((*batchp)->index == PTE_FREELIST_SIZE) {
+		pte_free_submit(*batchp);
+		*batchp = NULL;
+	}
+}
+
/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE. If wrprot is true, it is permissible to
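
The fast path above is worth spelling out: if the mm has a single user or is live only on the local CPU, no other processor can be walking these page tables concurrently, so the PTE page can be handed straight to pte_free(). Otherwise it is queued in a per-CPU batch and only freed after an RCU grace period; if even the GFP_ATOMIC page for the batch cannot be allocated, pte_free_now() falls back to a forced synchronous free. The definition of struct pte_freelist_batch is not part of these hunks; the following is only a sketch consistent with how the code uses it (one page holding the RCU callback handle, a fill index, and as many page pointers as fit), with the field order and PTE_FREELIST_SIZE arithmetic being assumptions rather than text copied from the tree:

	/* Sketch only -- not part of this patch.  A single page carries
	 * the rcu_head, the fill index, and an open-ended array of
	 * PTE-page pointers filling the remainder of the page. */
	struct pte_freelist_batch {
		struct rcu_head	rcu;		/* handle for call_rcu() */
		unsigned int	index;		/* next free slot in pages[] */
		struct page	*pages[0];	/* pages queued for deferred free */
	};

	#define PTE_FREELIST_SIZE \
		((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / \
		 sizeof(struct page *))

With that layout, the PTE_FREELIST_SIZE check in __pte_free_tlb() submits the batch exactly when the page is full.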
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	int i;
-	cpumask_t tmp = cpumask_of_cpu(smp_processor_id());
+	int cpu;
+	cpumask_t tmp;
	int local = 0;

	BUG_ON(in_interrupt());

+	cpu = get_cpu();
	i = batch->index;
+	tmp = cpumask_of_cpu(cpu);
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;

	if (i == 1)
		flush_hash_page(batch->context, batch->addr[0], batch->pte[0],
				local);
	else
		flush_hash_range(batch->context, i, local);

	batch->index = 0;
+	put_cpu();
}
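
The reason for this change: under CONFIG_PREEMPT, the old smp_processor_id() call could race, since the task might be preempted and migrated to another CPU between reading the CPU number and using the derived cpumask. get_cpu() returns the current CPU with preemption disabled, and put_cpu() re-enables it, pinning the task for the whole flush. The pattern in isolation (the helper below is hypothetical, not from this patch):

	#include <linux/smp.h>		/* get_cpu(), put_cpu() */

	/* Hypothetical example: any per-CPU decision must sit inside
	 * the get_cpu()/put_cpu() pair, or a migration between the
	 * read and the use can invalidate it. */
	static void do_local_work(void)
	{
		int cpu = get_cpu();	/* disables preemption */

		/* ... 'cpu' is guaranteed to remain the executing
		 * CPU until put_cpu() ... */

		put_cpu();		/* re-enables preemption */
	}
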
#ifdef CONFIG_SMP
	pte_free(ptepage);
}
-static void pte_free_rcu_callback(void *arg)
+static void pte_free_rcu_callback(struct rcu_head *head)
{
-	struct pte_freelist_batch *batch = arg;
+	struct pte_freelist_batch *batch =
+		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pte_free(batch->pages[i]);
void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
-	call_rcu(&batch->rcu, pte_free_rcu_callback, batch);
+	call_rcu(&batch->rcu, pte_free_rcu_callback);
}
void pte_free_finish(void)
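
These last two hunks track the call_rcu() API change: call_rcu() no longer takes an opaque third argument, so the callback now receives the rcu_head pointer itself and recovers the enclosing batch with container_of(). The same pattern in isolation (struct foo and its helpers are hypothetical, for illustration only):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	/* Hypothetical object with an embedded rcu_head, mirroring
	 * struct pte_freelist_batch above. */
	struct foo {
		int data;
		struct rcu_head rcu;
	};

	static void foo_free_rcu(struct rcu_head *head)
	{
		/* Map the rcu_head back to its enclosing structure. */
		struct foo *f = container_of(head, struct foo, rcu);

		kfree(f);
	}

	static void foo_release(struct foo *f)
	{
		/* Runs foo_free_rcu(&f->rcu) after a grace period,
		 * once all pre-existing RCU readers are done with f. */
		call_rcu(&f->rcu, foo_free_rcu);
	}

Embedding the rcu_head in the object being freed is what makes the opaque argument unnecessary: the head pointer uniquely identifies the batch, so pte_free_submit() only has to initialize the head and hand it to call_rcu().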