#include <asm/cpu/sq.h>
static LIST_HEAD(sq_mapping_list);
-static spinlock_t sq_mapping_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(sq_mapping_lock);
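/*
 * A minimal sketch of how the lock is then used, assuming
 * sq_mapping_list is guarded by sq_mapping_lock as elsewhere in this
 * file (illustrative only):
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&sq_mapping_lock, flags);
 *	list_add(&map->list, &sq_mapping_list);
 *	spin_unlock_irqrestore(&sq_mapping_lock, flags);
 *
 * DEFINE_SPINLOCK() declares and initializes the lock in a single
 * statement, replacing the deprecated SPIN_LOCK_UNLOCKED initializer.
 */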
/**
* sq_flush - Flush (prefetch) the store queue cache
- *
* @addr: the store queue address to flush
*
* Executes a prefetch instruction on the specified store queue cache,
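/*
 * A minimal sketch of such a flush, assuming the SH-4 semantics where
 * a PREF to an address in the store queue area triggers the burst
 * write-out (illustrative only):
 *
 *	void sq_flush(void *addr)
 *	{
 *		__asm__ __volatile__ ("pref @%0" : : "r" (addr) : "memory");
 *	}
 */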
/**
* sq_flush_range - Flush (prefetch) a specific SQ range
- *
* @start: the store queue address to start flushing from
* @len: the length to flush
*
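/*
 * A hedged sketch of the range flush, assuming each store queue is 32
 * bytes wide so the range can be walked one queue-sized line at a time
 * (illustrative only):
 *
 *	volatile unsigned long *sq = (unsigned long *)start;
 *
 *	for (len >>= 5; len--; sq += 8)
 *		sq_flush((void *)sq);
 */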
{
if (!list_empty(&sq_mapping_list)) {
struct list_head *pos, *tmp;
-
+
/*
* Read one off the list head, as it will have the highest
* mapped allocation. Set the next one up right above it.
/**
* __sq_remap - Perform a translation from the SQ to a phys addr
+ * @map: sq mapping containing the physical and store queue addresses.
*
- * @phys: Physical address to map store queues too.
- * @virt: Associated store queue address.
- *
- * Maps the store queue address @virt to the physical address @phys.
+ * Maps the store queue address given in @map to the physical address
+ * given in @map.
*/
static struct sq_mapping *__sq_remap(struct sq_mapping *map)
{
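	/*
	 * Sketch of the intent, under the assumption that the actual
	 * translation is set up via the MMU: a TLB/page table entry is
	 * installed so that stores to map->sq_addr in the store queue
	 * area are written out to the physical pages at map->addr.
	 */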
/**
* sq_remap - Map a physical address through the Store Queues
- *
* @phys: Physical address of mapping.
* @size: Length of mapping.
* @name: User invoking mapping.
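/*
 * Typical use of the mapping API, assuming sq_remap() returns the same
 * struct sq_mapping consumed by sq_unmap() (illustrative sketch; the
 * physical address, length, and "fb" name are made-up examples):
 *
 *	struct sq_mapping *map;
 *
 *	map = sq_remap(0x10000000, 0x1000, "fb");
 *	if (!map)
 *		return -ENOMEM;
 *
 *	sq_clear(map->sq_addr, 0x1000);
 *	sq_unmap(map);
 */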
/**
* sq_unmap - Unmap a Store Queue allocation
- *
* @map: Pre-allocated Store Queue mapping.
*
* Unmaps the store queue allocation @map that was previously created by
/**
* sq_clear - Clear a store queue range
- *
* @addr: Address to start clearing from.
* @len: Length to clear.
*
void sq_clear(unsigned long addr, unsigned int len)
{
int i;
-
+
/* Clear out both queues linearly */
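	/*
	 * Each of the SH-4's two store queues (SQ0 and SQ1) is 32 bytes
	 * wide, hence the eight stores per queue below.
	 */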
for (i = 0; i < 8; i++) {
ctrl_outl(0, addr + i + 0);
/**
* sq_vma_unmap - Unmap a VMA range
- *
* @area: VMA containing range.
* @addr: Start of range.
* @len: Length of range.
entry = list_entry(pos, typeof(*entry), list);
if (entry->sq_addr == addr) {
- /*
+ /*
* We could probably get away without doing the tlb flush
* here, as generic code should take care of most of this
* when unmapping the rest of the VMA range for us. Leave
* it in for added sanity for the time being.
*/
__flush_tlb_page(get_asid(), entry->sq_addr & PAGE_MASK);
-
+
list_del(&entry->list);
kfree(entry);
return;
- }
+ }
}
}
/**
* sq_vma_sync - Sync a VMA range
- *
* @area: VMA containing range.
* @start: Start of range.
* @len: Length of range.
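/*
 * A plausible body, sketched under the assumption that syncing a range
 * simply flushes it (an assumption; the actual implementation may
 * differ):
 *
 *	static void sq_vma_sync(struct vm_area_struct *area,
 *				unsigned long start, size_t len,
 *				unsigned int flags)
 *	{
 *		sq_flush_range(start, len);
 *	}
 */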
/**
* sq_mmap - mmap() for /dev/cpu/sq
- *
* @file: unused.
* @vma: VMA to remap.
*
unsigned long size = vma->vm_end - vma->vm_start;
struct sq_mapping *map;
- /*
+ /*
* We're not interested in any arbitrary virtual address that has
* been stuck in the VMA, as we already know what addresses we
* want. Save off the size, and reposition the VMA to begin at
if (io_remap_page_range(vma, map->sq_addr, map->addr,
size, vma->vm_page_prot))
return -EAGAIN;
-
+
vma->vm_ops = &sq_vma_ops;
return 0;
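/*
 * From userspace the device is then used like any other mappable
 * character device (illustrative sketch; the 0x1000 length is a
 * made-up example):
 *
 *	int fd = open("/dev/cpu/sq", O_RDWR);
 *	void *sq = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 */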
list_for_each_prev(pos, &sq_mapping_list) {
struct sq_mapping *entry;
-
+
entry = list_entry(pos, typeof(*entry), list);
p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n", entry->sq_addr,