#define set_silent_ptb(emu,page) __set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
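/*
 * For reference, a sketch of the multi-entry variants this #else branch provides
 * when PAGE_SIZE > EMUPAGESIZE (UNIT_PAGES assumed to be PAGE_SIZE / EMUPAGESIZE);
 * details may differ from the exact file being patched.
 */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;

	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		addr += EMUPAGESIZE;
	}
}

static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;

	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++)
		/* every sub-entry points at the same silent page */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
}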
-static int synth_alloc_pages(emu10k1_t *hw, emu10k1_memblk_t *blk);
-static int synth_free_pages(emu10k1_t *hw, emu10k1_memblk_t *blk);
+static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
+static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
-#define get_emu10k1_memblk(l,member) list_entry(l, emu10k1_memblk_t, member)
+#define get_emu10k1_memblk(l,member) list_entry(l, struct snd_emu10k1_memblk, member)
snd_assert(blk->mapped_page >= 0, continue);
size = blk->mapped_page - page;
if (size == npages) {
{
int start_page, end_page, mpage, pg;
struct list_head *p;
/* calculate the expected size of empty region */
if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
-static emu10k1_memblk_t *
-search_empty(emu10k1_t *emu, int size)
+static struct snd_emu10k1_memblk *
+search_empty(struct snd_emu10k1 *emu, int size)
- snd_printk("max memory size is 0x%lx (addr = 0x%lx)!!\n", emu->dma_mask, (unsigned long)addr);
+ snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n", emu->dma_mask, (unsigned long)addr);
if (idx >= sgbuf->pages) {
printk(KERN_ERR "emu: pages overflow! (%d-%d) for %d\n",
blk->first_page, blk->last_page, sgbuf->pages);
return NULL;
}
#endif
addr = sgbuf->table[idx].addr;
if (! is_valid_page(emu, addr)) {
printk(KERN_ERR "emu: failure page = %d\n", idx);
return NULL;
}
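/*
 * The snd_printk hunk above adds KERN_ERR to the message printed by the
 * DMA-address check used here. A sketch of that check, assumed to match the
 * is_valid_page() helper in this file: a page address is usable only if it
 * fits under emu->dma_mask and is aligned to EMUPAGESIZE.
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		snd_printk(KERN_ERR "max memory size is 0x%lx (addr = 0x%lx)!!\n",
			   emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE - 1)) {
		snd_printk(KERN_ERR "page is not aligned\n");
		return 0;
	}
	return 1;
}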
blk->map_locked = 1; /* do not unmap this block! */
err = snd_emu10k1_memblk_map(emu, blk);
if (err < 0) {
{
snd_assert(emu && blk, return -EINVAL);
return snd_emu10k1_synth_free(emu, blk);
- down(&hdr->block_mutex);
- blk = (emu10k1_memblk_t *)__snd_util_mem_alloc(hdr, size);
+ mutex_lock(&hdr->block_mutex);
+ blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
- snd_util_memhdr_t *hdr = emu->memhdr;
- emu10k1_memblk_t *blk = (emu10k1_memblk_t *)memblk;
+ struct snd_util_memhdr *hdr = emu->memhdr;
+ struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
spin_lock_irqsave(&emu->memblk_lock, flags);
if (blk->mapped_page >= 0)
unmap_memblk(emu, blk);
spin_unlock_irqrestore(&emu->memblk_lock, flags);
synth_free_pages(emu, blk);
__snd_util_mem_free(hdr, memblk);
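/*
 * The free path above is serialized on hdr->block_mutex after the down()/up()
 * to mutex_lock()/mutex_unlock() conversion shown earlier. Roughly (a sketch,
 * not the exact patched function):
 */
int snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}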
-static void get_single_page_range(snd_util_memhdr_t *hdr, emu10k1_memblk_t *blk, int *first_page_ret, int *last_page_ret)
+static void get_single_page_range(struct snd_util_memhdr *hdr,
+ struct snd_emu10k1_memblk *blk,
+ int *first_page_ret, int *last_page_ret)
int first_page, last_page;
first_page = blk->first_page;
if ((p = blk->mem.list.prev) != &hdr->block) {
{
int page, first_page, last_page;
struct snd_dma_buffer dmab;
{
int page, first_page, last_page;
struct snd_dma_buffer dmab;
{
char *ptr;
snd_assert(page >= 0 && page < emu->max_cache_pages, return NULL);
ptr = emu->page_ptr_table[page];
if (! ptr) {
-int snd_emu10k1_synth_bzero(emu10k1_t *emu, snd_util_memblk_t *blk, int offset, int size)
+int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
+ int offset, int size)
{
int page, nextofs, end_offset, temp, temp1;
void *ptr;
-int snd_emu10k1_synth_copy_from_user(emu10k1_t *emu, snd_util_memblk_t *blk, int offset, const char __user *data, int size)
+int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
+ int offset, const char __user *data, int size)
{
int page, nextofs, end_offset, temp, temp1;
void *ptr;
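/*
 * Both helpers above walk the block one aligned page at a time; a sketch of
 * that loop for the bzero case. get_aligned_page(), aligned_page_offset() and
 * offset_ptr() are assumed to be the page helpers defined elsewhere in this
 * file; details may differ from the patched source.
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);	/* start of the next page */
		temp = nextofs - offset;			/* bytes left in this page */
		temp1 = end_offset - offset;			/* bytes left in the request */
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}
/* snd_emu10k1_synth_copy_from_user() follows the same walk, replacing the
 * memset() with copy_from_user() and advancing the user pointer by temp. */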