#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
-#include <linux/mutex.h>
#include <asm/system.h>
#include <asm/io.h>
spin_unlock_irqrestore(&gp->lock, flags);
if (request_irq(gp->pdev->irq, gem_interrupt,
- IRQF_SHARED, dev->name, (void *)dev)) {
+ SA_SHIRQ, dev->name, (void *)dev)) {
printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);
spin_lock_irqsave(&gp->lock, flags);
{
struct gem *gp = (struct gem *) data;
- mutex_lock(&gp->pm_mutex);
+ down(&gp->pm_sem);
netif_poll_disable(gp->dev);
netif_poll_enable(gp->dev);
- mutex_unlock(&gp->pm_mutex);
+ up(&gp->pm_sem);
}
struct gem *gp = dev->priv;
int rc = 0;
- mutex_lock(&gp->pm_mutex);
+ down(&gp->pm_sem);
/* We need the cell enabled */
if (!gp->asleep)
rc = gem_do_start(dev);
gp->opened = (rc == 0);
- mutex_unlock(&gp->pm_mutex);
+ up(&gp->pm_sem);
return rc;
}
* our caller (dev_close) already did it for us
*/
- mutex_lock(&gp->pm_mutex);
+ down(&gp->pm_sem);
gp->opened = 0;
if (!gp->asleep)
gem_do_stop(dev, 0);
- mutex_unlock(&gp->pm_mutex);
+ up(&gp->pm_sem);
return 0;
}
struct gem *gp = dev->priv;
unsigned long flags;
- mutex_lock(&gp->pm_mutex);
+ down(&gp->pm_sem);
netif_poll_disable(dev);
/* Stop the link timer */
del_timer_sync(&gp->link_timer);
- /* Now we release the mutex to not block the reset task who
+ * Now we release the semaphore so as not to block the reset task, which
* can take it too. We are marked asleep, so there will be no
* conflict here
*/
- mutex_unlock(&gp->pm_mutex);
+ up(&gp->pm_sem);
/* Wait for a pending reset task to complete */
while (gp->reset_task_pending)
printk(KERN_INFO "%s: resuming\n", dev->name);
- mutex_lock(&gp->pm_mutex);
+ down(&gp->pm_sem);
/* Keep the cell enabled during the entire operation, no need to
* take a lock here tho since nothing else can happen while we are
* still asleep, a new sleep cycle may bring it back
*/
gem_put_cell(gp);
- mutex_unlock(&gp->pm_mutex);
+ up(&gp->pm_sem);
return 0;
}
pci_set_master(gp->pdev);
netif_poll_enable(dev);
- mutex_unlock(&gp->pm_mutex);
+ up(&gp->pm_sem);
return 0;
}
return 0;
}
- mutex_lock(&gp->pm_mutex);
+ down(&gp->pm_sem);
spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock);
dev->mtu = new_mtu;
}
spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
- mutex_unlock(&gp->pm_mutex);
+ up(&gp->pm_sem);
return 0;
}
int rc = -EOPNOTSUPP;
unsigned long flags;
- /* Hold the PM mutex while doing ioctl's or we may collide
+ /* Hold the PM semaphore while doing ioctl's or we may collide
* with power management.
*/
- mutex_lock(&gp->pm_mutex);
+ down(&gp->pm_sem);
spin_lock_irqsave(&gp->lock, flags);
gem_get_cell(gp);
gem_put_cell(gp);
spin_unlock_irqrestore(&gp->lock, flags);
- mutex_unlock(&gp->pm_mutex);
+ up(&gp->pm_sem);
return rc;
}
#if defined(__sparc__)
struct pci_dev *pdev = gp->pdev;
struct pcidev_cookie *pcp = pdev->sysdata;
- int use_idprom = 1;
+ int node = -1;
if (pcp != NULL) {
- unsigned char *addr;
- int len;
-
- addr = of_get_property(pcp->prom_node, "local-mac-address",
- &len);
- if (addr && len == 6) {
- use_idprom = 0;
- memcpy(dev->dev_addr, addr, 6);
- }
+ node = pcp->prom_node;
+ if (prom_getproplen(node, "local-mac-address") == 6)
+ prom_getproperty(node, "local-mac-address",
+ dev->dev_addr, 6);
+ else
+ node = -1;
}
- if (use_idprom)
+ if (node == -1)
memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
#elif defined(CONFIG_PPC_PMAC)
unsigned char *addr;
spin_lock_init(&gp->lock);
spin_lock_init(&gp->tx_lock);
- mutex_init(&gp->pm_mutex);
+ init_MUTEX(&gp->pm_sem);
init_timer(&gp->link_timer);
gp->link_timer.function = gem_link_timer;