diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 71f311d..f680824 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -3,52 +3,85 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2000,2002-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
  *
- * Routines for PCI DMA mapping.  See Documentation/DMA-mapping.txt for
+ * Routines for PCI DMA mapping.  See Documentation/DMA-API.txt for
  * a description of how these routines should be used.
  */
 
 #include <linux/module.h>
+#include <asm/dma.h>
 #include <asm/sn/sn_sal.h>
 #include "pci/pcibus_provider_defs.h"
 #include "pci/pcidev.h"
 #include "pci/pcibr_provider.h"
 
-void sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
-                    int direction);
+#define SG_ENT_VIRT_ADDRESS(sg)        (page_address((sg)->page) + (sg)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG)        virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
 
 /**
- * sn_pci_alloc_consistent - allocate memory for coherent DMA
- * @hwdev: device to allocate for
+ * sn_dma_supported - test a DMA mask
+ * @dev: device to test
+ * @mask: DMA mask to test
+ *
+ * Return whether the given PCI device DMA address mask can be supported
+ * properly.  For example, if your device can only drive the low 24-bits
+ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
+ * this function.  Of course, SN only supports devices that have 32 or more
+ * address bits when using the PMU.
+ */
+int sn_dma_supported(struct device *dev, u64 mask)
+{
+       BUG_ON(dev->bus != &pci_bus_type);
+
+       if (mask < 0x7fffffff)
+               return 0;
+       return 1;
+}
+EXPORT_SYMBOL(sn_dma_supported);
+
+/**
+ * sn_dma_set_mask - set the DMA mask
+ * @dev: device to set
+ * @dma_mask: new mask
+ *
+ * Set @dev's DMA mask if the hw supports it.
+ */
+int sn_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+       BUG_ON(dev->bus != &pci_bus_type);
+
+       if (!sn_dma_supported(dev, dma_mask))
+               return 0;
+
+       *dev->dma_mask = dma_mask;
+       return 1;
+}
+EXPORT_SYMBOL(sn_dma_set_mask);
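
A caller-side sketch of the two routines above (hypothetical driver fragment, not part of this patch; drivers normally reach these through the generic ia64 machvec/DMA-API hooks rather than calling them directly):

	/* negotiate the DMA mask before issuing any mappings */
	if (!sn_dma_set_mask(dev, 0xffffffffUL))
		return -EIO;	/* SN rejects masks narrower than 32 bits */
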
+
+/**
+ * sn_dma_alloc_coherent - allocate memory for coherent DMA
+ * @dev: device to allocate for
  * @size: size of the region
  * @dma_handle: DMA (bus) address
+ * @flags: memory allocation flags
  *
- * pci_alloc_consistent() returns a pointer to a memory region suitable for
+ * dma_alloc_coherent() returns a pointer to a memory region suitable for
  * coherent DMA traffic to/from a PCI device.  On SN platforms, this means
  * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
  *
  * This interface is usually used for "command" streams (e.g. the command
- * queue for a SCSI controller).  See Documentation/DMA-mapping.txt for
+ * queue for a SCSI controller).  See Documentation/DMA-API.txt for
  * more information.
- *
- * Also known as platform_pci_alloc_consistent() by the IA64 machvec code.
  */
-void *sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
-                             dma_addr_t * dma_handle)
+void *sn_dma_alloc_coherent(struct device *dev, size_t size,
+                           dma_addr_t * dma_handle, int flags)
 {
        void *cpuaddr;
        unsigned long phys_addr;
-       struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
-       struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
-
-       if (bussoft == NULL) {
-               return NULL;
-       }
+       struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
 
-       if (! IS_PCI_BRIDGE_ASIC(bussoft->bs_asic_type)) {
-               return NULL;            /* unsupported asic type */
-       }
+       BUG_ON(dev->bus != &pci_bus_type);
 
        /*
         * Allocate the memory.
@@ -66,151 +99,52 @@ void *sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
        /*
         * 64 bit address translations should never fail.
         * 32 bit translations can fail if there are insufficient mapping
-        *   resources.
+        * resources.
         */
 
-       *dma_handle = pcibr_dma_map(pcidev_info, phys_addr, size, SN_PCIDMA_CONSISTENT);
+       *dma_handle = pcibr_dma_map(pcidev_info, phys_addr, size,
+                                   SN_PCIDMA_CONSISTENT);
        if (!*dma_handle) {
-               printk(KERN_ERR
-                      "sn_pci_alloc_consistent():  failed  *dma_handle = 0x%lx hwdev->dev.coherent_dma_mask = 0x%lx \n",
-                      *dma_handle, hwdev->dev.coherent_dma_mask);
+               printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
                free_pages((unsigned long)cpuaddr, get_order(size));
                return NULL;
        }
 
        return cpuaddr;
 }
+EXPORT_SYMBOL(sn_dma_alloc_coherent);
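
For example, a driver allocating a small command ring might use this as follows (hypothetical fragment; names and sizes are illustrative):

	dma_addr_t ring_dma;
	void *ring;

	/* the CPU uses 'ring'; the device bus-masters at 'ring_dma' */
	ring = sn_dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;	/* e.g. 32-bit ATE translation space exhausted */
	/* ... use the ring ... */
	sn_dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
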
 
 /**
- * sn_pci_free_consistent - free memory associated with coherent DMAable region
- * @hwdev: device to free for
+ * sn_dma_free_coherent - free memory associated with coherent DMAable region
+ * @dev: device to free for
  * @size: size to free
- * @vaddr: kernel virtual address to free
+ * @cpu_addr: kernel virtual address to free
  * @dma_handle: DMA address associated with this region
  *
- * Frees the memory allocated by pci_alloc_consistent().  Also known
- * as platform_pci_free_consistent() by the IA64 machvec code.
+ * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
+ * any associated IOMMU mappings.
  */
-void
-sn_pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
-                      dma_addr_t dma_handle)
+void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+                         dma_addr_t dma_handle)
 {
-       struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
-       struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
+       struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
 
-       if (! bussoft) {
-               return;
-       }
+       BUG_ON(dev->bus != &pci_bus_type);
 
        pcibr_dma_unmap(pcidev_info, dma_handle, 0);
-       free_pages((unsigned long)vaddr, get_order(size));
-}
-
-/**
- * sn_pci_map_sg - map a scatter-gather list for DMA
- * @hwdev: device to map for
- * @sg: scatterlist to map
- * @nents: number of entries
- * @direction: direction of the DMA transaction
- *
- * Maps each entry of @sg for DMA.  Also known as platform_pci_map_sg by the
- * IA64 machvec code.
- */
-int
-sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
-             int direction)
-{
-
-       int i;
-       unsigned long phys_addr;
-       struct scatterlist *saved_sg = sg;
-       struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
-       struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
-
-       /* can't go anywhere w/o a direction in life */
-       if (direction == PCI_DMA_NONE)
-               BUG();
-
-       if (! bussoft) {
-               return 0;
-       }
-
-       /* SN cannot support DMA addresses smaller than 32 bits. */
-       if (hwdev->dma_mask < 0x7fffffff)
-               return 0;
-
-       /*
-        * Setup a DMA address for each entry in the
-        * scatterlist.
-        */
-       for (i = 0; i < nents; i++, sg++) {
-               phys_addr =
-                   __pa((unsigned long)page_address(sg->page) + sg->offset);
-               sg->dma_address = pcibr_dma_map(pcidev_info, phys_addr, sg->length, 0);
-
-               if (!sg->dma_address) {
-                       printk(KERN_ERR "sn_pci_map_sg: Unable to allocate "
-                              "anymore page map entries.\n");
-                       /*
-                        * We will need to free all previously allocated entries.
-                        */
-                       if (i > 0) {
-                               sn_pci_unmap_sg(hwdev, saved_sg, i, direction);
-                       }
-                       return (0);
-               }
-
-               sg->dma_length = sg->length;
-       }
-
-       return nents;
-
-}
-
-/**
- * sn_pci_unmap_sg - unmap a scatter-gather list
- * @hwdev: device to unmap
- * @sg: scatterlist to unmap
- * @nents: number of scatterlist entries
- * @direction: DMA direction
- *
- * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
- * concerning calls here are the same as for pci_unmap_single() below.  Also
- * known as sn_pci_unmap_sg() by the IA64 machvec code.
- */
-void
-sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
-               int direction)
-{
-       int i;
-       struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
-       struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
-
-       /* can't go anywhere w/o a direction in life */
-       if (direction == PCI_DMA_NONE)
-               BUG();
-
-       if (! bussoft) {
-               return;
-       }
-
-       for (i = 0; i < nents; i++, sg++) {
-               pcibr_dma_unmap(pcidev_info, sg->dma_address, direction);
-               sg->dma_address = (dma_addr_t) NULL;
-               sg->dma_length = 0;
-       }
+       free_pages((unsigned long)cpu_addr, get_order(size));
 }
+EXPORT_SYMBOL(sn_dma_free_coherent);
 
 /**
- * sn_pci_map_single - map a single region for DMA
- * @hwdev: device to map for
- * @ptr: kernel virtual address of the region to map
+ * sn_dma_map_single - map a single page for DMA
+ * @dev: device to map for
+ * @cpu_addr: kernel virtual address of the region to map
  * @size: size of the region
  * @direction: DMA direction
  *
- * Map the region pointed to by @ptr for DMA and return the
- * DMA address.   Also known as platform_pci_map_single() by
- * the IA64 machvec code.
+ * Map the region pointed to by @cpu_addr for DMA and return the
+ * DMA address.
  *
  * We map this to the one step pcibr_dmamap_trans interface rather than
  * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
@@ -218,260 +152,212 @@ sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
  * (which is pretty much unacceptable).
  *
  * TODO: simplify our interface;
- *       get rid of dev_desc and vhdl (seems redundant given a pci_dev);
  *       figure out how to save dmamap handle so can use two step.
  */
-dma_addr_t
-sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+                            int direction)
 {
        dma_addr_t dma_addr;
        unsigned long phys_addr;
-       struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
-       struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
+       struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
 
-       if (direction == PCI_DMA_NONE)
-               BUG();
-
-       if (bussoft == NULL) {
-               return 0;
-       }
-
-       if (! IS_PCI_BRIDGE_ASIC(bussoft->bs_asic_type)) {
-               return 0;               /* unsupported asic type */
-       }
-
-       /* SN cannot support DMA addresses smaller than 32 bits. */
-       if (hwdev->dma_mask < 0x7fffffff)
-               return 0;
-
-       /*
-        * Call our dmamap interface
-        */
+       BUG_ON(dev->bus != &pci_bus_type);
 
-       phys_addr = __pa(ptr);
+       phys_addr = __pa(cpu_addr);
        dma_addr = pcibr_dma_map(pcidev_info, phys_addr, size, 0);
        if (!dma_addr) {
-               printk(KERN_ERR "pci_map_single: Unable to allocate anymore "
-                      "page map entries.\n");
+               printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
                return 0;
        }
-       return ((dma_addr_t) dma_addr);
+       return dma_addr;
 }
+EXPORT_SYMBOL(sn_dma_map_single);
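
A matching streaming-DMA sketch ('buf' and 'len' are hypothetical, not from this patch):

	dma_addr_t dma_addr;

	dma_addr = sn_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (!dma_addr)
		return -ENOMEM;	/* out of ATEs */
	/* ... device reads 'len' bytes at 'dma_addr' ... */
	sn_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
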
 
 /**
- * sn_pci_dma_sync_single_* - make sure all DMAs or CPU accesses
- * have completed
- * @hwdev: device to sync
- * @dma_handle: DMA address to sync
+ * sn_dma_unmap_single - unmap a DMA-mapped page
+ * @dev: device to unmap for
+ * @dma_addr: DMA address to unmap
  * @size: size of region
  * @direction: DMA direction
  *
  * This routine is supposed to sync the DMA region specified
- * by @dma_handle into the 'coherence domain'.  We do not need to do
- * anything on our platform.
+ * by @dma_addr into the coherence domain.  On SN, we're always cache
+ * coherent, so we just need to free any ATEs associated with this mapping.
  */
-void
-sn_pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size,
-                   int direction)
+void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+                        int direction)
 {
-       struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
-       struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
-
-       if (direction == PCI_DMA_NONE)
-               BUG();
-
-       if (bussoft == NULL) {
-               return;
-       }
-
-       if (! IS_PCI_BRIDGE_ASIC(bussoft->bs_asic_type)) {
-               return;         /* unsupported asic type */
-       }
+       struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
 
+       BUG_ON(dev->bus != &pci_bus_type);
        pcibr_dma_unmap(pcidev_info, dma_addr, direction);
 }
+EXPORT_SYMBOL(sn_dma_unmap_single);
 
 /**
- * sn_dma_supported - test a DMA mask
- * @hwdev: device to test
- * @mask: DMA mask to test
+ * sn_dma_unmap_sg - unmap a DMA scatterlist
+ * @dev: device to unmap
+ * @sg: scatterlist to unmap
+ * @nhwentries: number of scatterlist entries
+ * @direction: DMA direction
  *
- * Return whether the given PCI device DMA address mask can be supported
- * properly.  For example, if your device can only drive the low 24-bits
- * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
- * this function.  Of course, SN only supports devices that have 32 or more
- * address bits when using the PMU.  We could theoretically support <32 bit
- * cards using direct mapping, but we'll worry about that later--on the off
- * chance that someone actually wants to use such a card.
+ * Unmap a set of streaming mode DMA translations.
  */
-int sn_pci_dma_supported(struct pci_dev *hwdev, u64 mask)
+void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+                    int nhwentries, int direction)
 {
-       if (mask < 0x7fffffff)
-               return 0;
-       return 1;
-}
-
-/*
- * New generic DMA routines just wrap sn2 PCI routines until we
- * support other bus types (if ever).
- */
+       int i;
+       struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
 
-int sn_dma_supported(struct device *dev, u64 mask)
-{
        BUG_ON(dev->bus != &pci_bus_type);
 
-       return sn_pci_dma_supported(to_pci_dev(dev), mask);
+       for (i = 0; i < nhwentries; i++, sg++) {
+               pcibr_dma_unmap(pcidev_info, sg->dma_address, direction);
+               sg->dma_address = (dma_addr_t) NULL;
+               sg->dma_length = 0;
+       }
 }
+EXPORT_SYMBOL(sn_dma_unmap_sg);
 
-EXPORT_SYMBOL(sn_dma_supported);
-
-int sn_dma_set_mask(struct device *dev, u64 dma_mask)
+/**
+ * sn_dma_map_sg - map a scatterlist for DMA
+ * @dev: device to map for
+ * @sg: scatterlist to map
+ * @nhwentries: number of entries
+ * @direction: direction of the DMA transaction
+ *
+ * Maps each entry of @sg for DMA.
+ */
+int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+                 int direction)
 {
+       unsigned long phys_addr;
+       struct scatterlist *saved_sg = sg;
+       struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+       int i;
+
        BUG_ON(dev->bus != &pci_bus_type);
 
-       if (!sn_dma_supported(dev, dma_mask))
-               return 0;
+       /*
+        * Setup a DMA address for each entry in the scatterlist.
+        */
+       for (i = 0; i < nhwentries; i++, sg++) {
+               phys_addr = SG_ENT_PHYS_ADDRESS(sg);
+               sg->dma_address = pcibr_dma_map(pcidev_info, phys_addr,
+                                               sg->length, 0);
 
-       *dev->dma_mask = dma_mask;
-       return 1;
-}
+               if (!sg->dma_address) {
+                       printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
 
-EXPORT_SYMBOL(sn_dma_set_mask);
+                       /*
+                        * Free any successfully allocated entries.
+                        */
+                       if (i > 0)
+                               sn_dma_unmap_sg(dev, saved_sg, i, direction);
+                       return 0;
+               }
 
-void *sn_dma_alloc_coherent(struct device *dev, size_t size,
-                           dma_addr_t * dma_handle, int flag)
-{
-       BUG_ON(dev->bus != &pci_bus_type);
+               sg->dma_length = sg->length;
+       }
 
-       return sn_pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
+       return nhwentries;
 }
+EXPORT_SYMBOL(sn_dma_map_sg);
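
A hypothetical consumer of the mapped list ('fill_hw_desc' and 'desc' are illustrative, not part of this interface):

	int i, n;

	n = sn_dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
	if (n == 0)
		return -ENOMEM;	/* nothing mapped; partial mappings were undone */
	for (i = 0; i < n; i++)
		/* one hardware descriptor per mapped entry */
		fill_hw_desc(&desc[i], sglist[i].dma_address,
			     sglist[i].dma_length);
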
 
-EXPORT_SYMBOL(sn_dma_alloc_coherent);
-
-void
-sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-                    dma_addr_t dma_handle)
+/*
+ * SN systems are fully cache coherent, so these dma_sync_* entry points
+ * have nothing to do; they exist only to satisfy the DMA API.
+ */
+void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+                               size_t size, int direction)
 {
        BUG_ON(dev->bus != &pci_bus_type);
-
-       sn_pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
 }
+EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
 
-EXPORT_SYMBOL(sn_dma_free_coherent);
-
-dma_addr_t
-sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-                 int direction)
+void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+                                  size_t size, int direction)
 {
        BUG_ON(dev->bus != &pci_bus_type);
-
-       return sn_pci_map_single(to_pci_dev(dev), cpu_addr, size,
-                                (int)direction);
 }
+EXPORT_SYMBOL(sn_dma_sync_single_for_device);
 
-EXPORT_SYMBOL(sn_dma_map_single);
-
-void
-sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                   int direction)
+void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+                           int nelems, int direction)
 {
        BUG_ON(dev->bus != &pci_bus_type);
-
-       sn_pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
 }
+EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
 
-EXPORT_SYMBOL(sn_dma_unmap_single);
-
-dma_addr_t
-sn_dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, int direction)
+void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+                              int nelems, int direction)
 {
        BUG_ON(dev->bus != &pci_bus_type);
-
-       return pci_map_page(to_pci_dev(dev), page, offset, size,
-                           (int)direction);
 }
+EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
 
-EXPORT_SYMBOL(sn_dma_map_page);
-
-void
-sn_dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-                 int direction)
+/*
+ * sn_dma_map_single()/sn_dma_map_sg() return 0 on failure, so callers
+ * already see errors in the returned address; nothing to check here.
+ */
+int sn_dma_mapping_error(dma_addr_t dma_addr)
 {
-       BUG_ON(dev->bus != &pci_bus_type);
-
-       pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
+       return 0;
 }
+EXPORT_SYMBOL(sn_dma_mapping_error);
 
-EXPORT_SYMBOL(sn_dma_unmap_page);
-
-int
-sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-             int direction)
+/**
+ * sn_pci_get_legacy_mem - get an uncached pointer to a bus's legacy mem space
+ * @bus: bus to query
+ */
+char *sn_pci_get_legacy_mem(struct pci_bus *bus)
 {
-       BUG_ON(dev->bus != &pci_bus_type);
+       if (!SN_PCIBUS_BUSSOFT(bus))
+               return ERR_PTR(-ENODEV);
 
-       return sn_pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
+       return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem |
+                       __IA64_UNCACHED_OFFSET);
 }
 
-EXPORT_SYMBOL(sn_dma_map_sg);
-
-void
-sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-               int direction)
+/**
+ * sn_pci_legacy_read - read from a bus's legacy I/O space
+ * @bus: bus to read from
+ * @port: legacy port offset
+ * @val: value read
+ * @size: number of bytes to read (1, 2 or 4)
+ *
+ * Returns @size on success, negative errno on failure.
+ */
+int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
 {
-       BUG_ON(dev->bus != &pci_bus_type);
-
-       sn_pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
-}
+       unsigned long addr;
+       int ret;
 
-EXPORT_SYMBOL(sn_dma_unmap_sg);
+       if (!SN_PCIBUS_BUSSOFT(bus))
+               return -ENODEV;
 
-void
-sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                          size_t size, int direction)
-{
-       BUG_ON(dev->bus != &pci_bus_type);
-}
+       addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
+       addr += port;
 
-EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
+       ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);
 
-void
-sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-                             size_t size, int direction)
-{
-       BUG_ON(dev->bus != &pci_bus_type);
-}
+       if (ret == 2)
+               return -EINVAL;
 
-EXPORT_SYMBOL(sn_dma_sync_single_for_device);
+       if (ret == 1)
+               *val = -1;
 
-void
-sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-                      int direction)
-{
-       BUG_ON(dev->bus != &pci_bus_type);
+       return size;
 }
 
-EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
-
-void
-sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-                         int nelems, int direction)
+/**
+ * sn_pci_legacy_write - write to a bus's legacy I/O space
+ * @bus: bus to write to
+ * @port: legacy port offset
+ * @val: value to write
+ * @size: number of bytes to write (1, 2 or 4)
+ *
+ * Returns @size on success, negative errno on failure.
+ */
+int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
 {
-       BUG_ON(dev->bus != &pci_bus_type);
-}
+       int ret = size;
+       unsigned long paddr;
+       unsigned long *addr;
 
-int sn_dma_mapping_error(dma_addr_t dma_addr)
-{
-       return 0;
-}
+       if (!SN_PCIBUS_BUSSOFT(bus)) {
+               ret = -ENODEV;
+               goto out;
+       }
 
-EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
-EXPORT_SYMBOL(sn_pci_unmap_single);
-EXPORT_SYMBOL(sn_pci_map_single);
-EXPORT_SYMBOL(sn_pci_map_sg);
-EXPORT_SYMBOL(sn_pci_unmap_sg);
-EXPORT_SYMBOL(sn_pci_alloc_consistent);
-EXPORT_SYMBOL(sn_pci_free_consistent);
-EXPORT_SYMBOL(sn_pci_dma_supported);
-EXPORT_SYMBOL(sn_dma_mapping_error);
+       /* Put the phys addr in uncached space */
+       paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
+       paddr += port;
+       addr = (unsigned long *)paddr;
+
+       switch (size) {
+       case 1:
+               *(volatile u8 *)(addr) = (u8)(val);
+               break;
+       case 2:
+               *(volatile u16 *)(addr) = (u16)(val);
+               break;
+       case 4:
+               *(volatile u32 *)(addr) = (u32)(val);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+ out:
+       return ret;
+}