#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
-CONFIG_STANDALONE=y
CONFIG_BROKEN_ON_SMP=y
#
#
# Generic Driver Options
#
+CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
CONFIG_MTD_PARTITIONS=m
CONFIG_MTD_CONCAT=m
CONFIG_MTD_REDBOOT_PARTS=m
-CONFIG_MTD_CMDLINE_PARTS=m
+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
+# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
#
# User Modules And Translation Layers
CONFIG_MTD_JEDECPROBE=m
CONFIG_MTD_GEN_PROBE=m
# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
CONFIG_MTD_CFI_INTELEXT=m
CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI_AMDSTD_RETRY=3
CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_CFI_UTIL=m
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
CONFIG_MTD_ABSENT=m
-# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
CONFIG_MTD_ELAN_104NC=m
CONFIG_MTD_SCx200_DOCFLASH=m
CONFIG_MTD_AMD76XROM=m
-CONFIG_MTD_ICH2ROM=m
+# CONFIG_MTD_ICHXROM is not set
CONFIG_MTD_SCB2_FLASH=m
# CONFIG_MTD_NETtel is not set
# CONFIG_MTD_DILNETPC is not set
# CONFIG_MTD_PMC551_BUGFIX is not set
# CONFIG_MTD_PMC551_DEBUG is not set
# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
CONFIG_MTD_MTDRAM=m
CONFIG_MTDRAM_TOTAL_SIZE=4096
CONFIG_MTDRAM_ERASE_SIZE=128
# CONFIG_MTD_DOC2001 is not set
CONFIG_MTD_DOC2001PLUS=m
CONFIG_MTD_DOCPROBE=m
+CONFIG_MTD_DOCECC=m
# CONFIG_MTD_DOCPROBE_ADVANCED is not set
CONFIG_MTD_DOCPROBE_ADDRESS=0
CONFIG_MTD_NAND=m
# CONFIG_MTD_NAND_VERIFY_WRITE is not set
CONFIG_MTD_NAND_IDS=m
+# CONFIG_MTD_NAND_DISKONCHIP is not set
#
# Parallel port support
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=y
-# CONFIG_BLK_DEV_IDESCSI is not set
+CONFIG_BLK_DEV_IDESCSI=m
# CONFIG_IDE_TASK_IOCTL is not set
# CONFIG_IDE_TASKFILE_IO is not set
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CLK_JIFFIES=y
+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
+# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
+CONFIG_TUX=m
+
+#
+# TUX options
+#
+CONFIG_TUX_EXTCGI=y
+# CONFIG_TUX_EXTENDED_LOG is not set
+# CONFIG_TUX_DEBUG is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_BONDING=m
CONFIG_FB_HGA_ACCEL=y
CONFIG_FB_RIVA=m
# CONFIG_FB_RIVA_I2C is not set
+# CONFIG_FB_RIVA_DEBUG is not set
CONFIG_FB_I810=m
CONFIG_FB_I810_GTF=y
CONFIG_FB_MATROX=m
CONFIG_JFFS2_FS=m
CONFIG_JFFS2_FS_DEBUG=0
CONFIG_JFFS2_FS_NAND=y
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
# CONFIG_HPFS_FS is not set
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
+CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
CONFIG_EARLY_PRINTK=y
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_DEBUG_STACK_USAGE is not set
-CONFIG_DEBUG_SLAB=y
+# CONFIG_DEBUG_SLAB is not set
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_PAGEALLOC is not set
-CONFIG_DEBUG_HIGHMEM=y
+# CONFIG_DEBUG_HIGHMEM is not set
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FRAME_POINTER is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MD5=m
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_AES_586=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
drivers/net/arm/Kconfig \
drivers/net/arcnet/Kconfig \
drivers/net/Kconfig \
+ net/tux/Kconfig \
drivers/bluetooth/Kconfig \
net/bluetooth/hidp/Kconfig \
net/bluetooth/cmtp/Kconfig \
CONFIG_PCI_BIOS=y
CONFIG_PCI_DIRECT=y
CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_MSI=y
CONFIG_PCI_LEGACY_PROC=y
# CONFIG_PCI_NAMES is not set
# CONFIG_PCI_USE_VECTOR is not set
# CONFIG_MTD_NAND_VERIFY_WRITE is not set
CONFIG_MTD_NAND_IDS=m
+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
+# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
+CONFIG_MTD_CFI_AMDSTD_RETRY=3
+# CONFIG_MTD_ICHXROM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+
+
#
# Parallel port support
#
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=y
-# CONFIG_BLK_DEV_IDESCSI is not set
+CONFIG_BLK_DEV_IDESCSI=m
# CONFIG_IDE_TASK_IOCTL is not set
# CONFIG_IDE_TASKFILE_IO is not set
# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_VIDEO_SELECT=y
CONFIG_FB_HGA=m
CONFIG_FB_RIVA=m
+# CONFIG_FB_RIVA_DEBUG is not set
# CONFIG_FB_RIVA_I2C is not set
CONFIG_FB_I810=m
CONFIG_FB_I810_GTF=y
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
+CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
#
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_SLAB=y
+# CONFIG_DEBUG_SLAB is not set
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_PAGEALLOC is not set
-CONFIG_DEBUG_HIGHMEM=y
+# CONFIG_DEBUG_HIGHMEM is not set
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FRAME_POINTER is not set
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_AES_GENERIC=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_TUX=m
CONFIG_NVRAM=m
CONFIG_IBM_ASM=m
+CONFIG_CRYPTO_AES_586=m
CONFIG_M686=y
# CONFIG_NOHIGHMEM is not set
# CONFIG_SMP is not set
struct pci_dev *pdev;
...
- if (pci_set_dma_mask(pdev, PLAYBACK_ADDRESS_BITS)) {
+ if (!pci_set_dma_mask(pdev, PLAYBACK_ADDRESS_BITS)) {
card->playback_enabled = 1;
} else {
card->playback_enabled = 0;
printk(KERN_WARN "%s: Playback disabled due to DMA limitations.\n",
card->name);
}
- if (pci_set_dma_mask(pdev, RECORD_ADDRESS_BITS)) {
+ if (!pci_set_dma_mask(pdev, RECORD_ADDRESS_BITS)) {
card->record_enabled = 1;
} else {
card->record_enabled = 0;
modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
preaction=<preaction type> preop=<preop type> start_now=x
+ nowayout=x
The timeout is the number of seconds to the action, and the pretimeout
is the amount of seconds before the reset that the pre-timeout panic will
If start_now is set to 1, the watchdog timer will start running as
soon as the driver is loaded.
+If nowayout is set to 1, the watchdog timer will not stop when the
+watchdog device is closed. The default value of nowayout is true
+if the CONFIG_WATCHDOG_NOWAYOUT option is enabled, or false if not.
+
When compiled into the kernel, the kernel command line is available
for configuring the watchdog:
ipmi_watchdog.preaction=<preaction type>
ipmi_watchdog.preop=<preop type>
ipmi_watchdog.start_now=x
+ ipmi_watchdog.nowayout=x
The options are the same as the module parameter options.
10/03/2003
Revised Feb 12, 2004 by Martine Silbermann
email: Martine.Silbermann@hp.com
+ Revised Jun 25, 2004 by Tom L Nguyen
1. About this guide
-This guide describes the basics of Message Signaled Interrupts(MSI), the
-advantages of using MSI over traditional interrupt mechanisms, and how
-to enable your driver to use MSI or MSI-X. Also included is a Frequently
-Asked Questions.
+This guide describes the basics of Message Signaled Interrupts (MSI),
+the advantages of using MSI over traditional interrupt mechanisms,
+and how to enable your driver to use MSI or MSI-X. Also included is
+a Frequently Asked Questions.
2. Copyright 2003 Intel Corporation
the MSI/MSI-X capability structure in its PCI capability list. The
device function may implement both the MSI capability structure and
the MSI-X capability structure; however, the bus driver should not
-enable both, but instead enable only the MSI-X capability structure.
+enable both.
The MSI capability structure contains Message Control register,
Message Address register and Message Data register. These registers
support for better interrupt performance.
Using MSI enables the device functions to support two or more
-vectors, which can be configure to target different CPU's to
+vectors, which can be configured to target different CPU's to
increase scalability.
5. Configuring a driver to use MSI/MSI-X
By default, the kernel will not enable MSI/MSI-X on all devices that
-support this capability. The CONFIG_PCI_USE_VECTOR kernel option
+support this capability. The CONFIG_PCI_MSI kernel option
must be selected to enable MSI/MSI-X support.
-5.1 Including MSI support into the kernel
+5.1 Including MSI/MSI-X support into the kernel
-To allow MSI-Capable device drivers to selectively enable MSI (using
-pci_enable_msi as described below), the VECTOR based scheme needs to
-be enabled by setting CONFIG_PCI_USE_VECTOR.
+To allow MSI/MSI-X capable device drivers to selectively enable
+MSI/MSI-X (using pci_enable_msi()/pci_enable_msix() as described
+below), the VECTOR based scheme needs to be enabled by setting
+CONFIG_PCI_MSI during kernel config.
Since the target of the inbound message is the local APIC, providing
-CONFIG_PCI_USE_VECTOR is dependent on whether CONFIG_X86_LOCAL_APIC
-is enabled or not.
+CONFIG_X86_LOCAL_APIC must be enabled as well as CONFIG_PCI_MSI.
-int pci_enable_msi(struct pci_dev *)
+5.2 Configuring for MSI support
+
+Due to the non-contiguous fashion in vector assignment of the
+existing Linux kernel, this version does not support multiple
+messages regardless of whether a device function is capable of supporting
+more than one vector. To enable MSI on a device function's MSI
+capability structure requires a device driver to call the function
+pci_enable_msi() explicitly.
+
+5.2.1 API pci_enable_msi
+
+int pci_enable_msi(struct pci_dev *dev)
With this new API, any existing device driver, which like to have
-MSI enabled on its device function, must call this explicitly. A
-successful call will initialize the MSI/MSI-X capability structure
-with ONE vector, regardless of whether the device function is
+MSI enabled on its device function, must call this API to enable MSI.
+A successful call will initialize the MSI capability structure
+with ONE vector, regardless of whether a device function is
capable of supporting multiple messages. This vector replaces the
pre-assigned dev->irq with a new MSI vector. To avoid the conflict
of new assigned vector with existing pre-assigned vector requires
-the device driver to call this API before calling request_irq(...).
+a device driver to call this API before calling request_irq().
+
+5.2.2 API pci_disable_msi
+
+void pci_disable_msi(struct pci_dev *dev)
+
+This API should always be used to undo the effect of pci_enable_msi()
+when a device driver is unloading. This API restores dev->irq with
+the pre-assigned IOAPIC vector and switches a device's interrupt
+mode to PCI pin-irq assertion/INTx emulation mode.
+
+Note that a device driver should always call free_irq() on MSI vector
+it has done request_irq() on before calling this API. Failure to do
+so results in a BUG_ON() and a device will be left with MSI enabled and
+leaks its vector.
+
+5.2.3 MSI mode vs. legacy mode diagram
The below diagram shows the events, which switches the interrupt
mode on the MSI-capable device function between MSI mode and
| | <=============== | |
| MSI MODE | | PIN-IRQ ASSERTION MODE |
| | ===============> | |
- ------------ free_irq ------------------------
+ ------------ pci_disable_msi ------------------------
-5.2 Configuring for MSI support
-Due to the non-contiguous fashion in vector assignment of the
-existing Linux kernel, this version does not support multiple
-messages regardless of the device function is capable of supporting
-more than one vector. The bus driver initializes only entry 0 of
-this capability if pci_enable_msi(...) is called successfully by
-the device driver.
+Figure 1.0 MSI Mode vs. Legacy Mode
+
+In Figure 1.0, a device operates by default in legacy mode. Legacy
+in this context means PCI pin-irq assertion or PCI-Express INTx
+emulation. A successful MSI request (using pci_enable_msi()) switches
+a device's interrupt mode to MSI mode. A pre-assigned IOAPIC vector
+stored in dev->irq will be saved by the PCI subsystem and a new
+assigned MSI vector will replace dev->irq.
+
+To return back to its default mode, a device driver should always call
+pci_disable_msi() to undo the effect of pci_enable_msi(). Note that a
+device driver should always call free_irq() on MSI vector it has done
+request_irq() on before calling pci_disable_msi(). Failure to do so
+results in a BUG_ON() and a device will be left with MSI enabled and
+leaks its vector. Otherwise, the PCI subsystem restores a device's
+dev->irq with a pre-assigned IOAPIC vector and marks released
+MSI vector as unused.
+
+Once being marked as unused, there is no guarantee that the PCI
+subsystem will reserve this MSI vector for a device. Depending on
+the availability of current PCI vector resources and the number of
+MSI/MSI-X requests from other drivers, this MSI may be re-assigned.
+
+For the case where the PCI subsystem re-assigned this MSI vector
+to another driver, a request to switch back to MSI mode may result
+in being assigned a different MSI vector or a failure if no more
+vectors are available.
5.3 Configuring for MSI-X support
-Both the MSI capability structure and the MSI-X capability structure
-share the same above semantics; however, due to the ability of the
-system software to configure each vector of the MSI-X capability
-structure with an independent message address and message data, the
-non-contiguous fashion in vector assignment of the existing Linux
-kernel has no impact on supporting multiple messages on an MSI-X
-capable device functions. By default, as mentioned above, ONE vector
-should be always allocated to the MSI-X capability structure at
-entry 0. The bus driver does not initialize other entries of the
-MSI-X table.
-
-Note that the PCI subsystem should have full control of a MSI-X
-table that resides in Memory Space. The software device driver
-should not access this table.
-
-To request for additional vectors, the device software driver should
-call function msi_alloc_vectors(). It is recommended that the
-software driver should call this function once during the
+Due to the ability of the system software to configure each vector of
+the MSI-X capability structure with an independent message address
+and message data, the non-contiguous fashion in vector assignment of
+the existing Linux kernel has no impact on supporting multiple
+messages on an MSI-X capable device functions. To enable MSI-X on
+a device function's MSI-X capability structure requires its device
+driver to call the function pci_enable_msix() explicitly.
+
+The function pci_enable_msix(), once invoked, enables either
+all or nothing, depending on the current availability of PCI vector
+resources. If the PCI vector resources are available for the number
+of vectors requested by a device driver, this function will configure
+the MSI-X table of the MSI-X capability structure of a device with
+requested messages. To emphasize this reason, for example, a device
+may be capable for supporting the maximum of 32 vectors while its
+software driver usually may request 4 vectors. It is recommended
+that the device driver should call this function once during the
initialization phase of the device driver.
-The function msi_alloc_vectors(), once invoked, enables either
-all or nothing, depending on the current availability of vector
-resources. If no vector resources are available, the device function
-still works with ONE vector. If the vector resources are available
-for the number of vectors requested by the driver, this function
-will reconfigure the MSI-X capability structure of the device with
-additional messages, starting from entry 1. To emphasize this
-reason, for example, the device may be capable for supporting the
-maximum of 32 vectors while its software driver usually may request
-4 vectors.
-
-For each vector, after this successful call, the device driver is
-responsible to call other functions like request_irq(), enable_irq(),
-etc. to enable this vector with its corresponding interrupt service
-handler. It is the device driver's choice to have all vectors shared
-the same interrupt service handler or each vector with a unique
-interrupt service handler.
-
-In addition to the function msi_alloc_vectors(), another function
-msi_free_vectors() is provided to allow the software driver to
-release a number of vectors back to the vector resources. Once
-invoked, the PCI subsystem disables (masks) each vector released.
-These vectors are no longer valid for the hardware device and its
-software driver to use. Like free_irq, it recommends that the
-device driver should also call msi_free_vectors to release all
-additional vectors previously requested.
-
-int msi_alloc_vectors(struct pci_dev *dev, int *vector, int nvec)
-
-This API enables the software driver to request the PCI subsystem
-for additional messages. Depending on the number of vectors
-available, the PCI subsystem enables either all or nothing.
+Unlike the function pci_enable_msi(), the function pci_enable_msix()
+does not replace the pre-assigned IOAPIC dev->irq with a new MSI
+vector because the PCI subsystem writes the 1:1 vector-to-entry mapping
+into the field vector of each element contained in a second argument.
+Note that the pre-assigned IO-APIC dev->irq is valid only if the device
+operates in PIN-IRQ assertion mode. In MSI-X mode, any attempt of
+using dev->irq by the device driver to request for interrupt service
+may result in unpredictable behavior.
+
+For each MSI-X vector granted, a device driver is responsible to call
+other functions like request_irq(), enable_irq(), etc. to enable
+this vector with its corresponding interrupt service handler. It is
+a device driver's choice to assign all vectors with the same
+interrupt service handler or each vector with a unique interrupt
+service handler.
+
+5.3.1 Handling MMIO address space of MSI-X Table
+
+The PCI 3.0 specification has implementation notes that MMIO address
+space for a device's MSI-X structure should be isolated so that the
+software system can set different page for controlling accesses to
+the MSI-X structure. The implementation of MSI patch requires the PCI
+subsystem, not a device driver, to maintain full control of the MSI-X
+table/MSI-X PBA and MMIO address space of the MSI-X table/MSI-X PBA.
+A device driver is prohibited from requesting the MMIO address space
+of the MSI-X table/MSI-X PBA. Otherwise, the PCI subsystem will fail
+enabling MSI-X on its hardware device when it calls the function
+pci_enable_msix().
+
+5.3.2 Handling MSI-X allocation
+
+Determining the number of MSI-X vectors allocated to a function is
+dependent on the number of MSI capable devices and MSI-X capable
+devices populated in the system. The policy of allocating MSI-X
+vectors to a function is defined as the following:
+
+#of MSI-X vectors allocated to a function = (x - y)/z where
+
+x = The number of available PCI vector resources by the time
+ the device driver calls pci_enable_msix(). The PCI vector
+ resources is the sum of the number of unassigned vectors
+ (new) and the number of released vectors when any MSI/MSI-X
+ device driver switches its hardware device back to a legacy
+ mode or is hot-removed. The number of unassigned vectors
+ may exclude some vectors reserved, as defined in parameter
+ NR_HP_RESERVED_VECTORS, for the case where the system is
+ capable of supporting hot-add/hot-remove operations. Users
+ may change the value defined in NR_HP_RESERVED_VECTORS to
+ meet their specific needs.
+
+y = The number of MSI capable devices populated in the system.
+ This policy ensures that each MSI capable device has its
+ vector reserved to avoid the case where some MSI-X capable
+ drivers may attempt to claim all available vector resources.
+
+z = The number of MSI-X capable devices populated in the system.
+ This policy ensures that maximum (x - y) is distributed
+ evenly among MSI-X capable devices.
+
+Note that the PCI subsystem scans y and z during a bus enumeration.
+When the PCI subsystem completes configuring MSI/MSI-X capability
+structure of a device as requested by its device driver, y/z is
+decremented accordingly.
+
+5.3.3 Handling MSI-X shortages
+
+For the case where fewer MSI-X vectors are allocated to a function
+than requested, the function pci_enable_msix() will return the
+maximum number of MSI-X vectors available to the caller. A device
+driver may re-send its request with fewer or equal vectors indicated
+in a return. For example, if a device driver requests 5 vectors, but
+the number of available vectors is 3 vectors, a value of 3 will be
+returned as a result of the pci_enable_msix() call. A function could be
+designed for its driver to use only 3 MSI-X table entries as
+different combinations as ABC--, A-B-C, A--CB, etc. Note that this
+patch does not support multiple entries with the same vector. Such
+attempt by a device driver to use 5 MSI-X table entries with 3 vectors
+as ABBCC, AABCC, BCCBA, etc. will result in a failure of the function
+pci_enable_msix(). Below are the reasons why supporting multiple
+entries with the same vector is an undesirable solution.
+
+ - The PCI subsystem can not determine which entry, which
+ generated the message, to mask/unmask MSI while handling
+ software driver ISR. Attempting to walk through all MSI-X
+ table entries (2048 max) to mask/unmask any match vector
+ is an undesirable solution.
+
+ - Walk through all MSI-X table entries (2048 max) to handle
+ SMP affinity of any match vector is an undesirable solution.
+
+5.3.4 API pci_enable_msix
+
+int pci_enable_msix(struct pci_dev *dev, u32 *entries, int nvec)
+
+This API enables a device driver to request the PCI subsystem
+for enabling MSI-X messages on its hardware device. Depending on
+the availability of PCI vectors resources, the PCI subsystem enables
+either all or nothing.
Argument dev points to the device (pci_dev) structure.
-Argument vector is a pointer of integer type. The number of
-elements is indicated in argument nvec.
+
+Argument entries is a pointer of unsigned integer type. The number of
+elements is indicated in argument nvec. The content of each element
+will be mapped to the following struct defined in /driver/pci/msi.h.
+
+struct msix_entry {
+ u16 vector; /* kernel uses to write alloc vector */
+ u16 entry; /* driver uses to specify entry */
+};
+
+A device driver is responsible for initializing the field entry of
+each element with unique entry supported by MSI-X table. Otherwise,
+-EINVAL will be returned as a result. A successful return of zero
+indicates the PCI subsystem completes initializing each of requested
+entries of the MSI-X table with message address and message data.
+Last but not least, the PCI subsystem will write the 1:1
+vector-to-entry mapping into the field vector of each element. A
+device driver is responsible for keeping track of allocated MSI-X
+vectors in its internal data structure.
+
Argument nvec is an integer indicating the number of messages
requested.
-A return of zero indicates that the number of allocated vector is
-successfully allocated. Otherwise, indicate resources not
-available.
-int msi_free_vectors(struct pci_dev* dev, int *vector, int nvec)
+A return of zero indicates that the number of MSI-X vectors is
+successfully allocated. A return of greater than zero indicates
+MSI-X vector shortage. Or a return of less than zero indicates
+a failure. This failure may be a result of duplicate entries
+specified in second argument, or a result of no available vector,
+or a result of failing to initialize MSI-X table entries.
-This API enables the software driver to inform the PCI subsystem
-that it is willing to release a number of vectors back to the
-MSI resource pool. Once invoked, the PCI subsystem disables each
-MSI-X entry associated with each vector stored in the argument 2.
-These vectors are no longer valid for the hardware device and
-its software driver to use.
+5.3.5 API pci_disable_msix
-Argument dev points to the device (pci_dev) structure.
-Argument vector is a pointer of integer type. The number of
-elements is indicated in argument nvec.
-Argument nvec is an integer indicating the number of messages
-released.
-A return of zero indicates that the number of allocated vectors
-is successfully released. Otherwise, indicates a failure.
+void pci_disable_msix(struct pci_dev *dev)
-5.4 Hardware requirements for MSI support
-MSI support requires support from both system hardware and
+This API should always be used to undo the effect of pci_enable_msix()
+when a device driver is unloading. Note that a device driver should
+always call free_irq() on all MSI-X vectors it has done request_irq()
+on before calling this API. Failure to do so results in a BUG_ON() and
+a device will be left with MSI-X enabled and leaks its vectors.
+
+5.3.6 MSI-X mode vs. legacy mode diagram
+
+The below diagram shows the events, which switches the interrupt
+mode on the MSI-X capable device function between MSI-X mode and
+PIN-IRQ assertion mode (legacy).
+
+ ------------ pci_enable_msix(,,n) ------------------------
+ | | <=============== | |
+ | MSI-X MODE | | PIN-IRQ ASSERTION MODE |
+ | | ===============> | |
+ ------------ pci_disable_msix ------------------------
+
+Figure 2.0 MSI-X Mode vs. Legacy Mode
+
+In Figure 2.0, a device operates by default in legacy mode. A
+successful MSI-X request (using pci_enable_msix()) switches a
+device's interrupt mode to MSI-X mode. A pre-assigned IOAPIC vector
+stored in dev->irq will be saved by the PCI subsystem; however,
+unlike MSI mode, the PCI subsystem will not replace dev->irq with
+assigned MSI-X vector because the PCI subsystem already writes the 1:1
+vector-to-entry mapping into the field vector of each element
+specified in second argument.
+
+To return back to its default mode, a device driver should always call
+pci_disable_msix() to undo the effect of pci_enable_msix(). Note that
+a device driver should always call free_irq() on all MSI-X vectors it
+has done request_irq() on before calling pci_disable_msix(). Failure
+to do so results in a BUG_ON() and a device will be left with MSI-X
+enabled and leaks its vectors. Otherwise, the PCI subsystem switches a
+device function's interrupt mode from MSI-X mode to legacy mode and
+marks all allocated MSI-X vectors as unused.
+
+Once being marked as unused, there is no guarantee that the PCI
+subsystem will reserve these MSI-X vectors for a device. Depending on
+the availability of current PCI vector resources and the number of
+MSI/MSI-X requests from other drivers, these MSI-X vectors may be
+re-assigned.
+
+For the case where the PCI subsystem re-assigned these MSI-X vectors
+to another driver, a request to switch back to MSI-X mode may result
+in being assigned another set of MSI-X vectors or a failure if no
+more vectors are available.
+
+5.4 Handling function implementing both MSI and MSI-X capabilities
+
+For the case where a function implements both MSI and MSI-X
+capabilities, the PCI subsystem enables a device to run either in MSI
+mode or MSI-X mode but not both. A device driver determines whether it
+wants MSI or MSI-X enabled on its hardware device. Once a device
+driver requests for MSI, for example, it is prohibited to request for
+MSI-X; in other words, a device driver is not permitted to ping-pong
+between MSI mode and MSI-X mode during run-time.
+
+5.5 Hardware requirements for MSI/MSI-X support
+MSI/MSI-X support requires support from both system hardware and
individual hardware device functions.
-5.4.1 System hardware support
+5.5.1 System hardware support
Since the target of MSI address is the local APIC CPU, enabling
-MSI support in Linux kernel is dependent on whether existing
+MSI/MSI-X support in Linux kernel is dependent on whether existing
system hardware supports local APIC. Users should verify their
system whether it runs when CONFIG_X86_LOCAL_APIC=y.
In SMP environment, CONFIG_X86_LOCAL_APIC is automatically set;
however, in UP environment, users must manually set
CONFIG_X86_LOCAL_APIC. Once CONFIG_X86_LOCAL_APIC=y, setting
-CONFIG_PCI_USE_VECTOR enables the VECTOR based scheme and
+CONFIG_PCI_MSI enables the VECTOR based scheme and
the option for MSI-capable device drivers to selectively enable
-MSI (using pci_enable_msi as described below).
+MSI/MSI-X.
-Note that CONFIG_X86_IO_APIC setting is irrelevant because MSI
-vector is allocated new during runtime and MSI support does not
-depend on BIOS support. This key independency enables MSI support
-on future IOxAPIC free platform.
+Note that CONFIG_X86_IO_APIC setting is irrelevant because MSI/MSI-X
+vector is allocated new during runtime and MSI/MSI-X support does not
+depend on BIOS support. This key independency enables MSI/MSI-X
+support on future IOxAPIC free platform.
-5.4.2 Device hardware support
+5.5.2 Device hardware support
The hardware device function supports MSI by indicating the
MSI/MSI-X capability structure on its PCI capability list. By
default, this capability structure will not be initialized by
MSI-capable hardware is responsible for whether calling
pci_enable_msi or not. A return of zero indicates the kernel
successfully initializes the MSI/MSI-X capability structure of the
-device funtion. The device function is now running on MSI mode.
+device function. The device function is now running in MSI/MSI-X mode.
-5.5 How to tell whether MSI is enabled on device function
+5.6 How to tell whether MSI/MSI-X is enabled on device function
-At the driver level, a return of zero from pci_enable_msi(...)
-indicates to the device driver that its device function is
-initialized successfully and ready to run in MSI mode.
+At the driver level, a return of zero from the function call of
+pci_enable_msi()/pci_enable_msix() indicates to a device driver that
+its device function is initialized successfully and ready to run in
+MSI/MSI-X mode.
At the user level, users can use command 'cat /proc/interrupts'
-to display the vector allocated for the device and its interrupt
-mode, as shown below.
+to display the vector allocated for a device and its interrupt
+MSI/MSI-X mode ("PCI MSI"/"PCI MSIX"). Below shows MSI mode is
+enabled on a SCSI Adaptec 39320D Ultra320.
CPU0 CPU1
0: 324639 0 IO-APIC-edge timer
option to the kernel via the tagged lists specifying the port, and
serial format options as described in
- linux/Documentation/kernel-parameters.txt.
+ Documentation/kernel-parameters.txt.
3. Detect the machine type
$Id: README.aztcd,v 2.60 1997/11/29 09:51:25 root Exp root $
- Readme-File /usr/src/Documentation/cdrom/aztcd
+ Readme-File Documentation/cdrom/aztcd
for
AZTECH CD-ROM CDA268-01A, ORCHID CD-3110,
OKANO/WEARNES CDD110, CONRAD TXC, CyCDROM CR520, CR540
A reworked and improved version called 'cdtester.c', which has yet more
features for testing CDROM-drives can be found in
-/usr/src/linux/Documentation/cdrom/sbpcd, written by E.Moenkeberg.
+Documentation/cdrom/sbpcd, written by E.Moenkeberg.
Werner Zimmermann
Fachhochschule fuer Technik Esslingen
To create the ip2mkdev shell script change to a convenient directory (/tmp
works just fine) and run the following command:
- unshar /usr/src/linux/Documentation/computone.txt
+ unshar Documentation/computone.txt
(This file)
You should now have a file ip2mkdev in your current working directory with
Herbert Valerio Riedel
Kyle McMartin
Adam J. Richter
+ Fruhwirth Clemens (i586)
+ Linus Torvalds (i586)
CAST5 algorithm contributors:
Kartikey Mahendra Bhatt (original developers unknown, FSF copyright).
32 = /dev/ttyDB0 DataBooster serial port 0
...
39 = /dev/ttyDB7 DataBooster serial port 7
+ 40 = /dev/ttySG0 SGI Altix console port
205 char Low-density serial ports (alternate device)
0 = /dev/culu0 Callout device for ttyLU0
32 = /dev/cudb0 Callout device for ttyDB0
...
39 = /dev/cudb7 Callout device for ttyDB7
+ 40 = /dev/cusg0 Callout device for ttySG0
206 char OnStream SC-x0 tape devices
0 = /dev/osst0 First OnStream SCSI tape, mode 0
Supporting Tools:
-----------------
Supporting tools include digiDload, digiConfig, buildPCI, and ditty. See
-/usr/src/linux/Documentation/README.epca.dir/user.doc for more details. Note,
+drivers/char/README.epca for more details. Note,
this driver REQUIRES that digiDload be executed prior to it being used.
Failure to do this will result in an ENODEV error.
binary-only firmware.
The DVB drivers will be converted to use the request_firmware()
-hotplug interface (see linux/Documentation/firmware_class/).
+hotplug interface (see Documentation/firmware_class/).
(CONFIG_FW_LOADER)
The firmware can be loaded automatically via the hotplug manager
Hotplug Firmware Loading for 2.6 kernels
----------------------------------------
For 2.6 kernels the firmware is loaded at the point that the driver module is
-loaded. See linux/Documentation/dvb/firmware.txt for more information.
+loaded. See Documentation/dvb/firmware.txt for more information.
mv STB_PC_T.bin /usr/lib/hotplug/firmware/dvb-ttusb-dec-2000t.fw
mv STB_PC_X.bin /usr/lib/hotplug/firmware/dvb-ttusb-dec-2540t.fw
and thrash the system to death, so large and/or important servers will want to
set this value to 0.
+nr_hugepages and hugetlb_shm_group
+----------------------------------
+
+nr_hugepages configures number of hugetlb page reserved for the system.
+
+hugetlb_shm_group contains group id that is allowed to create SysV shared
+memory segment using hugetlb page.
+
+laptop_mode
+-----------
+
+laptop_mode is a knob that controls "laptop mode". All the things that are
+controlled by this knob are discussed in Documentation/laptop-mode.txt.
+
+block_dump
+----------
+
+block_dump enables block I/O debugging when set to a nonzero value. More
+information on block I/O debugging is in Documentation/laptop-mode.txt.
+
2.5 /proc/sys/dev - Device specific parameters
----------------------------------------------
command to write value into these files, thereby changing the default settings
of the kernel.
------------------------------------------------------------------------------
-
-
-
-
-
-
-
*
-* ./Documentation/filesystems/udf.txt
+* Documentation/filesystems/udf.txt
*
UDF Filesystem version 0.9.8.1
though not all of them are actually meaningful to the kernel. Boot
loader authors who need additional command line options for the boot
loader itself should get them registered in
-linux/Documentation/kernel-parameters.txt to make sure they will not
+Documentation/kernel-parameters.txt to make sure they will not
conflict with actual kernel options now or in the future.
vga=<mode>
module outside the kernel is to use the kernel build system,
kbuild. Use the following command-line:
-make -C path/to/kernel/src SUBDIRS=$PWD modules
+make -C path/to/kernel/src M=$PWD modules
This requires that a makefile exists, made in accordance with
Documentation/kbuild/makefiles.txt. Read that file for more details on
# Invokes the kernel build system to come back to the current
# directory and build yourmodule.ko.
default:
- make -C ${KERNEL_SOURCE} SUBDIRS=`pwd` modules
+ make -C ${KERNEL_SOURCE} M=`pwd` modules
Document Author: Bart Samwel (bart@samwel.tk)
Date created: January 2, 2004
-Last modified: April 3, 2004
+Last modified: July 10, 2004
Introduction
------------
-Laptopmode is used to minimize the time that the hard disk needs to be spun up,
+Laptop mode is used to minimize the time that the hard disk needs to be spun up,
to conserve battery power on laptops. It has been reported to cause significant
power savings.
--------
* Introduction
-* The short story
+* Installation
* Caveats
-* The details
+* The Details
* Tips & Tricks
* Control script
* ACPI integration
* Monitoring tool
-The short story
----------------
+Installation
+------------
To use laptop mode, you don't need to set any kernel configuration options
-or anything. You simply need to run the laptop_mode control script (which
-is included in this document) as follows:
-
-# laptop_mode start
+or anything. Simply install all the files included in this document, and
+laptop mode will automatically be started when you're on battery. For
+your convenience, a tarball containing an installer can be downloaded at:
-Then set your harddisk spindown time to a relatively low value with hdparm:
+http://www.xs4all.nl/~bsamwel/laptop_mode/tools
-hdparm -S 4 /dev/hda
+To configure laptop mode, you need to edit the configuration file, which is
+located in /etc/default/laptop-mode on Debian-based systems, or in
+/etc/sysconfig/laptop-mode on other systems.
-The value -S 4 means 20 seconds idle time before spindown. Your harddisk will
-now only spin up when a disk cache miss occurs, or at least once every 10
-minutes to write back any pending changes.
-
-To stop laptop_mode, run "laptop_mode stop".
+Unfortunately, automatic enabling of laptop mode does not work for
+laptops that don't have ACPI. On those laptops, you need to start laptop
+mode manually. To start laptop mode, run "laptop_mode start", and to
+stop it, run "laptop_mode stop". (Note: The laptop mode tools package now
+has experimental support for APM, you might want to try that first.)
Caveats
-------
-* The downside of laptop mode is that you have a chance of losing up
- to 10 minutes of work. If you cannot afford this, don't use it! It's
- wise to turn OFF laptop mode when you're almost out of battery --
- although this will make the battery run out faster, at least you'll
- lose less work when it actually runs out. I'm still looking for someone
- to submit instructions on how to turn off laptop mode when battery is low,
- e.g., using ACPI events. I don't have a laptop myself, so if you do and
- you care to contribute such instructions, please do.
+* The downside of laptop mode is that you have a chance of losing up to 10
+ minutes of work. If you cannot afford this, don't use it! The supplied ACPI
+ scripts automatically turn off laptop mode when the battery almost runs out,
+ so that you won't lose any data at the end of your battery life.
* Most desktop hard drives have a very limited lifetime measured in spindown
cycles, typically about 50.000 times (it's usually listed on the spec sheet).
* If you have your filesystems listed as type "auto" in fstab, like I did, then
the control script will not recognize them as filesystems that need remounting.
+ You must list the filesystems with their true type instead.
* It has been reported that some versions of the mutt mail client use file access
times to determine whether a folder contains new mail. If you use mutt and
- experience this, you must disable the noatime remounting in the control script
- by setting DO_REMOUNT_NOATIME=0.
+ experience this, you must disable the noatime remounting by setting the option
+ DO_REMOUNT_NOATIME to 0 in the configuration file.
-The details
+The Details
-----------
-Laptop-mode is controlled by the flag /proc/sys/vm/laptop_mode. This flag is
+Laptop mode is controlled by the knob /proc/sys/vm/laptop_mode. This knob is
present for all kernels that have the laptop mode patch, regardless of any
-configuration options. When the flag is set, any physical disk read operation
-(that might have caused the hard disk to spin up) causes Linux to flush all dirty
-blocks. The result of this is that after a disk has spun down, it will not be spun
-up anymore to write dirty blocks, because those blocks had already been written
-immediately after the most recent read operation.
+configuration options. When the knob is set, any physical disk I/O (that might
+have caused the hard disk to spin up) causes Linux to flush all dirty blocks. The
+result of this is that after a disk has spun down, it will not be spun up
+anymore to write dirty blocks, because those blocks had already been written
+immediately after the most recent read operation. The value of the laptop_mode
+knob determines the time between the occurrence of disk I/O and when the flush
+is triggered. A sensible value for the knob is 5 seconds. Setting the knob to
+0 disables laptop mode.
To increase the effectiveness of the laptop_mode strategy, the laptop_mode
control script increases dirty_expire_centisecs and dirty_writeback_centisecs in
all block dirtyings done to files. This makes it possible to debug why a disk
needs to spin up, and to increase battery life even more. The output of
block_dump is written to the kernel output, and it can be retrieved using
-"dmesg". When you use block_dump, you may want to turn off klogd, otherwise
+"dmesg". When you use block_dump and your kernel logging level also includes
+kernel debugging messages, you probably want to turn off klogd, otherwise
the output of block_dump will be logged, causing disk activity that is not
normally there.
-If 10 minutes is too much or too little downtime for you, you can configure
-this downtime as follows. In the control script, set the MAX_AGE value to the
-maximum number of seconds of disk downtime that you would like. You should
-then set your filesystem's commit interval to the same value. The dirty ratio
-is also configurable from the control script.
-If you don't like the idea of the control script remounting your filesystems
-for you, you can change DO_REMOUNTS to 0 in the script.
+Configuration
+-------------
+
+The laptop mode configuration file is located in /etc/default/laptop-mode on
+Debian-based systems, or in /etc/sysconfig/laptop-mode on other systems. It
+contains the following options:
+
+MAX_AGE:
+
+Maximum time, in seconds, of hard drive spindown time that you are
+comfortable with. Worst case, it's possible that you could lose this
+amount of work if your battery fails while you're in laptop mode.
+
+MINIMUM_BATTERY_MINUTES:
+
+Automatically disable laptop mode if the remaining number of minutes of
+battery power is less than this value. Default is 10 minutes.
+
+AC_HD/BATT_HD:
+
+The idle timeout that should be set on your hard drive when laptop mode
+is active (BATT_HD) and when it is not active (AC_HD). The defaults are
+20 seconds (value 4) for BATT_HD and 2 hours (value 244) for AC_HD. The
+possible values are those listed in the manual page for "hdparm" for the
+"-S" option.
+
+HD:
+
+The devices for which the spindown timeout should be adjusted by laptop mode.
+Default is /dev/hda. If you specify multiple devices, separate them by a space.
+
+READAHEAD:
+
+Disk readahead, in 512-byte sectors, while laptop mode is active. A large
+readahead can prevent disk accesses for things like executable pages (which are
+loaded on demand while the application executes) and sequentially accessed data
+(MP3s).
+
+DO_REMOUNTS:
+
+The control script automatically remounts any mounted journaled filesystems
+with appropriate commit interval options. When this option is set to 0, this
+feature is disabled.
+
+DO_REMOUNT_NOATIME:
+
+When remounting, should the filesystems be remounted with the noatime option?
+Normally, this is set to "1" (enabled), but there may be programs that require
+access time recording.
+
+DIRTY_RATIO:
-Thanks to Kiko Piris, the control script can be used to enable laptop mode on
-both the Linux 2.4 and 2.6 series.
+The percentage of memory that is allowed to contain "dirty" or unsaved data
+before a writeback is forced, while laptop mode is active. Corresponds to
+the /proc/sys/vm/dirty_ratio sysctl.
+
+DIRTY_BACKGROUND_RATIO:
+
+The percentage of memory that is allowed to contain "dirty" or unsaved data
+after a forced writeback is done due to an exceeding of DIRTY_RATIO. Set
+this nice and low. This corresponds to the /proc/sys/vm/dirty_background_ratio
+sysctl.
+
+Note that the behaviour of dirty_background_ratio is quite different
+when laptop mode is active and when it isn't. When laptop mode is inactive,
+dirty_background_ratio is the threshold percentage at which background writeouts
+start taking place. When laptop mode is active, however, background writeouts
+are disabled, and the dirty_background_ratio only determines how much writeback
+is done when dirty_ratio is reached.
+
+DO_CPU:
+
+Enable CPU frequency scaling when in laptop mode. (Requires CPUFreq to be setup.
+See Documentation/cpu-freq/user-guide.txt for more info. Disabled by default.)
+
+CPU_MAXFREQ:
+
+When on battery, what is the maximum CPU speed that the system should use? Legal
+values are "slowest" for the slowest speed that your CPU is able to operate at,
+or a value listed in /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies.
Tips & Tricks
-------------
* Bartek Kania reports getting up to 50 minutes of extra battery life (on top
- of his regular 3 to 3.5 hours) using very aggressive power management (hdparm
- -B1) and a spindown time of 5 seconds (hdparm -S1).
+ of his regular 3 to 3.5 hours) using a spindown time of 5 seconds (BATT_HD=1).
-* You can spin down the disk while playing MP3, by setting the disk readahead
- to 8MB (hdparm -a 16384). Effectively, the disk will read a complete MP3 at
+* You can spin down the disk while playing MP3, by setting disk readahead
+ to 8MB (READAHEAD=16384). Effectively, the disk will read a complete MP3 at
once, and will then spin down while the MP3 is playing. (Thanks to Bartek
Kania.)
this on powerbooks too. I hope that this is a piece of information that
might be useful to the Laptop Mode patch or its users."
-* One thing which will cause disks to spin up is not-present application
- and dynamic library text pages. The kernel will load program text off disk
- on-demand, so each time you invoke an application feature for the first
- time, the kernel needs to spin the disk up to go and fetch that part of the
- application.
-
- So it is useful to increase the disk readahead parameter greatly, so that
- the kernel will pull all of the executable's pages into memory on the first
- pagefault.
-
- The supplied script does this.
-
* In syslog.conf, you can prefix entries with a dash ``-'' to omit syncing the
file after every logging. When you're using laptop-mode and your disk doesn't
spin down, this is a likely culprit.
(http://noflushd.sourceforge.net/), it seems that noflushd prevents laptop-mode
from doing its thing.
+* If you're worried about your data, you might want to consider using a USB
+ memory stick or something like that as a "working area". (Be aware though
+ that flash memory can only handle a limited number of writes, and overuse
+ may wear out your memory stick pretty quickly. Do _not_ use journalling
+ filesystems on flash memory sticks.)
+
+
+Configuration file for control and ACPI battery scripts
+-------------------------------------------------------
+
+This allows the tunables to be changed for the scripts via an external
+configuration file
+
+It should be installed as /etc/default/laptop-mode on Debian, and as
+/etc/sysconfig/laptop-mode on Red Hat, SUSE, Mandrake, and other work-alikes.
+
+--------------------CONFIG FILE BEGIN-------------------------------------------
+# Maximum time, in seconds, of hard drive spindown time that you are
+# comfortable with. Worst case, it's possible that you could lose this
+# amount of work if your battery fails you while in laptop mode.
+#MAX_AGE=600
+
+# Automatically disable laptop mode when the number of minutes of battery
+# that you have left goes below this threshold.
+MINIMUM_BATTERY_MINUTES=10
+
+# Read-ahead, in 512-byte sectors. You can spin down the disk while playing MP3/OGG
+# by setting the disk readahead to 8MB (READAHEAD=16384). Effectively, the disk
+# will read a complete MP3 at once, and will then spin down while the MP3/OGG is
+# playing.
+#READAHEAD=4096
+
+# Shall we remount journaled fs. with appropriate commit interval? (1=yes)
+#DO_REMOUNTS=1
+
+# And shall we add the "noatime" option to that as well? (1=yes)
+#DO_REMOUNT_NOATIME=1
+
+# Dirty synchronous ratio. At this percentage of dirty pages the process
+# which
+# calls write() does its own writeback
+#DIRTY_RATIO=40
+
+#
+# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been
+# exceeded, the kernel will wake pdflush which will then reduce the amount
+# of dirty memory to dirty_background_ratio. Set this nice and low, so once
+# some writeout has commenced, we do a lot of it.
+#
+#DIRTY_BACKGROUND_RATIO=5
+
+# kernel default dirty buffer age
+#DEF_AGE=30
+#DEF_UPDATE=5
+#DEF_DIRTY_BACKGROUND_RATIO=10
+#DEF_DIRTY_RATIO=40
+#DEF_XFS_AGE_BUFFER=15
+#DEF_XFS_SYNC_INTERVAL=30
+#DEF_XFS_BUFD_INTERVAL=1
+
+# This must be adjusted manually to the value of HZ in the running kernel
+# on 2.4, until the XFS people change their 2.4 external interfaces to work in
+# centisecs. This can be automated, but it's a work in progress that still
+# needs some fixes. On 2.6 kernels, XFS uses USER_HZ instead of HZ for
+# external interfaces, and that is currently always set to 100. So you don't
+# need to change this on 2.6.
+#XFS_HZ=100
+
+# Should the maximum CPU frequency be adjusted down while on battery?
+# Requires CPUFreq to be setup.
+# See Documentation/cpu-freq/user-guide.txt for more info
+#DO_CPU=0
+
+# When on battery what is the maximum CPU speed that the system should
+# use? Legal values are "slowest" for the slowest speed that your
+# CPU is able to operate at, or a value listed in:
+# /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies
+# Only applicable if DO_CPU=1.
+#CPU_MAXFREQ=slowest
+
+# Idle timeout for your hard drive (man hdparm for valid values, -S option)
+# Default is 2 hours on AC (AC_HD=244) and 20 seconds for battery (BATT_HD=4).
+#AC_HD=244
+#BATT_HD=4
+
+# The drives for which to adjust the idle timeout. Separate them by a space,
+# e.g. HD="/dev/hda /dev/hdb".
+#HD="/dev/hda"
+
+# Set the spindown timeout on a hard drive?
+#DO_HD=1
+
+--------------------CONFIG FILE END---------------------------------------------
+
Control script
--------------
-Please note that this control script works for the Linux 2.4 and 2.6 series.
+Please note that this control script works for the Linux 2.4 and 2.6 series (thanks
+to Kiko Piris).
---------------------CONTROL SCRIPT BEGIN------------------------------------------
+--------------------CONTROL SCRIPT BEGIN----------------------------------------
#!/bin/bash
# start or stop laptop_mode, best run by a power management daemon when
#############################################################################
-# Age time, in seconds. should be put into a sysconfig file
-MAX_AGE=600
+# Source config
+if [ -f /etc/default/laptop-mode ] ; then
+ # Debian
+ . /etc/default/laptop-mode
+elif [ -f /etc/sysconfig/laptop-mode ] ; then
+ # Others
+ . /etc/sysconfig/laptop-mode
+fi
+
+# Don't raise an error if the config file is incomplete
+# set defaults instead:
+
+# Maximum time, in seconds, of hard drive spindown time that you are
+# comfortable with. Worst case, it's possible that you could lose this
+# amount of work if your battery fails you while in laptop mode.
+MAX_AGE=${MAX_AGE:-'600'}
# Read-ahead, in kilobytes
-READAHEAD=4096
+READAHEAD=${READAHEAD:-'4096'}
# Shall we remount journaled fs. with appropriate commit interval? (1=yes)
-DO_REMOUNTS=1
+DO_REMOUNTS=${DO_REMOUNTS:-'1'}
# And shall we add the "noatime" option to that as well? (1=yes)
-DO_REMOUNT_NOATIME=1
+DO_REMOUNT_NOATIME=${DO_REMOUNT_NOATIME:-'1'}
+
+# Shall we adjust the idle timeout on a hard drive?
+DO_HD=${DO_HD:-'1'}
+
+# Adjust idle timeout on which hard drive?
+HD="${HD:-'/dev/hda'}"
+
+# spindown time for HD (hdparm -S values)
+AC_HD=${AC_HD:-'244'}
+BATT_HD=${BATT_HD:-'4'}
# Dirty synchronous ratio. At this percentage of dirty pages the process which
# calls write() does its own writeback
-DIRTY_RATIO=40
+DIRTY_RATIO=${DIRTY_RATIO:-'40'}
+
+# cpu frequency scaling
+# See Documentation/cpu-freq/user-guide.txt for more info
+DO_CPU=${DO_CPU:-'0'}
+CPU_MAXFREQ=${CPU_MAXFREQ:-'slowest'}
#
# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been
# of dirty memory to dirty_background_ratio. Set this nice and low, so once
# some writeout has commenced, we do a lot of it.
#
-DIRTY_BACKGROUND_RATIO=5
+DIRTY_BACKGROUND_RATIO=${DIRTY_BACKGROUND_RATIO:-'5'}
# kernel default dirty buffer age
-DEF_AGE=30
-DEF_UPDATE=5
-DEF_DIRTY_BACKGROUND_RATIO=10
-DEF_DIRTY_RATIO=40
-DEF_XFS_AGE_BUFFER=15
-DEF_XFS_SYNC_INTERVAL=30
-DEF_XFS_BUFD_INTERVAL=1
+DEF_AGE=${DEF_AGE:-'30'}
+DEF_UPDATE=${DEF_UPDATE:-'5'}
+DEF_DIRTY_BACKGROUND_RATIO=${DEF_DIRTY_BACKGROUND_RATIO:-'10'}
+DEF_DIRTY_RATIO=${DEF_DIRTY_RATIO:-'40'}
+DEF_XFS_AGE_BUFFER=${DEF_XFS_AGE_BUFFER:-'15'}
+DEF_XFS_SYNC_INTERVAL=${DEF_XFS_SYNC_INTERVAL:-'30'}
+DEF_XFS_BUFD_INTERVAL=${DEF_XFS_BUFD_INTERVAL:-'1'}
# This must be adjusted manually to the value of HZ in the running kernel
# on 2.4, until the XFS people change their 2.4 external interfaces to work in
# some fixes. On 2.6 kernels, XFS uses USER_HZ instead of HZ for external
# interfaces, and that is currently always set to 100. So you don't need to
# change this on 2.6.
-XFS_HZ=100
+XFS_HZ=${XFS_HZ:-'100'}
#############################################################################
fi
}
+deduce_fstype () {
+ MP="$1"
+ # My root filesystem unfortunately has
+ # type "unknown" in /etc/mtab. If we encounter
+ # "unknown", we try to get the type from fstab.
+ cat /etc/fstab |
+ grep -v '^#' |
+ while read FSTAB_DEV FSTAB_MP FSTAB_FST FSTAB_OPTS FSTAB_DUMP FSTAB_PASS ; do
+ if [ "$FSTAB_MP" = "$MP" ]; then
+ echo $FSTAB_FST
+ exit 0
+ fi
+ done
+}
if [ $DO_REMOUNT_NOATIME -eq 1 ] ; then
NOATIME_OPT=",noatime"
if [ $DO_REMOUNTS -eq 1 ]; then
cat /etc/mtab | while read DEV MP FST OPTS DUMP PASS ; do
PARSEDOPTS="$(parse_mount_opts "$OPTS")"
+ if [ "$FST" = 'unknown' ]; then
+ FST=$(deduce_fstype $MP)
+ fi
case "$FST" in
"ext3"|"reiserfs")
PARSEDOPTS="$(parse_mount_opts commit "$OPTS")"
fi
done
fi
+ if [ $DO_HD -eq 1 ] ; then
+ for THISHD in $HD ; do
+ /sbin/hdparm -S $BATT_HD $THISHD > /dev/null 2>&1
+ /sbin/hdparm -B 1 $THISHD > /dev/null 2>&1
+ done
+ fi
+ if [ $DO_CPU -eq 1 -a -e /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq ]; then
+ if [ $CPU_MAXFREQ = 'slowest' ]; then
+ CPU_MAXFREQ=`cat /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq`
+ fi
+ echo $CPU_MAXFREQ > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
+ fi
echo "."
;;
stop)
if [ $DO_REMOUNTS -eq 1 ] ; then
cat /etc/mtab | while read DEV MP FST OPTS DUMP PASS ; do
# Reset commit and atime options to defaults.
+ if [ "$FST" = 'unknown' ]; then
+ FST=$(deduce_fstype $MP)
+ fi
case "$FST" in
"ext3"|"reiserfs")
PARSEDOPTS="$(parse_mount_opts_wfstab $DEV commit $OPTS)"
fi
done
fi
+ if [ $DO_HD -eq 1 ] ; then
+ for THISHD in $HD ; do
+ /sbin/hdparm -S $AC_HD $THISHD > /dev/null 2>&1
+ /sbin/hdparm -B 255 $THISHD > /dev/null 2>&1
+ done
+ fi
+ if [ $DO_CPU -eq 1 -a -e /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq ]; then
+ echo `cat /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq` > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
+ fi
echo "."
;;
*)
esac
exit 0
---------------------CONTROL SCRIPT END--------------------------------------------
+--------------------CONTROL SCRIPT END------------------------------------------
ACPI integration
----------------
Dax Kelson submitted this so that the ACPI acpid daemon will
-kick off the laptop_mode script and run hdparm.
+kick off the laptop_mode script and run hdparm. The part that
+automatically disables laptop mode when the battery is low was
+written by Jan Topinski.
----------------------------/etc/acpi/events/ac_adapter BEGIN-------------------------------------------
+-----------------/etc/acpi/events/ac_adapter BEGIN------------------------------
event=ac_adapter
-action=/etc/acpi/actions/battery.sh
----------------------------/etc/acpi/events/ac_adapter END-------------------------------------------
+action=/etc/acpi/actions/ac.sh %e
+----------------/etc/acpi/events/ac_adapter END---------------------------------
+
----------------------------/etc/acpi/actions/battery.sh BEGIN-------------------------------------------
-#!/bin/sh
+-----------------/etc/acpi/events/battery BEGIN---------------------------------
+event=battery.*
+action=/etc/acpi/actions/battery.sh %e
+----------------/etc/acpi/events/battery END------------------------------------
-# cpu throttling
-# cat /proc/acpi/processor/CPU0/throttling for more info
-ACAD_THR=0
-BATT_THR=2
-# spindown time for HD (man hdparm for valid values)
-# I prefer 2 hours for acad and 20 seconds for batt
-ACAD_HD=244
-BATT_HD=4
+----------------/etc/acpi/actions/ac.sh BEGIN-----------------------------------
+#!/bin/bash
-# ac/battery event handler
+# ac on/offline event handler
-status=`awk '/^state: / { print $2 }' /proc/acpi/ac_adapter/AC/state`
+status=`awk '/^state: / { print $2 }' /proc/acpi/ac_adapter/$2/state`
case $status in
"on-line")
- echo "Setting HD spindown for AC mode."
/sbin/laptop_mode stop
- /sbin/hdparm -S $ACAD_HD /dev/hda > /dev/null 2>&1
- /sbin/hdparm -B 255 /dev/hda > /dev/null 2>&1
- #echo -n $ACAD_CPU:$ACAD_THR > /proc/acpi/processor/CPU0/limit
exit 0
;;
"off-line")
- echo "Setting HD spindown for battery mode."
/sbin/laptop_mode start
- /sbin/hdparm -S $BATT_HD /dev/hda > /dev/null 2>&1
- /sbin/hdparm -B 1 /dev/hda > /dev/null 2>&1
- #echo -n $BATT_CPU:$BATT_THR > /proc/acpi/processor/CPU0/limit
exit 0
;;
esac
----------------------------/etc/acpi/actions/battery.sh END-------------------------------------------
+---------------------------/etc/acpi/actions/ac.sh END--------------------------
+
+
+---------------------------/etc/acpi/actions/battery.sh BEGIN-------------------
+#! /bin/bash
+
+# Automatically disable laptop mode when the battery almost runs out.
+
+BATT_INFO=/proc/acpi/battery/$2/state
+
+if [[ -f /proc/sys/vm/laptop_mode ]]
+then
+ LM=`cat /proc/sys/vm/laptop_mode`
+ if [[ $LM -gt 0 ]]
+ then
+ if [[ -f $BATT_INFO ]]
+ then
+ # Source the config file only now that we know we need
+ if [ -f /etc/default/laptop-mode ] ; then
+ # Debian
+ . /etc/default/laptop-mode
+ elif [ -f /etc/sysconfig/laptop-mode ] ; then
+ # Others
+ . /etc/sysconfig/laptop-mode
+ fi
+ MINIMUM_BATTERY_MINUTES=${MINIMUM_BATTERY_MINUTES:-'10'}
+
+ ACTION="`cat $BATT_INFO | grep charging | cut -c 26-`"
+ if [[ $ACTION = "discharging" ]]
+ then
+ PRESENT_RATE=`cat $BATT_INFO | grep "present rate:" | sed "s/.* \([0-9][0-9]* \).*/\1/" `
+ REMAINING=`cat $BATT_INFO | grep "remaining capacity:" | sed "s/.* \([0-9][0-9]* \).*/\1/" `
+ fi
+ if (($REMAINING * 60 / $PRESENT_RATE < $MINIMUM_BATTERY_MINUTES))
+ then
+ /sbin/laptop_mode stop
+ fi
+ else
+ logger -p daemon.warning "You are using laptop mode and your battery interface $BATT_INFO is missing. This may lead to loss of data when the battery runs out. Check kernel ACPI support and /proc/acpi/battery folder, and edit /etc/acpi/battery.sh to set BATT_INFO to the correct path."
+ fi
+ fi
+fi
+---------------------------/etc/acpi/actions/battery.sh END--------------------
+
Monitoring tool
---------------
Bartek Kania submitted this, it can be used to measure how much time your disk
spends spun up/down.
----------------------------dslm.c BEGIN-------------------------------------------
+---------------------------dslm.c BEGIN-----------------------------------------
/*
* Simple Disk Sleep Monitor
* by Bartek Kania
return 0;
}
----------------------------dslm.c END---------------------------------------------
+---------------------------dslm.c END-------------------------------------------
This should not cause problems for anybody, since everybody using a
2.1.x kernel should have updated their C library to a suitable version
-anyway (see the file "linux/Documentation/Changes".)
+anyway (see the file "Documentation/Changes".)
1.2 Allow Mixed Locks Again
---------------------------
The current list of parameters can be found in the files:
linux/net/TUNABLE
- linux/Documentation/networking/ip-sysctl.txt
+ Documentation/networking/ip-sysctl.txt
Some of these are accessible via the sysctl interface, and many more are
scheduled to be added in this way. For example, some parameters related
The SliceCOM board doesn't require firmware. You can have 4 of these cards
in one machine. The driver doesn't (yet) support shared interrupts, so
you will need a separate IRQ line for every board.
-Read linux/Documentation/networking/slicecom.txt for help on configuring
+Read Documentation/networking/slicecom.txt for help on configuring
this adapter.
THE HDLC/PPP LINE PROTOCOL DRIVER
you have to enable it with a boot time parameter. Prior to 2.4.2-ac18
the NMI-oopser is enabled unconditionally on x86 SMP boxes.
+On x86-64 the NMI oopser is on by default. On 64bit Intel CPUs
+it uses IO-APIC by default and on AMD it uses local APIC.
+
[ feel free to send bug reports, suggestions and patches to
Ingo Molnar <mingo@redhat.com> or the Linux SMP mailing
list at <linux-smp@vger.kernel.org> ]
should be sent to the mailing list available through the suspend2
website, and not to the Linux Kernel Mailing List. We are working
toward merging suspend2 into the mainline kernel.
+
+Q: Kernel thread must voluntarily freeze itself (call 'refrigerator'). But
+I found some kernel threads don't do it, and they don't freeze, and
+so the system can't sleep. Is this a known behavior?
+
+A: All such kernel threads need to be fixed, one by one. Select place
+where it is safe to be frozen (no kernel semaphores should be held at
+that point and it must be safe to sleep there), and add:
+
+ if (current->flags & PF_FREEZE)
+ refrigerator(PF_FREEZE);
+
+Q: What is the difference between "platform", "shutdown" and
+"firmware" in /sys/power/disk?
+
+A:
+
+shutdown: save state in linux, then tell bios to powerdown
+
+platform: save state in linux, then tell bios to powerdown and blink
+ "suspended led"
+
+firmware: tell bios to save state itself [needs BIOS-specific suspend
+ partition, and has very little to do with swsusp]
+
+"platform" is actually right thing to do, but "shutdown" is most
+reliable.
Then notify /sbin/init that /etc/inittab has changed, by issuing
the telinit command with the q operand:
- cd /usr/src/linux/Documentation/s390
+ cd Documentation/s390
sh config3270.sh
sh /tmp/mkdev3270
telinit q
Documentation
=============
There is a SCSI documentation directory within the kernel source tree,
-typically /usr/src/linux/Documentation/scsi . Most documents are in plain
+typically Documentation/scsi . Most documents are in plain
(i.e. ASCII) text. This file is named scsi_mid_low_api.txt and can be
found in that directory. A more recent copy of this document may be found
at http://www.torque.net/scsi/scsi_mid_low_api.txt.gz .
<para>
More precise information can be found in
- <filename>alsa-kernel/Documentation/sound/alsa/ControlNames.txt</filename>.
+ <filename>Documentation/sound/alsa/ControlNames.txt</filename>.
</para>
</section>
</section>
The callback is much more complicated than the text-file
version. You need to use a low-level i/o functions such as
<function>copy_from/to_user()</function> to transfer the
- data. Also, you have to keep tracking the file position, too.
+ data.
<informalexample>
<programlisting>
static long my_file_io_read(snd_info_entry_t *entry,
void *file_private_data,
struct file *file,
- char *buf, long count)
+ char *buf,
+ unsigned long count,
+ unsigned long pos)
{
long size = count;
- if (file->f_pos + size > local_max_size)
- size = local_max_size - file->f_pos;
- if (copy_to_user(buf, local_data + file->f_pos, size))
+ if (pos + size > local_max_size)
+ size = local_max_size - pos;
+ if (copy_to_user(buf, local_data + pos, size))
return -EFAULT;
- file->f_pos += size;
return size;
}
]]>
# insmod awe_wave
(Be sure to load awe_wave after sb!)
- See /usr/src/linux/Documentation/sound/oss/AWE32 for
+ See Documentation/sound/oss/AWE32 for
more details.
9. (only for obsolete systems) If you don't have /dev/sequencer
========
0.1.0 11/20/1998 First version, draft
1.0.0 11/1998 Alan Cox changes, incorporation in 2.2.0
- as /usr/src/linux/Documentation/sound/oss/Introduction
+ as Documentation/sound/oss/Introduction
1.1.0 6/30/1999 Second version, added notes on making the drivers,
added info on multiple sound cards of similar types,]
added more diagnostics info, added info about esd.
4) OSS's WWW site at http://www.opensound.com.
-5) All the files in linux/Documentation/sound.
+5) All the files in Documentation/sound.
6) The comments and code in linux/drivers/sound.
This documentation is relevant for the PAS16 driver (pas2_card.c and
friends) under kernel version 2.3.99 and later. If you are
unfamiliar with configuring sound under Linux, please read the
-Sound-HOWTO, linux/Documentation/sound/oss/Introduction and other
+Sound-HOWTO, Documentation/sound/oss/Introduction and other
relevant docs first.
The following information is relevant information from README.OSS
The new stuff for 2.3.99 and later
============================================================================
-The following configuration options from linux/Documentation/Configure.help
+The following configuration options from Documentation/Configure.help
are relevant to configuring the PAS16:
Sound card support
dev/ device specific information (eg dev/cdrom/info)
fs/ specific filesystems
filehandle, inode, dentry and quota tuning
- binfmt_misc <linux/Documentation/binfmt_misc.txt>
+ binfmt_misc <Documentation/binfmt_misc.txt>
kernel/ global kernel info / tuning
miscellaneous stuff
net/ networking stuff, for documentation look in:
- <linux/Documentation/networking/>
+ <Documentation/networking/>
proc/ <empty>
sunrpc/ SUN Remote Procedure Call (NFS)
vm/ memory management tuning
- dirty_writeback_centisecs
- max_map_count
- min_free_kbytes
+- laptop_mode
+- block_dump
==============================================================
dirty_ratio, dirty_background_ratio, dirty_expire_centisecs,
-dirty_writeback_centisecs, vfs_cache_pressure:
+dirty_writeback_centisecs, vfs_cache_pressure, laptop_mode,
+block_dump:
See Documentation/filesystems/proc.txt
NOTE:
The USB subsystem now has a substantial section in "The Linux Kernel API"
- guide (in linux/Documentation/DocBook), generated from the current source
+ guide (in Documentation/DocBook), generated from the current source
code. This particular documentation file isn't particularly current or
complete; don't rely on it except for a quick overview.
2000-July-12
For USB help other than the readme files that are located in
-linux/Documentation/usb/*, see the following:
+Documentation/usb/*, see the following:
Linux-USB project: http://www.linux-usb.org
mirrors at http://www.suse.cz/development/linux-usb/
Information - video4linux:
http://roadrunner.swansea.linux.org.uk/v4lapi.shtml
-/usr/src/linux/Documentation/video4linux/API.html
+Documentation/video4linux/API.html
/usr/include/linux/videodev.h
Information - video4linux/mjpeg extensions:
DEFXX FDDI NETWORK DRIVER
P: Maciej W. Rozycki
-M: macro@ds2.pg.gda.pl
+M: macro@linux-mips.org
S: Maintained
DELL LAPTOP SMM DRIVER
L: linuxppc-embedded@lists.linuxppc.org
S: Maintained
+LINUX FOR POWERPC EMBEDDED PPC8XX AND BOOT CODE
+P: Tom Rini
+M: trini@kernel.crashing.org
+W: http://www.penguinppc.org/
+L: linuxppc-embedded@lists.linuxppc.org
+S: Maintained
+
LINUX FOR POWERPC EMBEDDED PPC85XX
P: Kumar Gala
M: kumar.gala@freescale.com
L: linux-scsi@vger.kernel.org
S: Maintained
-M68K
-P: Jes Sorensen
-M: jes@trained-monkey.org
-W: http://www.clark.net/pub/lawrencc/linux/index.html
+M68K ARCHITECTURE
+P: Geert Uytterhoeven
+M: geert@linux-m68k.org
+P: Roman Zippel
+M: zippel@linux-m68k.org
L: linux-m68k@lists.linux-m68k.org
+W: http://www.linux-m68k.org/
+W: http://linux-m68k-cvs.ubb.ca/
S: Maintained
M68K ON APPLE MACINTOSH
P: David Woodhouse
M: dwmw2@redhat.com
W: http://www.linux-mtd.infradead.org/
-L: mtd@infradead.org
+L: linux-mtd@lists.infradead.org
S: Maintained
MICROTEK X6 SCANNER
M: jmorris@redhat.com
P: Hideaki YOSHIFUJI
M: yoshfuji@linux-ipv6.org
+P: Patrick McHardy
+M: kaber@coreworks.de
L: netdev@oss.sgi.com
S: Maintained
ONSTREAM SCSI TAPE DRIVER
P: Willem Riede
M: osst@riede.org
-L: osst@linux1.onstream.nl
+L: osst-users@lists.sourceforge.net
L: linux-scsi@vger.kernel.org
S: Maintained
S: Maintained
SPARC (sparc32):
-P: Keith M. Wesolowski
-M: wesolows@foobazco.org
+P: William L. Irwin
+M: wli@holomorphy.com
L: sparclinux@vger.kernel.org
S: Maintained
KBUILD_CHECKSRC = 0
endif
-# Use make M=dir to specify direcotry of external module to build
+# Use make M=dir to specify directory of external module to build
# Old syntax make ... SUBDIRS=$PWD is still supported
# Setting the environment variable KBUILD_EXTMOD take precedence
ifdef SUBDIRS
_all: modules
endif
-# Make sure we're not wasting cpu-cycles doing locale handling, yet do make
-# sure error messages appear in the user-desired language
-ifdef LC_ALL
-LANG := $(LC_ALL)
-LC_ALL :=
-endif
-LC_COLLATE := C
-LC_CTYPE := C
-export LANG LC_ALL LC_COLLATE LC_CTYPE
-
srctree := $(if $(KBUILD_SRC),$(KBUILD_SRC),$(CURDIR))
TOPDIR := $(srctree)
# FIXME - TOPDIR is obsolete, use srctree/objtree
$(sort $(vmlinux-objs)) arch/$(ARCH)/kernel/vmlinux.lds.s: $(vmlinux-dirs) ;
-# Handle descending into subdirectories listed in $(vmlinux-dirs)
+# Handle descending into subdirectories listed in $(vmlinux-dirs)
+# Preset locale variables to speed up the build process. Limit locale
+# tweaks to this spot to avoid wrong language settings when running
+# make menuconfig etc.
+# Error messages still appear in the original language
.PHONY: $(vmlinux-dirs)
$(vmlinux-dirs): prepare-all scripts
# A multi level approach is used. prepare1 is updated first, then prepare0.
# prepare-all is the collection point for the prepare targets.
-.PHONY: prepare-all prepare prepare0 prepare1
+.PHONY: prepare-all prepare prepare0 prepare1 prepare2
+
+# prepare2 generates a Makefile to be placed in the output directory, if
+# using a separate output directory. This allows convenient use
+# of make in the output directory
+prepare2:
+ $(Q)if [ ! $(srctree) -ef $(objtree) ]; then \
+ $(CONFIG_SHELL) $(srctree)/scripts/mkmakefile \
+ $(srctree) $(objtree) $(VERSION) $(PATCHLEVEL) \
+ > $(objtree)/Makefile; \
+ fi
# prepare1 is used to check if we are building in a separate output directory,
# and if so do:
# 1) Check that make has not been executed in the kernel src $(srctree)
# 2) Create the include2 directory, used for the second asm symlink
-prepare1:
+prepare1: prepare2
ifneq ($(KBUILD_SRC),)
@echo ' Using $(srctree) as source for kernel'
$(Q)if [ -h $(srctree)/include/asm -o -f $(srctree)/.config ]; then \
sleep 1; \
fi
@rm -rf $(MODLIB)/kernel
- @rm -f $(MODLIB)/build
+ @rm -f $(MODLIB)/source
@mkdir -p $(MODLIB)/kernel
- @ln -s $(TOPDIR) $(MODLIB)/build
+ @ln -s $(srctree) $(MODLIB)/source
+ @if [ ! $(objtree) -ef $(MODLIB)/build ]; then \
+ rm -f $(MODLIB)/build ; \
+ ln -s $(objtree) $(MODLIB)/build ; \
+ fi
$(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modinst
# If System.map exists, run depmod. This deliberately does not have a
# ---------------------------------------------------------------------------
define all-sources
- ( find . $(RCS_FIND_IGNORE) \
+ ( find $(srctree) $(RCS_FIND_IGNORE) \
\( -name include -o -name arch \) -prune -o \
-name '*.[chS]' -print; \
- find arch/$(ARCH) $(RCS_FIND_IGNORE) \
+ find $(srctree)/arch/$(ARCH) $(RCS_FIND_IGNORE) \
-name '*.[chS]' -print; \
- find security/selinux/include $(RCS_FIND_IGNORE) \
+ find $(srctree)/security/selinux/include $(RCS_FIND_IGNORE) \
-name '*.[chS]' -print; \
- find include $(RCS_FIND_IGNORE) \
+ find $(srctree)/include $(RCS_FIND_IGNORE) \
\( -name config -o -name 'asm-*' \) -prune \
-o -name '*.[chS]' -print; \
- find include/asm-$(ARCH) $(RCS_FIND_IGNORE) \
+ find $(srctree)/include/asm-$(ARCH) $(RCS_FIND_IGNORE) \
-name '*.[chS]' -print; \
- find include/asm-generic $(RCS_FIND_IGNORE) \
+ find $(srctree)/include/asm-generic $(RCS_FIND_IGNORE) \
-name '*.[chS]' -print )
endef
- There are various README files in the Documentation/ subdirectory:
these typically contain kernel-specific installation notes for some
- drivers for example. See ./Documentation/00-INDEX for a list of what
+ drivers for example. See Documentation/00-INDEX for a list of what
is contained in each file. Please read the Changes file, as it
contains information about the problems, which may result by upgrading
your kernel.
Compiling and running the 2.6.xx kernels requires up-to-date
versions of various software packages. Consult
- ./Documentation/Changes for the minimum version numbers required
+ Documentation/Changes for the minimum version numbers required
and how to get updates for these packages. Beware that using
excessively old versions of these packages can cause indirect
errors that are very difficult to track down, so don't assume that
gcc 2.91.66 (egcs-1.1.2), and gcc 2.7.2.3 are known to miscompile
some parts of the kernel, and are *no longer supported*.
Also remember to upgrade your binutils package (for as/ld/nm and company)
- if necessary. For more information, refer to ./Documentation/Changes.
+ if necessary. For more information, refer to Documentation/Changes.
Please note that you can still run a.out user programs with this kernel.
* Copyright (C) 2001-2002 Jan-Benedict Glaw <jbglaw@lug-owl.de>
*
* This driver is at all a modified version of Erik Mouw's
- * ./linux/Documentation/DocBook/procfs_example.c, so: thank
+ * Documentation/DocBook/procfs_example.c, so: thank
* you, Erik! He can be reached via email at
* <J.A.K.Mouw@its.tudelft.nl>. It is based on an idea
* provided by DEC^WCompaq^WIntel's "Jumpstart" CD. They
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 220 */
.quad alpha_ni_syscall
+#ifdef CONFIG_TUX
+ .quad __sys_tux
+#else
+# ifdef CONFIG_TUX_MODULE
+ .quad sys_tux
+# else
.quad alpha_ni_syscall
+# endif
+#endif
.quad alpha_ni_syscall
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 225 */
help
This enables the CPUfreq driver for ARM Integrator CPUs.
- For details, take a look at linux/Documentation/cpu-freq.
+ For details, take a look at <file:Documentation/cpu-freq>.
If in doubt, say Y.
CFLAGS +=-mapcs-32 $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
AFLAGS +=-mapcs-32 $(arch-y) $(tune-y) -msoft-float -Wa,-mno-fpu
+CHECK := $(CHECK) -D__arm__=1
+
#Default value
DATAADDR := .
--defsym params_phys=$(PARAMS_PHYS) -T
AFLAGS_initrd.o :=-DINITRD=\"$(INITRD)\"
-targets := bootp bootp.lds init.o kernel.o initrd.o
+targets := bootp init.o kernel.o initrd.o
# Note that bootp.lds picks up kernel.o and initrd.o
-$(obj)/bootp: $(addprefix $(obj)/,bootp.lds init.o kernel.o initrd.o) FORCE
+$(obj)/bootp: $(src)/bootp.lds $(addprefix $(obj)/,init.o kernel.o initrd.o) FORCE
$(call if_changed,ld)
@:
.type _start, #function
.globl _start
-_start: adr r13, data
+_start: add lr, pc, #-0x8 @ lr = current load addr
+ adr r13, data
ldmia r13!, {r4-r6} @ r5 = dest, r6 = length
+ add r4, r4, lr @ r4 = initrd_start + load addr
bl move @ move the initrd
/*
CFLAGS_font.o := -Dstatic=
$(obj)/font.o: $(FONTC)
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in Makefile arch/arm/boot/Makefile .config
+$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config
@sed "$(SEDFLAGS)" < $< > $@
$(obj)/misc.o: $(obj)/misc.c include/asm/arch/uncompress.h lib/inflate.c
mov r0, #0x30
mcr p15, 0, r0, c1, c0, 0
mov r0, #0x13
- msr cpsr, r0
+ msr cpsr_cxsf, r0
mov r12, #0x03000000 @ point to LEDs
orr r12, r12, #0x00020000
orr r12, r12, #0xba00
/* Ensure all interrupts are off and MMU disabled */
mrs r0, cpsr
orr r0, r0, #0xc0
- msr cpsr, r0
+ msr cpsr_cxsf, r0
adr lr, 1b
orr lr, lr, #0x10000000
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
-CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_HOTPLUG is not set
# CONFIG_IKCONFIG is not set
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
#
# Loadable module support
#
# System Type
#
-# CONFIG_ARCH_ADIFCC is not set
-# CONFIG_ARCH_ANAKIN is not set
# CONFIG_ARCH_CLPS7500 is not set
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
-# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
+# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
-# CONFIG_ARCH_SHARK is not set
CONFIG_ARCH_S3C2410=y
-
-#
-# CLPS711X/EP721X Implementations
-#
-
-#
-# Epxa10db
-#
-
-#
-# Footbridge Implementations
-#
-
-#
-# IOP3xx Implementation Options
-#
-# CONFIG_ARCH_IOP310 is not set
-# CONFIG_ARCH_IOP321 is not set
-
-#
-# IOP3xx Chipset Features
-#
-
-#
-# Intel PXA250/210 Implementations
-#
-
-#
-# SA11x0 Implementations
-#
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_VERSATILE_PB is not set
#
# S3C2410 Implementations
#
CONFIG_ARCH_BAST=y
+# CONFIG_ARCH_H1940 is not set
+# CONFIG_ARCH_SMDK2410 is not set
+CONFIG_MACH_VR1000=y
#
# Processor Type
# General setup
#
# CONFIG_ZBOOT_ROM is not set
-CONFIG_ZBOOT_ROM_TEXT=0
-CONFIG_ZBOOT_ROM_BSS=0
-# CONFIG_HOTPLUG is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
#
# At least one math emulation must be selected
CONFIG_FPE_NWFPE=y
CONFIG_FPE_NWFPE_XP=y
# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_AOUT=y
# CONFIG_BINFMT_MISC is not set
#
# Generic Driver Options
#
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
# CONFIG_PM is not set
# CONFIG_PREEMPT is not set
# CONFIG_ARTHUR is not set
+CONFIG_S3C2410_DMA=y
+# CONFIG_S3C2410_DMA_DEBUG is not set
CONFIG_CMDLINE="root=/dev/hda1 ro init=/bin/bash console=ttySAC0"
CONFIG_ALIGNMENT_TRAP=y
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
CONFIG_PARPORT_PC_CML1=y
-# CONFIG_PARPORT_SERIAL is not set
CONFIG_PARPORT_PC_FIFO=y
CONFIG_PARPORT_PC_SUPERIO=y
# CONFIG_PARPORT_ARC is not set
#
# Plug and Play support
#
-# CONFIG_PNP is not set
#
# Block devices
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
# CONFIG_ARPD is not set
-# CONFIG_INET_ECN is not set
# CONFIG_SYN_COOKIES is not set
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# CONFIG_IPV6 is not set
-# CONFIG_DECNET is not set
-# CONFIG_BRIDGE is not set
# CONFIG_NETFILTER is not set
#
# SCTP Configuration (EXPERIMENTAL)
#
-CONFIG_IPV6_SCTP__=y
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
CONFIG_NETDEVICES=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
#
# Ethernet (10000 Mbit)
#
-# CONFIG_PLIP is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
#
-# Wireless LAN (non-hamradio)
+# Token Ring devices
#
-# CONFIG_NET_RADIO is not set
-# CONFIG_HOSTAP is not set
#
-# Token Ring devices
+# Wireless LAN (non-hamradio)
#
-# CONFIG_SHAPER is not set
+# CONFIG_NET_RADIO is not set
#
# Wan interfaces
#
# CONFIG_WAN is not set
-
-#
-# Amateur Radio support
-#
-# CONFIG_HAMRADIO is not set
-
-#
-# IrDA (infrared) support
-#
-# CONFIG_IRDA is not set
-
-#
-# Bluetooth support
-#
-# CONFIG_BT is not set
+# CONFIG_PLIP is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
#
# ATA/ATAPI/MFM/RLL support
#
# Please see Documentation/ide.txt for help/info on IDE drives
#
+# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
-# CONFIG_IDEDISK_STROKE is not set
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=m
#
# IDE chipset support/bugfixes
#
-CONFIG_BLK_DEV_IDE_BAST=y
+CONFIG_IDE_GENERIC=y
+# CONFIG_IDE_ARM is not set
# CONFIG_BLK_DEV_IDEDMA is not set
# CONFIG_IDEDMA_AUTO is not set
-# CONFIG_DMA_NONPCI is not set
# CONFIG_BLK_DEV_HD is not set
#
#
# CONFIG_SCSI is not set
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
#
# I2O device support
#
#
# ISDN subsystem
#
-# CONFIG_ISDN_BOOL is not set
+# CONFIG_ISDN is not set
#
# Input device support
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_JOYDEV is not set
# CONFIG_INPUT_TSDEV is not set
-# CONFIG_INPUT_TSLIBDEV is not set
# CONFIG_INPUT_EVDEV is not set
# CONFIG_INPUT_EVBUG is not set
# CONFIG_GAMEPORT is not set
CONFIG_SOUND_GAMEPORT=y
CONFIG_SERIO=y
-CONFIG_SERIO_I8042=y
+# CONFIG_SERIO_I8042 is not set
CONFIG_SERIO_SERPORT=y
# CONFIG_SERIO_CT82C710 is not set
# CONFIG_SERIO_PARKBD is not set
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
-# CONFIG_MOUSE_PS2_SYNAPTICS is not set
# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
#
CONFIG_SERIAL_S3C2410=y
CONFIG_SERIAL_S3C2410_CONSOLE=y
-# CONFIG_SERIAL_DZ is not set
+# CONFIG_SERIAL_BAST_SIO is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
-CONFIG_UNIX98_PTY_COUNT=256
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
CONFIG_PRINTER=y
# CONFIG_LP_CONSOLE is not set
CONFIG_PPDEV=y
# CONFIG_TIPAR is not set
+# CONFIG_QIC02_TAPE is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_NVRAM is not set
+CONFIG_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
#
# I2C support
#
# CONFIG_I2C_AMD756 is not set
# CONFIG_I2C_AMD8111 is not set
-# CONFIG_I2C_PHILIPSPAR is not set
+# CONFIG_I2C_ISA is not set
+# CONFIG_I2C_PARPORT is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_SCx200_ACB is not set
#
-# I2C Hardware Sensors Chip support
+# Hardware Sensors Chip support
#
CONFIG_I2C_SENSOR=m
# CONFIG_SENSORS_ADM1021 is not set
-CONFIG_SENSORS_EEPROM=m
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_GL518SM is not set
# CONFIG_SENSORS_IT87 is not set
CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM78=m
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
CONFIG_SENSORS_LM85=m
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_MAX1619 is not set
# CONFIG_SENSORS_VIA686A is not set
# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
#
-# L3 serial bus support
-#
-# CONFIG_L3 is not set
-
-#
-# Mice
-#
-# CONFIG_BUSMOUSE is not set
-# CONFIG_QIC02_TAPE is not set
-
-#
-# IPMI
+# Other I2C Chip support
#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_NVRAM is not set
-CONFIG_RTC=y
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_FTAPE is not set
-# CONFIG_AGP is not set
-# CONFIG_DRM is not set
-# CONFIG_RAW_DRIVER is not set
+CONFIG_SENSORS_EEPROM=m
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
#
# Multimedia devices
#
# CONFIG_DVB is not set
-#
-# MMC/SD Card support
-#
-# CONFIG_MMC is not set
-
#
# File systems
#
CONFIG_FAT_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set
#
# Pseudo filesystems
#
CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
# CONFIG_DEVFS_FS is not set
-CONFIG_DEVPTS_FS=y
# CONFIG_DEVPTS_FS_XATTR is not set
# CONFIG_TMPFS is not set
# CONFIG_HUGETLBFS is not set
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
CONFIG_LOCKD=y
# CONFIG_EXPORTFS is not set
CONFIG_SUNRPC=y
-# CONFIG_SUNRPC_GSS is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
-# CONFIG_INTERMEZZO_FS is not set
# CONFIG_AFS_FS is not set
#
CONFIG_SOLARIS_X86_PARTITION=y
# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_LDM_PARTITION is not set
-# CONFIG_NEC98_PARTITION is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_EFI_PARTITION is not set
-CONFIG_NLS=y
#
# Native Language Support
#
+CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_437 is not set
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
#
# Graphics support
#
# CONFIG_LOGO is not set
#
-# Misc devices
+# Sound
#
+# CONFIG_SOUND is not set
#
-# Multimedia Capabilities Port drivers
+# Misc devices
#
-# CONFIG_MCP is not set
#
-# Console Switches
+# USB support
#
-# CONFIG_SWITCHES is not set
#
-# USB support
+# USB Gadget Support
#
# CONFIG_USB_GADGET is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_DEBUG_ERRORS is not set
CONFIG_DEBUG_LL=y
-CONFIG_DEBUG_LL_PRINTK=y
+# CONFIG_DEBUG_ICEDCC is not set
CONFIG_DEBUG_S3C2410_PORT=y
CONFIG_DEBUG_S3C2410_UART=0
#
# Library routines
#
+# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
#
CONFIG_EXPERIMENTAL=y
# CONFIG_CLEAN_COMPILE is not set
-CONFIG_STANDALONE=y
CONFIG_BROKEN=y
CONFIG_BROKEN_ON_SMP=y
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_HOTPLUG is not set
# CONFIG_IKCONFIG is not set
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
#
# Loadable module support
#
# System Type
#
-# CONFIG_ARCH_ADIFCC is not set
-# CONFIG_ARCH_ANAKIN is not set
# CONFIG_ARCH_CLPS7500 is not set
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
-# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
+# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
-# CONFIG_ARCH_SHARK is not set
CONFIG_ARCH_S3C2410=y
-
-#
-# CLPS711X/EP721X Implementations
-#
-
-#
-# Epxa10db
-#
-
-#
-# Footbridge Implementations
-#
-
-#
-# IOP3xx Implementation Options
-#
-# CONFIG_ARCH_IOP310 is not set
-# CONFIG_ARCH_IOP321 is not set
-
-#
-# IOP3xx Chipset Features
-#
-
-#
-# Intel PXA250/210 Implementations
-#
-
-#
-# SA11x0 Implementations
-#
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_VERSATILE_PB is not set
#
# S3C2410 Implementations
#
CONFIG_ARCH_BAST=y
CONFIG_ARCH_H1940=y
+CONFIG_ARCH_SMDK2410=y
+CONFIG_MACH_VR1000=y
#
# Processor Type
# General setup
#
# CONFIG_ZBOOT_ROM is not set
-CONFIG_ZBOOT_ROM_TEXT=0
-CONFIG_ZBOOT_ROM_BSS=0
-# CONFIG_HOTPLUG is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
#
# At least one math emulation must be selected
CONFIG_FPE_NWFPE=y
CONFIG_FPE_NWFPE_XP=y
# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_AOUT=y
# CONFIG_BINFMT_MISC is not set
#
# Generic Driver Options
#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_DEBUG_DRIVER is not set
# CONFIG_PM is not set
# CONFIG_PREEMPT is not set
# CONFIG_ARTHUR is not set
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
CONFIG_PARPORT_PC_CML1=y
-# CONFIG_PARPORT_SERIAL is not set
CONFIG_PARPORT_PC_FIFO=y
CONFIG_PARPORT_PC_SUPERIO=y
# CONFIG_PARPORT_ARC is not set
# CONFIG_MTD_JEDECPROBE is not set
CONFIG_MTD_GEN_PROBE=y
# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
CONFIG_MTD_CFI_INTELEXT=y
# CONFIG_MTD_CFI_AMDSTD is not set
# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
# CONFIG_MTD_RAM is not set
# CONFIG_MTD_ROM is not set
# CONFIG_MTD_ABSENT is not set
# Self-contained MTD device drivers
#
# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
# CONFIG_MTD_BLKMTD is not set
#
# Plug and Play support
#
-# CONFIG_PNP is not set
#
# Block devices
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
# CONFIG_ARPD is not set
-# CONFIG_INET_ECN is not set
# CONFIG_SYN_COOKIES is not set
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# CONFIG_IPV6 is not set
-# CONFIG_DECNET is not set
-# CONFIG_BRIDGE is not set
# CONFIG_NETFILTER is not set
#
# SCTP Configuration (EXPERIMENTAL)
#
-CONFIG_IPV6_SCTP__=y
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
CONFIG_NETDEVICES=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
#
# Ethernet (10000 Mbit)
#
-# CONFIG_PLIP is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
#
-# Wireless LAN (non-hamradio)
+# Token Ring devices
#
-# CONFIG_NET_RADIO is not set
-# CONFIG_HOSTAP is not set
#
-# Token Ring devices
+# Wireless LAN (non-hamradio)
#
-# CONFIG_SHAPER is not set
+# CONFIG_NET_RADIO is not set
#
# Wan interfaces
#
# CONFIG_WAN is not set
-
-#
-# Amateur Radio support
-#
-# CONFIG_HAMRADIO is not set
-
-#
-# IrDA (infrared) support
-#
-# CONFIG_IRDA is not set
-
-#
-# Bluetooth support
-#
-# CONFIG_BT is not set
+# CONFIG_PLIP is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
#
# ATA/ATAPI/MFM/RLL support
#
# Please see Documentation/ide.txt for help/info on IDE drives
#
+# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
-# CONFIG_IDEDISK_STROKE is not set
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=m
#
# IDE chipset support/bugfixes
#
-CONFIG_BLK_DEV_IDE_BAST=y
+CONFIG_IDE_GENERIC=y
+# CONFIG_IDE_ARM is not set
# CONFIG_BLK_DEV_IDEDMA is not set
# CONFIG_IDEDMA_AUTO is not set
-# CONFIG_DMA_NONPCI is not set
# CONFIG_BLK_DEV_HD is not set
#
#
# CONFIG_SCSI is not set
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
#
# I2O device support
#
#
# ISDN subsystem
#
-# CONFIG_ISDN_BOOL is not set
+# CONFIG_ISDN is not set
#
# Input device support
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_JOYDEV is not set
# CONFIG_INPUT_TSDEV is not set
-# CONFIG_INPUT_TSLIBDEV is not set
# CONFIG_INPUT_EVDEV is not set
# CONFIG_INPUT_EVBUG is not set
CONFIG_INPUT_KEYBOARD=y
CONFIG_KEYBOARD_ATKBD=y
# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
-# CONFIG_MOUSE_PS2_SYNAPTICS is not set
# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
# CONFIG_DIGI is not set
# CONFIG_MOXA_INTELLIO is not set
# CONFIG_MOXA_SMARTIO is not set
-# CONFIG_ISI is not set
-# CONFIG_SYNCLINK is not set
# CONFIG_SYNCLINKMP is not set
# CONFIG_N_HDLC is not set
# CONFIG_RISCOM8 is not set
CONFIG_SERIAL_S3C2410=y
CONFIG_SERIAL_S3C2410_CONSOLE=y
CONFIG_SERIAL_BAST_SIO=y
-# CONFIG_SERIAL_DZ is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
-CONFIG_UNIX98_PTY_COUNT=256
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
CONFIG_PRINTER=y
# CONFIG_LP_CONSOLE is not set
CONFIG_PPDEV=y
# CONFIG_TIPAR is not set
+# CONFIG_QIC02_TAPE is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_NVRAM is not set
+CONFIG_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
#
# I2C support
#
# CONFIG_I2C_AMD756 is not set
# CONFIG_I2C_AMD8111 is not set
-# CONFIG_I2C_PHILIPSPAR is not set
+# CONFIG_I2C_ISA is not set
+# CONFIG_I2C_PARPORT is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_SCx200_ACB is not set
#
-# I2C Hardware Sensors Chip support
+# Hardware Sensors Chip support
#
CONFIG_I2C_SENSOR=m
# CONFIG_SENSORS_ADM1021 is not set
-CONFIG_SENSORS_EEPROM=m
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_GL518SM is not set
# CONFIG_SENSORS_IT87 is not set
CONFIG_SENSORS_LM75=m
+# CONFIG_SENSORS_LM77 is not set
CONFIG_SENSORS_LM78=m
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
CONFIG_SENSORS_LM85=m
-# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_MAX1619 is not set
# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
#
-# L3 serial bus support
-#
-# CONFIG_L3 is not set
-
-#
-# Mice
-#
-# CONFIG_BUSMOUSE is not set
-# CONFIG_QIC02_TAPE is not set
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_NVRAM is not set
-CONFIG_RTC=y
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
+# Other I2C Chip support
#
-# CONFIG_FTAPE is not set
-# CONFIG_AGP is not set
-# CONFIG_DRM is not set
-# CONFIG_RAW_DRIVER is not set
+CONFIG_SENSORS_EEPROM=m
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
#
# Multimedia devices
#
# CONFIG_DVB is not set
-#
-# MMC/SD Card support
-#
-# CONFIG_MMC is not set
-
#
# File systems
#
# Pseudo filesystems
#
CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
# CONFIG_DEVFS_FS is not set
-CONFIG_DEVPTS_FS=y
# CONFIG_DEVPTS_FS_XATTR is not set
# CONFIG_TMPFS is not set
# CONFIG_HUGETLBFS is not set
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
CONFIG_JFFS_FS=y
CONFIG_JFFS_FS_VERBOSE=0
+# CONFIG_JFFS_PROC_FS is not set
CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_DEBUG=0
# CONFIG_JFFS2_FS_NAND is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
CONFIG_LOCKD=y
# CONFIG_EXPORTFS is not set
CONFIG_SUNRPC=y
-# CONFIG_SUNRPC_GSS is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
-# CONFIG_INTERMEZZO_FS is not set
# CONFIG_AFS_FS is not set
#
CONFIG_SOLARIS_X86_PARTITION=y
# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_LDM_PARTITION is not set
-# CONFIG_NEC98_PARTITION is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# CONFIG_EFI_PARTITION is not set
-CONFIG_NLS=y
#
# Native Language Support
#
+CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_437 is not set
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
#
# Graphics support
#
# CONFIG_LOGO is not set
#
-# Misc devices
+# Sound
#
+# CONFIG_SOUND is not set
#
-# Multimedia Capabilities Port drivers
+# Misc devices
#
-# CONFIG_MCP is not set
#
-# Console Switches
+# USB support
#
-# CONFIG_SWITCHES is not set
#
-# USB support
+# USB Gadget Support
#
# CONFIG_USB_GADGET is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_DEBUG_ERRORS is not set
CONFIG_DEBUG_LL=y
-CONFIG_DEBUG_LL_PRINTK=y
# CONFIG_DEBUG_ICEDCC is not set
CONFIG_DEBUG_S3C2410_PORT=y
CONFIG_DEBUG_S3C2410_UART=0
#
# Library routines
#
+# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
return err;
}
-static ssize_t apm_read(struct file *fp, char *buf, size_t count, loff_t *ppos)
+static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
{
struct apm_user *as = fp->private_data;
apm_event_t event;
bl do_DataAbort
disable_irq r0
ldr r0, [sp, #S_PSR]
- msr spsr, r0
+ msr spsr_cxsf, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
.align 5
strne r0, [r0, -r0] @ bug()
#endif
ldr r0, [sp, #S_PSR] @ irqs are already disabled
- msr spsr, r0
+ msr spsr_cxsf, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
.ltorg
1: disable_irq r0
ldr lr, [sp, #S_PSR] @ Get SVC cpsr
- msr spsr, lr
+ msr spsr_cxsf, lr
ldmia sp, {r0 - pc}^ @ Restore SVC registers
.align 5
bl do_PrefetchAbort @ call abort handler
disable_irq r0
ldr r0, [sp, #S_PSR]
- msr spsr, r0
+ msr spsr_cxsf, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
.align 5
mrs r13, cpsr
bic r13, r13, #MODE_MASK
orr r13, r13, #MODE_SVC
- msr spsr, r13 @ switch to SVC_32 mode
+ msr spsr_cxsf, r13 @ switch to SVC_32 mode
and lr, lr, #15
ldr lr, [pc, lr, lsl #2]
mrs r13, cpsr
bic r13, r13, #MODE_MASK
orr r13, r13, #MODE_SVC
- msr spsr, r13 @ switch to SVC_32 mode
+ msr spsr_cxsf, r13 @ switch to SVC_32 mode
and lr, lr, #15
ldr lr, [pc, lr, lsl #2]
mrs r13, cpsr
bic r13, r13, #MODE_MASK
orr r13, r13, #MODE_SVC
- msr spsr, r13 @ switch to SVC_32 mode
+ msr spsr_cxsf, r13 @ switch to SVC_32 mode
ands lr, lr, #15
ldr lr, [pc, lr, lsl #2]
mrs r13, cpsr
bic r13, r13, #MODE_MASK
orr r13, r13, #MODE_SVC
- msr spsr, r13 @ switch to SVC_32 mode
+ msr spsr_cxsf, r13 @ switch to SVC_32 mode
and lr, lr, #15
ldr lr, [pc, lr, lsl #2]
ldr r0, [sp, #S_PSR] @ Get calling cpsr
sub lr, lr, #4
str lr, [r8]
- msr spsr, r0
+ msr spsr_cxsf, r0
ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
mov r0, r0
ldr lr, [sp, #S_PC] @ Get PC
ldr r1, [sp, #S_PSR] @ Get calling cpsr
disable_irq ip @ disable IRQs
ldr lr, [sp, #S_PC]! @ Get PC
- msr spsr, r1 @ save in spsr_svc
+ msr spsr_cxsf, r1 @ save in spsr_svc
ldmdb sp, {r0 - lr}^ @ Get calling r0 - lr
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
.macro fast_restore_user_regs
ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr
ldr lr, [sp, #S_OFF + S_PC]! @ get pc
- msr spsr, r1 @ save in spsr_svc
+ msr spsr_cxsf, r1 @ save in spsr_svc
ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
.macro slow_restore_user_regs
ldr r1, [sp, #S_PSR] @ get calling cpsr
ldr lr, [sp, #S_PC]! @ get pc
- msr spsr, r1 @ save in spsr_svc
+ msr spsr_cxsf, r1 @ save in spsr_svc
ldmdb sp, {r0 - lr}^ @ get calling r1 - lr
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
* Copy data from IO memory space to "real" memory space.
* This needs to be optimized.
*/
-void _memcpy_fromio(void * to, unsigned long from, size_t count)
+void _memcpy_fromio(void *to, unsigned long from, size_t count)
{
+ unsigned char *t = to;
while (count) {
count--;
- *(char *) to = readb(from);
- ((char *) to)++;
+ *t = readb(from);
+ t++;
from++;
}
}
* Copy data from "real" memory space to IO memory space.
* This needs to be optimized.
*/
-void _memcpy_toio(unsigned long to, const void * from, size_t count)
+void _memcpy_toio(unsigned long to, const void *from, size_t count)
{
+ const unsigned char *f = from;
while (count) {
count--;
- writeb(*(char *) from, to);
- ((char *) from)++;
+ writeb(*f, to);
+ f++;
to++;
}
}
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
- info.si_addr = (void *)instruction_pointer(regs);
+ info.si_addr = (void __user *)instruction_pointer(regs);
force_sig_info(SIGTRAP, &info, tsk);
}
__put_user_error(NULL, &frame->uc.uc_link, err);
memset(&stack, 0, sizeof(stack));
- stack.ss_sp = (void *)current->sas_ss_sp;
+ stack.ss_sp = (void __user *)current->sas_ss_sp;
stack.ss_flags = sas_ss_flags(regs->ARM_sp);
stack.ss_size = current->sas_ss_size;
err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack));
union semun fourth;
if (!ptr)
return -EINVAL;
- if (get_user(fourth.__pad, (void __user **) ptr))
+ if (get_user(fourth.__pad, (void __user * __user *) ptr))
return -EFAULT;
return sys_semctl (first, second, third, fourth);
}
unsigned int instr;
struct undef_hook *hook;
siginfo_t info;
- void *pc;
+ void __user *pc;
/*
* According to the ARM ARM, PC is 2 or 4 bytes ahead,
*/
regs->ARM_pc -= correction;
- pc = (void *)instruction_pointer(regs);
+ pc = (void __user *)instruction_pointer(regs);
if (thumb_mode(regs)) {
- get_user(instr, (u16 *)pc);
+ get_user(instr, (u16 __user *)pc);
} else {
- get_user(instr, (u32 *)pc);
+ get_user(instr, (u32 __user *)pc);
}
spin_lock_irq(&undef_lock);
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLTRP;
- info.si_addr = (void *)instruction_pointer(regs) -
+ info.si_addr = (void __user *)instruction_pointer(regs) -
(thumb_mode(regs) ? 2 : 4);
force_sig_info(SIGILL, &info, current);
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLTRP;
- info.si_addr = (void *)instruction_pointer(regs) -
+ info.si_addr = (void __user *)instruction_pointer(regs) -
(thumb_mode(regs) ? 2 : 4);
force_sig_info(SIGILL, &info, current);
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLOPC;
- info.si_addr = (void *)addr;
+ info.si_addr = (void __user *)addr;
force_sig_info(SIGILL, &info, current);
die_if_kernel("unknown data abort code", regs, instr);
#define CPSR2SPSR(rt) \
mrs rt, cpsr; \
- msr spsr, rt
+ msr spsr_cxsf, rt
@ Purpose: call an expansion card loader to read bytes.
@ Proto : char read_loader(int offset, char *card_base, char *loader);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_dac_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
+EXPORT_SYMBOL(ixp4xx_pci_read);
+EXPORT_SYMBOL(ixp4xx_pci_write);
.flags = IORESOURCE_MEM,
};
-static struct platform_device coyote_flash_device = {
+static struct platform_device coyote_flash = {
.name = "IXP4XX-Flash",
.id = 0,
.dev = {
.resource = &coyote_flash_resource,
};
+static struct platform_device *coyote_devices[] __initdata = {
+ &coyote_flash
+};
+
static void __init coyote_init(void)
{
- platform_add_device(&coyote_flash_device);
+ platform_add_devices(&coyote_devices, ARRAY_SIZE(coyote_devices));
}
MACHINE_START(ADI_COYOTE, "ADI Engineering IXP4XX Coyote Development Platform")
.flags = IORESOURCE_MEM,
};
-static struct platform_device ixdp425_flash_device = {
+static struct platform_device ixdp425_flash = {
.name = "IXP4XX-Flash",
.id = 0,
.dev = {
.num_resources = 0
};
+static struct platform_device *ixdp425_devices[] __initdata = {
+ &ixdp425_i2c_controller,
+ &ixdp425_flash
+};
+
static void __init ixdp425_init(void)
{
- platform_add_device(&ixdp425_flash_device);
- platform_add_device(&ixdp425_i2c_controller);
+ platform_add_devices(&ixdp425_devices, ARRAY_SIZE(ixdp425_devices));
}
MACHINE_START(IXDP425, "Intel IXDP425 Development Platform")
.flags = IORESOURCE_MEM,
};
-static struct platform_device prpmc1100_flash_device = {
+static struct platform_device prpmc1100_flash = {
.name = "IXP4XX-Flash",
.id = 0,
.dev = {
.resource = &prpmc1100_flash_resource,
};
+static struct platform_device *prpmc1100_devices[] __initdata = {
+ &prpmc1100_flash
+};
+
static void __init prpmc1100_init(void)
{
- platform_add_device(&prpmc1100_flash_device);
+ platform_add_devices(&prpmc1100_devices, ARRAY_SIZE(prpmc1100_devices));
}
MACHINE_START(PRPMC1100, "Motorola PrPMC1100")
<http://www.fsforth.de>
config MACH_VR1000
- bool "Simtec VR1000"
+ bool "Thorcom VR1000"
help
- Say Y here if you are using the Simtec VR1000 board.
+ Say Y here if you are using the Thorcom VR1000 board.
+
+ This linux port is currently being maintained by Simtec, on behalf
+ of Thorcom. Any queries, please contact Thorcom first.
endmenu
# Object file lists.
-obj-y := s3c2410.o irq.o time.o
+obj-y := s3c2410.o irq.o time.o gpio.o
obj-m :=
obj-n :=
obj- :=
[0] = {
.hwport = 0,
.flags = 0,
- .clock = &s3c2410_hclk,
+ .clock = &s3c2410_pclk,
.ucon = 0x3c5,
.ulcon = 0x03,
.ufcon = 0x51,
[1] = {
.hwport = 1,
.flags = 0,
- .clock = &s3c2410_hclk,
+ .clock = &s3c2410_pclk,
.ucon = 0x245,
.ulcon = 0x03,
.ufcon = 0x00,
[2] = {
.hwport = 2,
.flags = 0,
- .clock = &s3c2410_hclk,
+ .clock = &s3c2410_pclk,
.ucon = 0x3c5,
.ulcon = 0x43,
.ufcon = 0x51,
void __init smdk2410_init_time(void)
{
- s3c2401_init_time();
+ s3c2410_init_time();
}
MACHINE_START(SMDK2410, "SMDK2410") /* @TODO: request a new identifier and switch
* published by the Free Software Foundation.
*
* Modifications:
+ * 06-Aug-2004 BJD Fixed call to time initialisation
* 12-Jul-2004 BJD Renamed machine
* 16-May-2003 BJD Created initial version
* 16-Aug-2003 BJD Fixed header files and copyright, added URL
void __init vr1000_init_time(void)
{
- s3c2401_init_time();
+ s3c2410_init_time();
}
MACHINE_START(VR1000, "Thorcom-VR1000")
extern void s3c2410_init_irq(void);
-extern s3c2410_init_time(void);
+extern void s3c2410_init_time(void);
MAPIO(collie_map_io)
INITIRQ(sa1100_init_irq)
INIT_MACHINE(collie_init)
+ INITTIME(sa1100_init_time)
MACHINE_END
static int __init blockops_check(void)
{
register unsigned int err asm("r4") = 0;
+ unsigned int err_pos = 1;
unsigned int cache_type;
int i;
unregister_undef_hook(&blockops_hook);
- for (i = 0; i < ARRAY_SIZE(func); i++, err >>= 1)
- printk("%30s: %ssupported\n", func[i], err & 1 ? "not " : "");
+ for (i = 0; i < ARRAY_SIZE(func); i++, err_pos <<= 1)
+ printk("%30s: %ssupported\n", func[i], err & err_pos ? "not " : "");
if ((err & 8) == 0) {
printk(" --> Using %s block cache invalidate\n",
/* We must not map this if we have highmem enabled */
pte = pte_offset_map(pmd, addr);
printk(", *pte=%08lx", pte_val(*pte));
-#ifdef CONFIG_CPU_32
printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE]));
-#endif
pte_unmap(pte);
#endif
} while(0);
si.si_signo = SIGSEGV;
si.si_errno = 0;
si.si_code = code;
- si.si_addr = (void *)addr;
+ si.si_addr = (void __user *)addr;
force_sig_info(SIGSEGV, &si, tsk);
}
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#ifdef CONFIG_CPU_32
-#define TABLE_OFFSET (PTRS_PER_PTE)
-#else
-#define TABLE_OFFSET 0
-#endif
-
-#define TABLE_SIZE ((TABLE_OFFSET + PTRS_PER_PTE) * sizeof(pte_t))
+#define TABLE_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t))
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
*/
reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
-#ifdef CONFIG_CPU_32
/*
* Reserve the page tables. These are already in use,
* and can only be in node 0.
*/
reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
PTRS_PER_PGD * sizeof(pgd_t));
-#endif
+
/*
* And don't forget to reserve the allocator bitmap,
* which will be freed later.
*/
arch_adjust_zones(node, zone_size, zhole_size);
- free_area_init_node(node, pgdat, 0, zone_size,
+ free_area_init_node(node, pgdat, NULL, zone_size,
bdata->node_boot_start >> PAGE_SHIFT, zhole_size);
}
* stack+task struct. Use the same method as 'current' uses to
* reach them.
*/
-register unsigned int *user_registers asm("sl");
+register unsigned long *user_registers asm("sl");
#define GET_USERREG() (user_registers)
#include <asm/uaccess.h>
-static inline void loadSingle(const unsigned int Fn, const unsigned int *pMem)
+static inline void loadSingle(const unsigned int Fn, const unsigned int __user *pMem)
{
FPA11 *fpa11 = GET_FPA11();
fpa11->fType[Fn] = typeSingle;
get_user(fpa11->fpreg[Fn].fSingle, pMem);
}
-static inline void loadDouble(const unsigned int Fn, const unsigned int *pMem)
+static inline void loadDouble(const unsigned int Fn, const unsigned int __user *pMem)
{
FPA11 *fpa11 = GET_FPA11();
unsigned int *p;
}
#ifdef CONFIG_FPE_NWFPE_XP
-static inline void loadExtended(const unsigned int Fn, const unsigned int *pMem)
+static inline void loadExtended(const unsigned int Fn, const unsigned int __user *pMem)
{
FPA11 *fpa11 = GET_FPA11();
unsigned int *p;
}
#endif
-static inline void loadMultiple(const unsigned int Fn, const unsigned int *pMem)
+static inline void loadMultiple(const unsigned int Fn, const unsigned int __user *pMem)
{
FPA11 *fpa11 = GET_FPA11();
register unsigned int *p;
}
}
-static inline void storeSingle(const unsigned int Fn, unsigned int *pMem)
+static inline void storeSingle(const unsigned int Fn, unsigned int __user *pMem)
{
FPA11 *fpa11 = GET_FPA11();
union {
put_user(val.i[0], pMem);
}
-static inline void storeDouble(const unsigned int Fn, unsigned int *pMem)
+static inline void storeDouble(const unsigned int Fn, unsigned int __user *pMem)
{
FPA11 *fpa11 = GET_FPA11();
union {
}
#ifdef CONFIG_FPE_NWFPE_XP
-static inline void storeExtended(const unsigned int Fn, unsigned int *pMem)
+static inline void storeExtended(const unsigned int Fn, unsigned int __user *pMem)
{
FPA11 *fpa11 = GET_FPA11();
union {
}
#endif
-static inline void storeMultiple(const unsigned int Fn, unsigned int *pMem)
+static inline void storeMultiple(const unsigned int Fn, unsigned int __user *pMem)
{
FPA11 *fpa11 = GET_FPA11();
register unsigned int nType, *p;
unsigned int PerformLDF(const unsigned int opcode)
{
- unsigned int *pBase, *pAddress, *pFinal, nRc = 1,
- write_back = WRITE_BACK(opcode);
+ unsigned int __user *pBase, *pAddress, *pFinal;
+ unsigned int nRc = 1, write_back = WRITE_BACK(opcode);
- pBase = (unsigned int *) readRegister(getRn(opcode));
+ pBase = (unsigned int __user *) readRegister(getRn(opcode));
if (REG_PC == getRn(opcode)) {
pBase += 2;
write_back = 0;
}
if (write_back)
- writeRegister(getRn(opcode), (unsigned int) pFinal);
+ writeRegister(getRn(opcode), (unsigned long) pFinal);
return nRc;
}
unsigned int PerformSTF(const unsigned int opcode)
{
- unsigned int *pBase, *pAddress, *pFinal, nRc = 1,
- write_back = WRITE_BACK(opcode);
+ unsigned int __user *pBase, *pAddress, *pFinal;
+ unsigned int nRc = 1, write_back = WRITE_BACK(opcode);
SetRoundingMode(ROUND_TO_NEAREST);
- pBase = (unsigned int *) readRegister(getRn(opcode));
+ pBase = (unsigned int __user *) readRegister(getRn(opcode));
if (REG_PC == getRn(opcode)) {
pBase += 2;
write_back = 0;
}
if (write_back)
- writeRegister(getRn(opcode), (unsigned int) pFinal);
+ writeRegister(getRn(opcode), (unsigned long) pFinal);
return nRc;
}
unsigned int PerformLFM(const unsigned int opcode)
{
- unsigned int i, Fd, *pBase, *pAddress, *pFinal,
- write_back = WRITE_BACK(opcode);
+ unsigned int __user *pBase, *pAddress, *pFinal;
+ unsigned int i, Fd, write_back = WRITE_BACK(opcode);
- pBase = (unsigned int *) readRegister(getRn(opcode));
+ pBase = (unsigned int __user *) readRegister(getRn(opcode));
if (REG_PC == getRn(opcode)) {
pBase += 2;
write_back = 0;
}
if (write_back)
- writeRegister(getRn(opcode), (unsigned int) pFinal);
+ writeRegister(getRn(opcode), (unsigned long) pFinal);
return 1;
}
unsigned int PerformSFM(const unsigned int opcode)
{
- unsigned int i, Fd, *pBase, *pAddress, *pFinal,
- write_back = WRITE_BACK(opcode);
+ unsigned int __user *pBase, *pAddress, *pFinal;
+ unsigned int i, Fd, write_back = WRITE_BACK(opcode);
- pBase = (unsigned int *) readRegister(getRn(opcode));
+ pBase = (unsigned int __user *) readRegister(getRn(opcode));
if (REG_PC == getRn(opcode)) {
pBase += 2;
write_back = 0;
}
if (write_back)
- writeRegister(getRn(opcode), (unsigned int) pFinal);
+ writeRegister(getRn(opcode), (unsigned long) pFinal);
return 1;
}
#ifdef CONFIG_DEBUG_USER
printk(KERN_DEBUG
- "NWFPE: %s[%d] takes exception %08x at %p from %08x\n",
+ "NWFPE: %s[%d] takes exception %08x at %p from %08lx\n",
current->comm, current->pid, flags,
__builtin_return_address(0), GET_USERREG()[15]);
#endif
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-extern __inline__
-unsigned int readRegister(const unsigned int nReg)
+static inline unsigned long readRegister(const unsigned int nReg)
{
/* Note: The CPU thinks it has dealt with the current instruction.
As a result the program counter has been advanced to the next
for this in this routine. LDF/STF instructions with Rn = PC
depend on the PC being correct, as they use PC+8 in their
address calculations. */
- unsigned int *userRegisters = GET_USERREG();
+ unsigned long *userRegisters = GET_USERREG();
unsigned int val = userRegisters[nReg];
if (REG_PC == nReg)
val -= 4;
return val;
}
-extern __inline__
-void writeRegister(const unsigned int nReg, const unsigned int val)
+static inline void
+writeRegister(const unsigned int nReg, const unsigned long val)
{
- unsigned int *userRegisters = GET_USERREG();
+ unsigned long *userRegisters = GET_USERREG();
userRegisters[nReg] = val;
}
-extern __inline__
-unsigned int readCPSR(void)
+static inline unsigned long readCPSR(void)
{
return (readRegister(REG_CPSR));
}
-extern __inline__
-void writeCPSR(const unsigned int val)
+static inline void writeCPSR(const unsigned long val)
{
writeRegister(REG_CPSR, val);
}
-extern __inline__
-unsigned int readConditionCodes(void)
+static inline unsigned long readConditionCodes(void)
{
#ifdef __FPEM_TEST__
return (0);
#endif
}
-extern __inline__
-void writeConditionCodes(const unsigned int val)
+static inline void writeConditionCodes(const unsigned long val)
{
- unsigned int *userRegisters = GET_USERREG();
- unsigned int rval;
+ unsigned long *userRegisters = GET_USERREG();
+ unsigned long rval;
/*
* Operate directly on userRegisters since
* the CPSR may be the PC register itself.
rval = userRegisters[REG_CPSR] & ~CC_MASK;
userRegisters[REG_CPSR] = rval | (val & CC_MASK);
}
-
-extern __inline__
-unsigned int readMemoryInt(unsigned int *pMem)
-{
- return *pMem;
-}
libs-y += arch/i386/lib/
core-y += arch/i386/kernel/ \
arch/i386/mm/ \
- arch/i386/$(mcore-y)/
+ arch/i386/$(mcore-y)/ \
+ arch/i386/crypto/
drivers-$(CONFIG_MATH_EMULATION) += arch/i386/math-emu/
drivers-$(CONFIG_PCI) += arch/i386/pci/
# must be linked after kernel/
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
memset(line, 0, LINE_SIZE);
if (len > LINE_SIZE)
len = LINE_SIZE;
switch (cmd) {
default:
- return -ENOIOCTLCMD;
+ return -ENOTTY;
case MTRRIOC_ADD_ENTRY:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
.long sys_madvise
.long sys_getdents64 /* 220 */
.long sys_fcntl64
- .long sys_ni_syscall /* reserved for TUX */
+#ifdef CONFIG_TUX
+ .long __sys_tux
+#else
+# ifdef CONFIG_TUX_MODULE
+ .long sys_tux
+# else
+ .long sys_ni_syscall
+# endif
+#endif
.long sys_ni_syscall
.long sys_gettid
.long sys_readahead /* 225 */
} irq_2_pin[PIN_MAP_SIZE];
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
-#ifdef CONFIG_PCI_USE_VECTOR
+#ifdef CONFIG_PCI_MSI
#define vector_to_irq(vector) \
(platform_legacy_irq(vector) ? vector : vector_irq[vector])
#else
/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
-#ifdef CONFIG_PCI_USE_VECTOR
+#ifdef CONFIG_PCI_MSI
int assign_irq_vector(int irq)
#else
int __init assign_irq_vector(int irq)
reg_00.raw = io_apic_read(apic, 0);
spin_unlock_irqrestore(&ioapic_lock, flags);
if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
- panic("could not set ID!\n");
+ printk("could not set ID!\n");
else
printk(" ok.\n");
}
}
}
-#ifdef CONFIG_PCI_USE_VECTOR
+#ifdef CONFIG_PCI_MSI
static unsigned int startup_edge_ioapic_vector(unsigned int vector)
{
int irq = vector_to_irq(vector);
return;
}
printk(" failed :(.\n");
- panic("IO-APIC + timer doesn't work! pester mingo@redhat.com");
+ panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
}
/*
return 0;
}
-/*
- * Get a random word:
- */
-static inline unsigned int get_random_int(void)
-{
- unsigned int val = 0;
-
- if (!exec_shield_randomize)
- return 0;
-
-#ifdef CONFIG_X86_HAS_TSC
- rdtscl(val);
-#endif
- val += current->pid + jiffies + (int)&val;
-
- /*
- * Use IP's RNG. It suits our purpose perfectly: it re-keys itself
- * every second, from the entropy pool (and thus creates a limited
- * drain on it), and uses halfMD4Transform within the second. We
- * also spice it with the TSC (if available), jiffies, PID and the
- * stack address:
- */
- return secure_ip_id(val);
-}
unsigned long arch_align_stack(unsigned long sp)
{
return sp & ~0xf;
}
-#if SHLIB_BASE >= 0x01000000
-# error SHLIB_BASE must be under 16MB!
-#endif
-
-static unsigned long
-arch_get_unmapped_nonexecutable_area(struct mm_struct *mm, unsigned long addr, unsigned long len)
-{
- struct vm_area_struct *vma, *prev_vma;
- unsigned long stack_limit;
- int first_time = 1;
-
- if (!mm->mmap_top) {
- printk("hm, %s:%d, !mmap_top.\n", current->comm, current->pid);
- mm->mmap_top = mmap_top();
- }
- stack_limit = mm->mmap_top;
-
- /* requested length too big for entire address space */
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- /* dont allow allocations above current stack limit */
- if (mm->non_executable_cache > stack_limit)
- mm->non_executable_cache = stack_limit;
-
- /* requesting a specific address */
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
- return addr;
- }
-
- /* make sure it can fit in the remaining address space */
- if (mm->non_executable_cache < len)
- return -ENOMEM;
-
- /* either no address requested or cant fit in requested address hole */
-try_again:
- addr = (mm->non_executable_cache - len)&PAGE_MASK;
- do {
- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
- return -ENOMEM;
-
- /* new region fits between prev_vma->vm_end and vma->vm_start, use it */
- if (addr+len <= vma->vm_start && (!prev_vma || (addr >= prev_vma->vm_end))) {
- /* remember the address as a hint for next time */
- mm->non_executable_cache = addr;
- return addr;
-
- /* pull non_executable_cache down to the first hole */
- } else if (mm->non_executable_cache == vma->vm_end)
- mm->non_executable_cache = vma->vm_start;
-
- /* try just below the current vma->vm_start */
- addr = vma->vm_start-len;
- } while (len <= vma->vm_start);
- /* if hint left us with no space for the requested mapping try again */
- if (first_time) {
- first_time = 0;
- mm->non_executable_cache = stack_limit;
- goto try_again;
- }
- return -ENOMEM;
-}
-
-static unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len)
-{
- unsigned long range = end - len - start;
- if (end <= start + len)
- return 0;
- return PAGE_ALIGN(get_random_int() % range + start);
-}
-
-static inline unsigned long
-stock_arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long start_addr;
-
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
- return addr;
- }
- start_addr = addr = mm->free_area_cache;
-
-full_search:
- for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr) {
- /*
- * Start a new search - just in case we missed
- * some holes.
- */
- if (start_addr != TASK_UNMAPPED_BASE) {
- start_addr = addr = TASK_UNMAPPED_BASE;
- goto full_search;
- }
- return -ENOMEM;
- }
- if (!vma || addr + len <= vma->vm_start) {
- /*
- * Remember the place where we stopped the search:
- */
- mm->free_area_cache = addr + len;
- return addr;
- }
- addr = vma->vm_end;
- }
-}
-
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
- unsigned long len0, unsigned long pgoff, unsigned long flags,
- unsigned long prot)
-{
- unsigned long addr = addr0, len = len0;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- int ascii_shield = 0;
- unsigned long tmp;
-
- /*
- * Fall back to the old layout:
- */
- if (!(current->flags & PF_RELOCEXEC))
- return stock_arch_get_unmapped_area(filp, addr0, len0, pgoff, flags);
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- if (!addr && (prot & PROT_EXEC) && !(flags & MAP_FIXED))
- addr = randomize_range(SHLIB_BASE, 0x01000000, len);
-
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start)) {
- return addr;
- }
- }
-
- if (prot & PROT_EXEC) {
- ascii_shield = 1;
- addr = SHLIB_BASE;
- } else {
- /* this can fail if the stack was unlimited */
- if ((tmp = arch_get_unmapped_nonexecutable_area(mm, addr, len)) != -ENOMEM)
- return tmp;
-search_upper:
- addr = PAGE_ALIGN(arch_align_stack(TASK_UNMAPPED_BASE));
- }
-
- for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr) {
- return -ENOMEM;
- }
- if (!vma || addr + len <= vma->vm_start) {
- /*
- * Must not let a PROT_EXEC mapping get into the
- * brk area:
- */
- if (ascii_shield && (addr + len > mm->brk)) {
- ascii_shield = 0;
- goto search_upper;
- }
- /*
- * Up until the brk area we randomize addresses
- * as much as possible:
- */
- if (ascii_shield && (addr >= 0x01000000)) {
- tmp = randomize_range(0x01000000, mm->brk, len);
- vma = find_vma(mm, tmp);
- if (TASK_SIZE - len >= tmp &&
- (!vma || tmp + len <= vma->vm_start))
- return tmp;
- }
- /*
- * Ok, randomization didnt work out - return
- * the result of the linear search:
- */
- return addr;
- }
- addr = vma->vm_end;
- }
-}
void arch_add_exec_range(struct mm_struct *mm, unsigned long limit)
{
current->mm->brk = new_brk;
}
-/*
- * Top of mmap area (just below the process stack).
- * leave an at least ~128 MB hole. Randomize it.
- */
-#define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
-
-unsigned long mmap_top(void)
-{
- unsigned long gap = 0;
-
- gap = current->rlim[RLIMIT_STACK].rlim_cur;
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- gap = arch_align_stack(gap) & PAGE_MASK;
-
- return TASK_SIZE - gap;
-}
-
See vsyscall-sigreturn.S. */
extern void __user __kernel_sigreturn;
extern void __user __kernel_rt_sigreturn;
-extern SYSENTER_RETURN;
static void setup_frame(int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs * regs)
union semun fourth;
if (!ptr)
return -EINVAL;
- if (get_user(fourth.__pad, (void * __user *) ptr))
+ if (get_user(fourth.__pad, (void __user * __user *) ptr))
return -EFAULT;
return sys_semctl (first, second, third, fourth);
}
extern void SYSENTER_RETURN_OFFSET;
-unsigned int vdso_enabled = 1;
+unsigned int vdso_enabled = 0;
void map_vsyscall(void)
{
{
printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
printk("You probably have a hardware problem with your RAM chips\n");
-
+ panic("Halting\n");
/* Clear and disable the memory parity error line. */
clear_mem_error(reason);
}
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/init.h>
+#include <linux/dmi.h>
#include <asm/mach-bigsmp/mach_apic.h>
#include <asm/mach-bigsmp/mach_apicdef.h>
#include <asm/mach-bigsmp/mach_ipi.h>
#include <asm/mach-default/mach_mpparse.h>
-int dmi_bigsmp; /* can be set by dmi scanners */
+static int dmi_bigsmp; /* can be set by dmi scanners */
+
+static __init int hp_ht_bigsmp(struct dmi_system_id *d)
+{
+#ifdef CONFIG_X86_GENERICARCH
+ printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
+ dmi_bigsmp = 1;
+#endif
+ return 0;
+}
+
+
+static struct dmi_system_id __initdata bigsmp_dmi_table[] = {
+ { hp_ht_bigsmp, "HP ProLiant DL760 G2", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
+ DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
+ }},
+
+ { hp_ht_bigsmp, "HP ProLiant DL740", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
+ DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
+ }},
+ { }
+};
+
static __init int probe_bigsmp(void)
{
+ dmi_check_system(bigsmp_dmi_table);
return dmi_bigsmp;
}
# Makefile for the linux i386-specific parts of the memory manager.
#
-obj-y := init.o pgtable.o fault.o ioremap.o extable.o pageattr.o
+obj-y := init.o pgtable.o fault.o ioremap.o extable.o pageattr.o mmap.o
obj-$(CONFIG_DISCONTIGMEM) += discontig.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
- highmem_start_page = NODE_DATA(0)->node_zones[ZONE_HIGHMEM].zone_mem_map;
+ struct zone *high0 = &NODE_DATA(0)->node_zones[ZONE_HIGHMEM];
+ if (high0->spanned_pages > 0)
+ highmem_start_page = high0->zone_mem_map;
+ else
+ highmem_start_page = pfn_to_page(max_low_pfn+1);
num_physpages = highend_pfn;
#else
num_physpages = max_low_pfn;
extern int is_available_memory(efi_memory_desc_t *);
-int page_is_ram(unsigned long pagenr)
+static inline int page_is_ram(unsigned long pagenr)
{
int i;
unsigned long addr, end;
return 0;
}
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain address is
+ * valid. The argument is a physical page number.
+ *
+ *
+ * On x86, access has to be given to the first megabyte of ram because that area
+ * contains bios code and data regions used by X and dosemu and similar apps.
+ * Access has to be given to non-kernel-ram areas as well, these contain the PCI
+ * mmio resources as well as potential bios/acpi data regions.
+ */
+int devmem_is_allowed(unsigned long pagenr)
+{
+ if (pagenr <= 256)
+ return 1;
+ if (!page_is_ram(pagenr))
+ return 1;
+ return 0;
+}
+
+
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
if ( dev2->irq && dev2->irq != irq && \
(!(pci_probe & PCI_USE_PIRQ_MASK) || \
((1 << dev2->irq) & mask)) ) {
-#ifndef CONFIG_PCI_USE_VECTOR
+#ifndef CONFIG_PCI_MSI
printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n",
pci_name(dev2), dev2->irq, irq);
#endif
}
dev = temp_dev;
if (irq >= 0) {
-#ifdef CONFIG_PCI_USE_VECTOR
+#ifdef CONFIG_PCI_MSI
if (!platform_legacy_irq(irq))
irq = IO_APIC_VECTOR(irq);
#endif
config DISCONTIGMEM
bool "Discontiguous memory support"
- depends on (IA64_DIG || IA64_SGI_SN2 || IA64_GENERIC) && NUMA && VIRTUAL_MEM_MAP
+ depends on (IA64_DIG || IA64_SGI_SN2 || IA64_GENERIC || IA64_HP_ZX1) && NUMA && VIRTUAL_MEM_MAP
default y if (IA64_SGI_SN2 || IA64_GENERIC) && NUMA
help
Say Y to support efficient handling of discontiguous physical memory,
See <file:Documentation/vm/numa> for more.
config IA64_CYCLONE
- bool "Support Cyclone(EXA) Time Source"
+ bool "Cyclone (EXA) Time Source support"
help
- Say Y here to enable support for IBM EXA Cyclone time source.
- If you're unsure, answer N.
+ Say Y here to enable support for IBM EXA Cyclone time source.
+ If you're unsure, answer N.
config IOSAPIC
bool
core-$(CONFIG_IA64_DIG) += arch/ia64/dig/
core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
-ifeq ($(CONFIG_DISCONTIGMEM),y)
- core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
-endif
+core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
+
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
-drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/
-ifeq ($(CONFIG_DISCONTIGMEM),y)
-drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/sn/
-endif
+drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/
drivers-$(CONFIG_OPROFILE) += arch/ia64/oprofile/
boot := arch/ia64/hp/sim/boot
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
-CONFIG_STANDALONE=y
#
# General setup
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=20
+CONFIG_HOTPLUG=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
#
CONFIG_OBSOLETE_MODPARM=y
CONFIG_MODVERSIONS=y
CONFIG_KMOD=y
+CONFIG_STOP_MACHINE=y
#
# Processor type and features
CONFIG_MMU=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_TIME_INTERPOLATION=y
-# CONFIG_ITANIUM is not set
-CONFIG_MCKINLEY=y
+CONFIG_EFI=y
CONFIG_IA64_GENERIC=y
# CONFIG_IA64_DIG is not set
-# CONFIG_IA64_HP_SIM is not set
# CONFIG_IA64_HP_ZX1 is not set
# CONFIG_IA64_SGI_SN2 is not set
+# CONFIG_IA64_HP_SIM is not set
+# CONFIG_ITANIUM is not set
+CONFIG_MCKINLEY=y
# CONFIG_IA64_PAGE_SIZE_4KB is not set
# CONFIG_IA64_PAGE_SIZE_8KB is not set
CONFIG_IA64_PAGE_SIZE_16KB=y
# CONFIG_IA64_PAGE_SIZE_64KB is not set
-CONFIG_ACPI=y
-CONFIG_ACPI_INTERPRETER=y
-CONFIG_ACPI_KERNEL_CONFIG=y
CONFIG_IA64_L1_CACHE_SHIFT=7
-# CONFIG_MCKINLEY_ASTEP_SPECIFIC is not set
CONFIG_NUMA=y
-CONFIG_DISCONTIGMEM=y
CONFIG_VIRTUAL_MEM_MAP=y
-CONFIG_IA64_MCA=y
-CONFIG_PM=y
+CONFIG_DISCONTIGMEM=y
+CONFIG_IA64_CYCLONE=y
CONFIG_IOSAPIC=y
CONFIG_FORCE_MAX_ZONEORDER=18
-# CONFIG_HUGETLB_PAGE_SIZE_4GB is not set
-# CONFIG_HUGETLB_PAGE_SIZE_1GB is not set
-# CONFIG_HUGETLB_PAGE_SIZE_256MB is not set
-# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
-CONFIG_HUGETLB_PAGE_SIZE_16MB=y
-# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
-# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
-# CONFIG_HUGETLB_PAGE_SIZE_256KB is not set
-# CONFIG_IA64_PAL_IDLE is not set
CONFIG_SMP=y
+CONFIG_NR_CPUS=512
+CONFIG_HOTPLUG_CPU=y
# CONFIG_PREEMPT is not set
+CONFIG_HAVE_DEC_LOCK=y
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
-CONFIG_HAVE_DEC_LOCK=y
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
-CONFIG_EFI=y
+
+#
+# Firmware Drivers
+#
CONFIG_EFI_VARS=y
-CONFIG_NR_CPUS=512
+CONFIG_EFI_PCDP=y
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=m
+#
+# Power management and ACPI
+#
+CONFIG_PM=y
+CONFIG_ACPI=y
+
#
# ACPI (Advanced Configuration and Power Interface) Support
#
CONFIG_ACPI_BOOT=y
+CONFIG_ACPI_INTERPRETER=y
CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_POWER=y
CONFIG_ACPI_PCI=y
CONFIG_ACPI_SYSTEM=y
+
+#
+# Bus options (PCI, PCMCIA)
+#
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_MSI is not set
CONFIG_PCI_LEGACY_PROC=y
CONFIG_PCI_NAMES=y
-CONFIG_HOTPLUG=y
#
# PCI Hotplug Support
# CONFIG_HOTPLUG_PCI_FAKE is not set
CONFIG_HOTPLUG_PCI_ACPI=m
# CONFIG_HOTPLUG_PCI_CPCI is not set
+# CONFIG_HOTPLUG_PCI_PCIE is not set
+# CONFIG_HOTPLUG_PCI_SHPC is not set
#
# PCMCIA/CardBus support
# CONFIG_PCMCIA is not set
#
-# Parallel port support
+# Device Drivers
#
-# CONFIG_PARPORT is not set
#
# Generic Driver Options
#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
#
# Memory Technology Devices (MTD)
#
# CONFIG_MTD is not set
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
#
# Plug and Play support
#
-# CONFIG_PNP is not set
#
# Block devices
#
-# CONFIG_BLK_DEV_FD is not set
# CONFIG_BLK_CPQ_DA is not set
# CONFIG_BLK_CPQ_CISS_DA is not set
# CONFIG_BLK_DEV_DAC960 is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
+# CONFIG_BLK_DEV_SX8 is not set
CONFIG_BLK_DEV_RAM=m
CONFIG_BLK_DEV_RAM_SIZE=4096
-# CONFIG_BLK_DEV_INITRD is not set
#
# ATA/ATAPI/MFM/RLL support
#
# Please see Documentation/ide.txt for help/info on IDE drives
#
+# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
-# CONFIG_IDEDISK_STROKE is not set
CONFIG_BLK_DEV_IDECD=y
# CONFIG_BLK_DEV_IDETAPE is not set
CONFIG_BLK_DEV_IDEFLOPPY=y
#
# IDE chipset support/bugfixes
#
+CONFIG_IDE_GENERIC=y
CONFIG_BLK_DEV_IDEPCI=y
# CONFIG_IDEPCI_SHARE_IRQ is not set
# CONFIG_BLK_DEV_OFFBOARD is not set
# CONFIG_BLK_DEV_PDC202XX_OLD is not set
# CONFIG_BLK_DEV_PDC202XX_NEW is not set
# CONFIG_BLK_DEV_SVWKS is not set
+CONFIG_BLK_DEV_SGIIOC4=y
# CONFIG_BLK_DEV_SIIMAGE is not set
# CONFIG_BLK_DEV_SLC90E66 is not set
# CONFIG_BLK_DEV_TRM290 is not set
# CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_IDE_ARM is not set
CONFIG_BLK_DEV_IDEDMA=y
# CONFIG_IDEDMA_IVB is not set
CONFIG_IDEDMA_AUTO=y
# CONFIG_BLK_DEV_HD is not set
-#
-# IEEE 1394 (FireWire) support (EXPERIMENTAL)
-#
-# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
-# CONFIG_I2O is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID5=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_BLK_DEV_DM=m
-
-#
-# Fusion MPT device support
-#
-CONFIG_FUSION=y
-CONFIG_FUSION_BOOT=y
-CONFIG_FUSION_MAX_SGE=40
-# CONFIG_FUSION_ISENSE is not set
-# CONFIG_FUSION_CTL is not set
-
#
# SCSI device support
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
# CONFIG_SCSI_MULTI_LUN is not set
-CONFIG_SCSI_REPORT_LUNS=y
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
+#
+# SCSI Transport Attributes
+#
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=y
+
#
# SCSI low-level drivers
#
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
-# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_MEGARAID is not set
# CONFIG_SCSI_SATA is not set
# CONFIG_SCSI_BUSLOGIC is not set
-# CONFIG_SCSI_CPQFCTS is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_EATA is not set
# CONFIG_SCSI_EATA_PIO is not set
CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
+# CONFIG_SCSI_IPR is not set
# CONFIG_SCSI_QLOGIC_ISP is not set
CONFIG_SCSI_QLOGIC_FC=y
# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set
CONFIG_SCSI_QLOGIC_1280=y
+CONFIG_SCSI_QLA2XXX=y
+CONFIG_SCSI_QLA21XX=m
+CONFIG_SCSI_QLA22XX=m
+CONFIG_SCSI_QLA2300=m
+CONFIG_SCSI_QLA2322=m
+# CONFIG_SCSI_QLA6312 is not set
+# CONFIG_SCSI_QLA6322 is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
-# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+
+#
+# Fusion MPT device support
+#
+CONFIG_FUSION=y
+CONFIG_FUSION_MAX_SGE=40
+# CONFIG_FUSION_ISENSE is not set
+# CONFIG_FUSION_CTL is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
#
# Networking support
#
# CONFIG_NET_IPGRE is not set
# CONFIG_IP_MROUTE is not set
CONFIG_ARPD=y
-# CONFIG_INET_ECN is not set
CONFIG_SYN_COOKIES=y
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# CONFIG_IPV6 is not set
-# CONFIG_DECNET is not set
-# CONFIG_BRIDGE is not set
# CONFIG_NETFILTER is not set
#
# SCTP Configuration (EXPERIMENTAL)
#
-CONFIG_IPV6_SCTP__=y
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
CONFIG_NETDEVICES=y
-
-#
-# ARCnet devices
-#
-# CONFIG_ARCNET is not set
CONFIG_DUMMY=m
# CONFIG_BONDING is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
# CONFIG_ETHERTAP is not set
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
#
# Ethernet (10 or 100Mbit)
#
CONFIG_TULIP=m
# CONFIG_TULIP_MWI is not set
# CONFIG_TULIP_MMIO is not set
+# CONFIG_TULIP_NAPI is not set
# CONFIG_DE4X5 is not set
# CONFIG_WINBOND_840 is not set
# CONFIG_DM9102 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
# CONFIG_DGRS is not set
CONFIG_EEPRO100=m
# CONFIG_EEPRO100_PIO is not set
CONFIG_E100=m
+# CONFIG_E100_NAPI is not set
# CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set
# CONFIG_NE2K_PCI is not set
# CONFIG_EPIC100 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_VIA_RHINE is not set
+# CONFIG_VIA_VELOCITY is not set
#
# Ethernet (1000 Mbit)
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_R8169 is not set
-# CONFIG_SIS190 is not set
# CONFIG_SK98LIN is not set
CONFIG_TIGON3=y
# Ethernet (10000 Mbit)
#
# CONFIG_IXGB is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
+# CONFIG_S2IO is not set
#
# Token Ring devices
#
# CONFIG_TR is not set
-# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
#
-# Amateur Radio support
+# Wireless LAN (non-hamradio)
#
-# CONFIG_HAMRADIO is not set
+# CONFIG_NET_RADIO is not set
#
-# IrDA (infrared) support
+# Wan interfaces
#
-# CONFIG_IRDA is not set
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+CONFIG_NETCONSOLE=y
#
-# Bluetooth support
+# ISDN subsystem
#
-# CONFIG_BT is not set
+# CONFIG_ISDN is not set
#
-# ISDN subsystem
+# Telephony Support
#
-# CONFIG_ISDN_BOOL is not set
+# CONFIG_PHONE is not set
#
# Input device support
CONFIG_INPUT_KEYBOARD=y
CONFIG_KEYBOARD_ATKBD=y
# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
CONFIG_HW_CONSOLE=y
CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_ROCKETPORT is not set
+# CONFIG_CYCLADES is not set
# CONFIG_SYNCLINK is not set
# CONFIG_SYNCLINKMP is not set
# CONFIG_N_HDLC is not set
# CONFIG_STALDRV is not set
-CONFIG_SGI_L1_SERIAL=y
-CONFIG_SGI_L1_SERIAL_CONSOLE=y
#
# Serial drivers
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_ACPI=y
-CONFIG_SERIAL_8250_HCDP=y
CONFIG_SERIAL_8250_NR_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
#
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_SGI_L1_CONSOLE=y
CONFIG_UNIX98_PTYS=y
-CONFIG_UNIX98_PTY_COUNT=256
-
-#
-# I2C support
-#
-# CONFIG_I2C is not set
-
-#
-# I2C Algorithms
-#
-
-#
-# I2C Hardware Bus support
-#
-
-#
-# I2C Hardware Sensors Chip support
-#
-# CONFIG_I2C_SENSOR is not set
-
-#
-# Mice
-#
-# CONFIG_BUSMOUSE is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_QIC02_TAPE is not set
#
#
# CONFIG_WATCHDOG is not set
# CONFIG_HW_RANDOM is not set
-# CONFIG_NVRAM is not set
-# CONFIG_GEN_RTC is not set
CONFIG_EFI_RTC=y
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
CONFIG_DRM_MGA=m
CONFIG_DRM_SIS=m
CONFIG_RAW_DRIVER=m
+CONFIG_HPET=y
+# CONFIG_HPET_RTC_IRQ is not set
+CONFIG_HPET_MMAP=y
CONFIG_MAX_RAW_DEVS=256
#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-
-#
-# Digital Video Broadcasting Devices
-#
-# CONFIG_DVB is not set
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-CONFIG_REISERFS_FS=m
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-# CONFIG_JFS_FS is not set
-CONFIG_FS_POSIX_ACL=y
-CONFIG_XFS_FS=y
-# CONFIG_XFS_RT is not set
-# CONFIG_XFS_QUOTA is not set
-# CONFIG_XFS_POSIX_ACL is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-# CONFIG_ZISOFS is not set
-CONFIG_UDF_FS=m
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=y
-# CONFIG_MSDOS_FS is not set
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=m
-# CONFIG_NTFS_DEBUG is not set
-# CONFIG_NTFS_RW is not set
-
-#
-# Pseudo filesystems
+# I2C support
#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-# CONFIG_DEVFS_FS is not set
-CONFIG_DEVPTS_FS=y
-# CONFIG_DEVPTS_FS_XATTR is not set
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
-CONFIG_RAMFS=y
+# CONFIG_I2C is not set
#
-# Miscellaneous filesystems
+# Dallas's 1-wire bus
#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
+# CONFIG_W1 is not set
#
-# Network File Systems
+# Misc devices
#
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_DIRECTIO=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3=y
-CONFIG_NFSD_V4=y
-CONFIG_NFSD_TCP=y
-CONFIG_LOCKD=m
-CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=m
-CONFIG_SUNRPC=m
-# CONFIG_SUNRPC_GSS is not set
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
-CONFIG_SMB_NLS_REMOTE="cp437"
-CONFIG_CIFS=m
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_INTERMEZZO_FS is not set
-# CONFIG_AFS_FS is not set
#
-# Partition Types
+# Multimedia devices
#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_MINIX_SUBPARTITION is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
-# CONFIG_LDM_PARTITION is not set
-# CONFIG_NEC98_PARTITION is not set
-CONFIG_SGI_PARTITION=y
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-CONFIG_EFI_PARTITION=y
+# CONFIG_VIDEO_DEV is not set
#
-# Native Language Support
+# Digital Video Broadcasting Devices
#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
+# CONFIG_DVB is not set
#
# Graphics support
# Advanced Linux Sound Architecture
#
CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_HWDEP=m
+CONFIG_SND_RAWMIDI=m
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_OSSEMUL=y
#
# Generic devices
#
+CONFIG_SND_MPU401_UART=m
+CONFIG_SND_OPL3_LIB=m
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
CONFIG_SND_MTPAV=m
#
# PCI devices
#
+CONFIG_SND_AC97_CODEC=m
# CONFIG_SND_ALI5451 is not set
+# CONFIG_SND_ATIIXP is not set
+# CONFIG_SND_AU8810 is not set
+# CONFIG_SND_AU8820 is not set
+# CONFIG_SND_AU8830 is not set
# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_BT87X is not set
CONFIG_SND_CS46XX=m
CONFIG_SND_CS46XX_NEW_DSP=y
CONFIG_SND_CS4281=m
CONFIG_SND_EMU10K1=m
# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_MIXART is not set
# CONFIG_SND_NM256 is not set
# CONFIG_SND_RME32 is not set
# CONFIG_SND_RME96 is not set
# CONFIG_SND_ES1968 is not set
# CONFIG_SND_MAESTRO3 is not set
CONFIG_SND_FM801=m
+# CONFIG_SND_FM801_TEA575X is not set
# CONFIG_SND_ICE1712 is not set
# CONFIG_SND_ICE1724 is not set
# CONFIG_SND_INTEL8X0 is not set
+# CONFIG_SND_INTEL8X0M is not set
# CONFIG_SND_SONICVIBES is not set
# CONFIG_SND_VIA82XX is not set
# CONFIG_SND_VX222 is not set
# USB Host Controller Drivers
#
CONFIG_USB_EHCI_HCD=m
+# CONFIG_USB_EHCI_SPLIT_ISO is not set
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
CONFIG_USB_OHCI_HCD=m
CONFIG_USB_UHCI_HCD=m
# CONFIG_USB_PRINTER is not set
CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_RW_DETECT is not set
# CONFIG_USB_STORAGE_DATAFAB is not set
# CONFIG_USB_STORAGE_FREECOM is not set
# CONFIG_USB_STORAGE_ISD200 is not set
# CONFIG_USB_WACOM is not set
# CONFIG_USB_KBTAB is not set
# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_MTOUCH is not set
+# CONFIG_USB_EGALAX is not set
# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
#
# USB Imaging devices
#
# CONFIG_USB_MDC800 is not set
-# CONFIG_USB_SCANNER is not set
# CONFIG_USB_MICROTEK is not set
# CONFIG_USB_HPUSBSCSI is not set
#
# USB Miscellaneous drivers
#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
# CONFIG_USB_TIGL is not set
# CONFIG_USB_AUERSWALD is not set
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_USB_TEST is not set
+
+#
+# USB Gadget Support
+#
# CONFIG_USB_GADGET is not set
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=y
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_SECURITY is not set
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+# CONFIG_ZISOFS is not set
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+# CONFIG_MSDOS_FS is not set
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=m
+# CONFIG_NTFS_DEBUG is not set
+# CONFIG_NTFS_RW is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_DIRECTIO=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_TCP=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+CONFIG_RPCSEC_GSS_KRB5=m
+CONFIG_SMB_FS=m
+CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_SMB_NLS_REMOTE="cp437"
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_POSIX is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+
#
# Library routines
#
+# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
#
# HP Simulator drivers
# CONFIG_IA64_DEBUG_CMPXCHG is not set
# CONFIG_IA64_DEBUG_IRQ is not set
# CONFIG_DEBUG_INFO is not set
+CONFIG_SYSVIPC_COMPAT=y
#
# Security options
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_NULL is not set
# CONFIG_CRYPTO_MD4 is not set
-# CONFIG_CRYPTO_MD5 is not set
+CONFIG_CRYPTO_MD5=m
# CONFIG_CRYPTO_SHA1 is not set
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_DES is not set
+CONFIG_CRYPTO_DES=m
# CONFIG_CRYPTO_BLOWFISH is not set
# CONFIG_CRYPTO_TWOFISH is not set
# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_AES_GENERIC is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
# CONFIG_CRYPTO_TEST is not set
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
# Firmware Drivers
#
CONFIG_EFI_VARS=y
+# CONFIG_EFI_PCDP is not set
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
-# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_MEGARAID is not set
CONFIG_SCSI_SATA=y
# CONFIG_SCSI_SATA_SVW is not set
# CONFIG_SYNCLINKMP is not set
# CONFIG_N_HDLC is not set
# CONFIG_STALDRV is not set
-CONFIG_SGI_L1_SERIAL=y
-CONFIG_SGI_L1_SERIAL_CONSOLE=y
#
# Serial drivers
#
# Non-8250 serial port support
#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_SGI_L1_CONSOLE=y
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
#
# CONFIG_I2C is not set
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
#
# Misc devices
#
CONFIG_JOLIET=y
# CONFIG_ZISOFS is not set
CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
#
# DOS/FAT/NT Filesystems
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
#
# Library routines
#
+# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=m
# CONFIG_CRYPTO_AES is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
# CONFIG_CRYPTO_ARC4 is not set
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_MICHAEL_MIC is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=16
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
# Firmware Drivers
#
CONFIG_EFI_VARS=y
+CONFIG_EFI_PCDP=y
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=y
#
# Generic Driver Options
#
+CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
# CONFIG_DEBUG_DRIVER is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
# CONFIG_BLK_DEV_NBD is not set
-# CONFIG_BLK_DEV_CARMEL is not set
+# CONFIG_BLK_DEV_SX8 is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_INITRD=y
#
# Please see Documentation/ide.txt for help/info on IDE drives
#
+# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_BLK_DEV_IDEDISK=y
CONFIG_IDEDISK_MULTI_MODE=y
CONFIG_BLK_DEV_IDECD=y
# SCSI low-level drivers
#
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
CONFIG_MD_MULTIPATH=m
CONFIG_BLK_DEV_DM=m
# CONFIG_DM_CRYPT is not set
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_ZERO is not set
#
# Fusion MPT device support
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
# CONFIG_EPIC100 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_VIA_RHINE is not set
+# CONFIG_VIA_VELOCITY is not set
#
# Ethernet (1000 Mbit)
#
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_HCDP=y
CONFIG_SERIAL_8250_ACPI=y
CONFIG_SERIAL_8250_NR_UARTS=4
# CONFIG_SERIAL_8250_EXTENDED is not set
# CONFIG_DRM_MGA is not set
# CONFIG_DRM_SIS is not set
# CONFIG_RAW_DRIVER is not set
+# CONFIG_HPET is not set
#
# I2C support
#
# CONFIG_I2C_SENSOR is not set
# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ASB100 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_FSCHER is not set
# CONFIG_SENSORS_GL518SM is not set
# CONFIG_SENSORS_IT87 is not set
# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
# CONFIG_SENSORS_LM78 is not set
# CONFIG_SENSORS_LM80 is not set
# CONFIG_SENSORS_LM83 is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
#
# Misc devices
#
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
CONFIG_FB_RIVA=m
+CONFIG_FB_RIVA_I2C=y
# CONFIG_FB_MATROX is not set
# CONFIG_FB_RADEON_OLD is not set
CONFIG_FB_RADEON=m
# CONFIG_MDA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_PCI_CONSOLE=y
# CONFIG_FONTS is not set
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
CONFIG_JOLIET=y
# CONFIG_ZISOFS is not set
CONFIG_UDF_FS=y
+CONFIG_UDF_NLS=y
#
# DOS/FAT/NT Filesystems
CONFIG_FAT_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set
#
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V4 is not set
-CONFIG_NFSD_TCP=y
+# CONFIG_NFSD_TCP is not set
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=y
# CONFIG_SOLARIS_X86_PARTITION is not set
# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_LDM_PARTITION is not set
-# CONFIG_NEC98_PARTITION is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=y
-CONFIG_NLS_CODEPAGE_775=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_CODEPAGE_852=y
-CONFIG_NLS_CODEPAGE_855=y
-CONFIG_NLS_CODEPAGE_857=y
-CONFIG_NLS_CODEPAGE_860=y
-CONFIG_NLS_CODEPAGE_861=y
-CONFIG_NLS_CODEPAGE_862=y
-CONFIG_NLS_CODEPAGE_863=y
-CONFIG_NLS_CODEPAGE_864=y
-CONFIG_NLS_CODEPAGE_865=y
-CONFIG_NLS_CODEPAGE_866=y
-CONFIG_NLS_CODEPAGE_869=y
-CONFIG_NLS_CODEPAGE_936=y
-CONFIG_NLS_CODEPAGE_950=y
-CONFIG_NLS_CODEPAGE_932=y
-CONFIG_NLS_CODEPAGE_949=y
-CONFIG_NLS_CODEPAGE_874=y
-CONFIG_NLS_ISO8859_8=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
-CONFIG_NLS_CODEPAGE_1251=y
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=y
-CONFIG_NLS_ISO8859_3=y
-CONFIG_NLS_ISO8859_4=y
-CONFIG_NLS_ISO8859_5=y
-CONFIG_NLS_ISO8859_6=y
-CONFIG_NLS_ISO8859_7=y
-CONFIG_NLS_ISO8859_9=y
-CONFIG_NLS_ISO8859_13=y
-CONFIG_NLS_ISO8859_14=y
-CONFIG_NLS_ISO8859_15=y
-CONFIG_NLS_KOI8_R=y
-CONFIG_NLS_KOI8_U=y
-CONFIG_NLS_UTF8=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
#
# Library routines
#
+# CONFIG_CRC_CCITT is not set
CONFIG_CRC32=y
# CONFIG_LIBCRC32C is not set
# CONFIG_CRYPTO_AES is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_DEFLATE is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
ASSERT(res_ptr < res_end);
+ /*
+ * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
+ * if a TLB entry is purged while in use. sba_mark_invalid()
+ * purges IOTLB entries in power-of-two sizes, so we also
+ * allocate IOVA space in power-of-two sizes.
+ */
+ bits_wanted = 1UL << get_iovp_order(bits_wanted << PAGE_SHIFT);
+
if (likely(bits_wanted == 1)) {
unsigned int bitshiftcnt;
for(; res_ptr < res_end ; res_ptr++) {
int bits_not_wanted = size >> iovp_shift;
unsigned long m;
+ /* Round up to power-of-two size: see AR2305 note above */
+ bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << PAGE_SHIFT);
for (; bits_not_wanted > 0 ; res_ptr++) {
if (unlikely(bits_not_wanted > BITS_PER_LONG)) {
{
}
+static void
+hpsim_set_affinity_noop (unsigned int a, cpumask_t b)
+{
+}
+
static struct hw_interrupt_type irq_type_hp_sim = {
.typename = "hpsim",
.startup = hpsim_irq_startup,
.disable = hpsim_irq_noop,
.ack = hpsim_irq_noop,
.end = hpsim_irq_noop,
- .set_affinity = (void (*)(unsigned int, unsigned long)) hpsim_irq_noop,
+ .set_affinity = hpsim_set_affinity_noop,
};
void __init
printk(KERN_WARNING "%s: set_multicast_list called\n", dev->name);
}
-#ifdef CONFIG_NET_FASTROUTE
-static int
-simeth_accept_fastpath(struct net_device *dev, struct dst_entry *dst)
-{
- printk(KERN_WARNING "%s: simeth_accept_fastpath called\n", dev->name);
- return -1;
-}
-#endif
-
__initcall(simeth_probe);
#undef SET_PERSONALITY
#define SET_PERSONALITY(ex, ibcs2) elf32_set_personality()
+#define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
+
/* Ugly but avoids duplication */
#include "../../../fs/binfmt_elf.c"
if (!mpnt)
return -ENOMEM;
- if (security_vm_enough_memory((IA32_STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))>>PAGE_SHIFT)) {
+ if (security_vm_enough_memory((IA32_STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))
+ >> PAGE_SHIFT)) {
kmem_cache_free(vm_area_cachep, mpnt);
return -ENOMEM;
}
set_personality(PER_LINUX32);
current->thread.map_base = IA32_PAGE_OFFSET/3;
current->thread.task_size = IA32_PAGE_OFFSET; /* use what Linux/x86 uses... */
- current->thread.flags |= IA64_THREAD_XSTACK; /* data must be executable */
set_fs(USER_DS); /* set addr limit for new TASK_SIZE */
}
data8 compat_clock_gettime /* 265 */
data8 compat_clock_getres
data8 compat_clock_nanosleep
- data8 sys_statfs64
- data8 sys_fstatfs64
+ data8 compat_statfs64
+ data8 compat_fstatfs64
data8 sys_tgkill /* 270 */
data8 compat_sys_utimes
data8 sys32_fadvise64_64
if (BAD_MADT_ENTRY(lapic, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
-
if (lapic->address) {
iounmap((void *) ipi_base_addr);
ipi_base_addr = (unsigned long) ioremap(lapic->address, 0);
if (BAD_MADT_ENTRY(lsapic, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
-
- printk(KERN_INFO "CPU %d (0x%04x)", total_cpus, (lsapic->id << 8) | lsapic->eid);
-
- if (!lsapic->flags.enabled)
- printk(" disabled");
- else {
- printk(" enabled");
+ if (lsapic->flags.enabled) {
#ifdef CONFIG_SMP
smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid;
- if (hard_smp_processor_id()
- == (unsigned int) smp_boot_data.cpu_phys_id[available_cpus])
- printk(" (BSP)");
#endif
++available_cpus;
}
- printk("\n");
-
total_cpus++;
return 0;
}
if (BAD_MADT_ENTRY(lacpi_nmi, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
-
/* TBD: Support lapic_nmi entries */
return 0;
}
if (BAD_MADT_ENTRY(iosapic, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
-
iosapic_init(iosapic->address, iosapic->global_irq_base);
return 0;
if (BAD_MADT_ENTRY(plintsrc, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
-
/*
* Get vector assignment for this interrupt, set attributes,
* and program the IOSAPIC routing table.
if (BAD_MADT_ENTRY(p, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
-
iosapic_override_isa_irq(p->bus_irq, p->global_irq,
(p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
(p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
if (BAD_MADT_ENTRY(nmi_src, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
-
/* TBD: Support nimsrc entries */
return 0;
}
-/* Hook from generic ACPI tables.c */
-void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static void __init
+acpi_madt_oem_check (char *oem_id, char *oem_table_id)
{
if (!strncmp(oem_id, "IBM", 3) &&
- (!strncmp(oem_table_id, "SERMOW", 6))){
+ (!strncmp(oem_table_id, "SERMOW", 6))) {
- /* Unfortunatly ITC_DRIFT is not yet part of the
+ /*
+ * Unfortunately ITC_DRIFT is not yet part of the
* official SAL spec, so the ITC_DRIFT bit is not
* set by the BIOS on this hardware.
*/
sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
- /*Start cyclone clock*/
- cyclone_setup(0);
+ cyclone_setup();
}
}
#define CYCLONE_TIMER_FREQ 100000000
int use_cyclone;
-int __init cyclone_setup(char *str)
+void __init cyclone_setup(void)
{
use_cyclone = 1;
- return 1;
}
static u32* volatile cyclone_timer; /* Cyclone MPMC0 register */
data8 sys_syslog
data8 sys_setitimer
data8 sys_getitimer
+#ifdef CONFIG_TUX
+ data8 __sys_tux // 1120 /* was: ia64_oldstat */
+#else
+# ifdef CONFIG_TUX_MODULE
+ data8 sys_tux // 1120 /* was: ia64_oldstat */
+# else
data8 sys_ni_syscall // 1120 /* was: ia64_oldstat */
+# endif
+#endif
data8 sys_ni_syscall /* was: ia64_oldlstat */
data8 sys_ni_syscall /* was: ia64_oldfstat */
data8 sys_vhangup
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
+#define __KERNEL_SYSCALLS__
#include <asm/unistd.h>
EXPORT_SYMBOL(__ia64_syscall);
+EXPORT_SYMBOL(execve);
+EXPORT_SYMBOL(clone);
/* from arch/ia64/lib */
extern void __divsi3(void);
/*
* This is updated when the user sets irq affinity via /proc
*/
-cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
+cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
+static unsigned long pending_irq_redir[BITS_TO_LONGS(NR_IRQS)];
#ifdef CONFIG_IA64_GENERIC
irq_desc_t * __ia64_irq_desc (unsigned int irq)
int prelen;
irq_desc_t *desc = irq_descp(irq);
unsigned long flags;
+ int redir = 0;
if (!desc->handler->set_affinity)
return -EIO;
prelen = 0;
if (tolower(*rbuf) == 'r') {
prelen = strspn(rbuf, "Rr ");
- irq |= IA64_IRQ_REDIRECTED;
+ redir++;
}
err = cpumask_parse(buffer+prelen, count-prelen, new_value);
spin_lock_irqsave(&desc->lock, flags);
pending_irq_cpumask[irq] = new_value;
+ if (redir)
+ set_bit(irq, pending_irq_redir);
+ else
+ clear_bit(irq, pending_irq_redir);
spin_unlock_irqrestore(&desc->lock, flags);
return full_count;
/* note - we hold desc->lock */
cpumask_t tmp;
irq_desc_t *desc = irq_descp(irq);
+ int redir = test_bit(irq, pending_irq_redir);
if (!cpus_empty(pending_irq_cpumask[irq])) {
cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
if (unlikely(!cpus_empty(tmp))) {
- desc->handler->set_affinity(irq, pending_irq_cpumask[irq]);
+ desc->handler->set_affinity(irq | (redir ? IA64_IRQ_REDIRECTED : 0),
+ pending_irq_cpumask[irq]);
}
cpus_clear(pending_irq_cpumask[irq]);
}
*/
static int cpe_poll_enabled = 1;
-static int cpe_vector = -1;
-
extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
+static int mca_init;
+
/*
* IA64_MCA log support
*/
#ifdef CONFIG_ACPI
+static int cpe_vector = -1;
+
static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
{
}
IA64_MCA_DEBUG("%s: corrected platform error "
- "vector %#x setup and enabled\n", __FUNCTION__, cpev);
+ "vector %#x registered\n", __FUNCTION__, cpev);
}
#endif /* CONFIG_ACPI */
/*
* ia64_mca_cmc_vector_setup
*
- * Setup the corrected machine check vector register in the processor and
- * unmask interrupt. This function is invoked on a per-processor basis.
+ * Setup the corrected machine check vector register in the processor.
+ * (The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
+ * This function is invoked on a per-processor basis.
*
* Inputs
* None
cmcv_reg_t cmcv;
cmcv.cmcv_regval = 0;
- cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
+ cmcv.cmcv_mask = 1; /* Mask/disable interrupt at first */
cmcv.cmcv_vector = IA64_CMC_VECTOR;
ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
IA64_MCA_DEBUG("%s: CPU %d corrected "
- "machine check vector %#x setup and enabled.\n",
+ "machine check vector %#x registered.\n",
__FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
*/
register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
- ia64_mca_cmc_vector_setup(); /* Setup vector on BSP & enable */
+ ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */
/* Setup the MCA rendezvous interrupt vector */
register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
#ifdef CONFIG_ACPI
/* Setup the CPEI/P vector and handler */
- {
- irq_desc_t *desc;
- unsigned int irq;
-
- cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
-
- if (cpe_vector >= 0) {
- for (irq = 0; irq < NR_IRQS; ++irq)
- if (irq_to_vector(irq) == cpe_vector) {
- desc = irq_descp(irq);
- desc->status |= IRQ_PER_CPU;
- setup_irq(irq, &mca_cpe_irqaction);
- }
- ia64_mca_register_cpev(cpe_vector);
- }
- register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
- }
+ cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
+ register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
#endif
/* Initialize the areas set aside by the OS to buffer the
ia64_log_init(SAL_INFO_TYPE_CMC);
ia64_log_init(SAL_INFO_TYPE_CPE);
+ mca_init = 1;
printk(KERN_INFO "MCA related initialization done\n");
}
static int __init
ia64_mca_late_init(void)
{
+ if (!mca_init)
+ return 0;
+
+ /* Setup the CMCI/P vector and handler */
init_timer(&cmc_poll_timer);
cmc_poll_timer.function = ia64_mca_cmc_poll;
- /* Reset to the correct state */
+ /* Unmask/enable the vector */
cmc_polling_enabled = 0;
+ schedule_work(&cmc_enable_work);
+
+ IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
+#ifdef CONFIG_ACPI
+ /* Setup the CPEI/P vector and handler */
init_timer(&cpe_poll_timer);
cpe_poll_timer.function = ia64_mca_cpe_poll;
-#ifdef CONFIG_ACPI
- /* If platform doesn't support CPEI, get the timer going. */
- if (cpe_vector < 0 && cpe_poll_enabled) {
- ia64_mca_cpe_poll(0UL);
- } else {
- cpe_poll_enabled = 0;
+ {
+ irq_desc_t *desc;
+ unsigned int irq;
+
+ if (cpe_vector >= 0) {
+ /* If platform supports CPEI, enable the irq. */
+ cpe_poll_enabled = 0;
+ for (irq = 0; irq < NR_IRQS; ++irq)
+ if (irq_to_vector(irq) == cpe_vector) {
+ desc = irq_descp(irq);
+ desc->status |= IRQ_PER_CPU;
+ setup_irq(irq, &mca_cpe_irqaction);
+ }
+ ia64_mca_register_cpev(cpe_vector);
+ IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
+ } else {
+ /* If platform doesn't support CPEI, get the timer going. */
+ if (cpe_poll_enabled) {
+ ia64_mca_cpe_poll(0UL);
+ IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
+ }
+ }
}
#endif
static inline unsigned long
pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
- return get_unmapped_area(file, addr, len, pgoff, flags, 0);
+ return get_unmapped_area(file, addr, len, pgoff, flags);
}
mntput(pfmfs_mnt);
}
-static loff_t
-pfm_lseek(struct file *file, loff_t offset, int whence)
-{
- DPRINT(("pfm_lseek called\n"));
- return -ESPIPE;
-}
-
static ssize_t
pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
{
DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
return -EINVAL;
}
- /*
- * seeks are not allowed on message queues
- */
- if (ppos != &filp->f_pos) return -ESPIPE;
PROTECT_CTX(ctx, flags);
static struct file_operations pfm_file_ops = {
- .llseek = pfm_lseek,
+ .llseek = no_llseek,
.read = pfm_read,
.write = pfm_write,
.poll = pfm_poll,
*/
if (task == current || ctx->ctx_fl_system) return 0;
+ /*
+ * if context is UNLOADED we are safe to go
+ */
+ if (state == PFM_CTX_UNLOADED) return 0;
+
/*
* no command can operate on a zombie context
*/
}
/*
- * if context is UNLOADED, MASKED we are safe to go
- */
- if (state != PFM_CTX_LOADED) return 0;
-
- /*
- * context is LOADED, we must make sure the task is stopped
+ * context is LOADED or MASKED. Some commands may need to have
+ * the task stopped.
+ *
* We could lift this restriction for UP but it would mean that
* the user has no guarantee the task would not run between
* two successive calls to perfmonctl(). That's probably OK.
return error;
}
-void
-ia64_set_personality (struct elf64_hdr *elf_ex, int ibcs2_interpreter)
-{
- set_personality(PER_LINUX);
- if (elf_ex->e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK)
- current->thread.flags |= IA64_THREAD_XSTACK;
- else
- current->thread.flags &= ~IA64_THREAD_XSTACK;
-}
-
pid_t
kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
struct inode *inode = file->f_dentry->d_inode;
struct proc_dir_entry *entry = PDE(inode);
struct salinfo_data *data = entry->data;
- void *saldata;
- size_t size;
u8 *buf;
u64 bufsize;
buf = NULL;
bufsize = 0;
}
- if (*ppos >= bufsize)
- return 0;
-
- saldata = buf + file->f_pos;
- size = bufsize - file->f_pos;
- if (size > count)
- size = count;
- if (copy_to_user(buffer, saldata, size))
- return -EFAULT;
-
- *ppos += size;
- return size;
+ return simple_read_from_buffer(buffer, count, ppos, buf, bufsize);
}
static void
}
#endif
- /* enable IA-64 Machine Check Abort Handling */
- ia64_mca_init();
-
+ /* enable IA-64 Machine Check Abort Handling unless disabled */
+ if (!strstr(saved_command_line, "nomca"))
+ ia64_mca_init();
+
platform_setup(cmdline_p);
paging_init();
}
smp_setup_percpu_timer();
- ia64_mca_cmc_vector_setup(); /* Setup vector on AP & enable */
+ ia64_mca_cmc_vector_setup(); /* Setup vector on AP */
#ifdef CONFIG_PERFMON
pfm_init_percpu();
low = pgt_cache_water[0];
high = pgt_cache_water[1];
+ preempt_disable();
if (pgtable_cache_size > (u64) high) {
do {
if (pgd_quicklist)
free_page((unsigned long)pmd_alloc_one_fast(0, 0));
} while (pgtable_cache_size > (u64) low);
}
+ preempt_enable();
}
void
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
- vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
+ vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_GROWSUP;
insert_vm_struct(current->mm, vma);
}
{
struct page *page;
/*
- * EFI uses 4KB pages while the kernel can use 4KB or bigger.
+ * EFI uses 4KB pages while the kernel can use 4KB or bigger.
* Thus EFI and the kernel may have different page sizes. It is
* therefore possible to have the initrd share the same page as
* the end of the kernel (given current setup).
if (!fsyscall_table[i] || nolwsys)
fsyscall_table[i] = sys_call_table[i] | 1;
}
- setup_gate(); /* setup gate pages before we free up boot memory... */
+ setup_gate();
#ifdef CONFIG_IA32_SUPPORT
ia32_boot_gdt_init();
*
* This code is executed once for each Hub chip.
*/
-static void
+static void __init
per_hub_init(cnodeid_t cnode)
{
nasid_t nasid;
klhwg_add_all_modules(hwgraph_root);
klhwg_add_all_nodes(hwgraph_root);
- for (cnode = 0; cnode < numionodes; cnode++) {
- extern void per_hub_init(cnodeid_t);
+ for (cnode = 0; cnode < numionodes; cnode++)
per_hub_init(cnode);
- }
/*
*
ii_icrb0_d_u_t icrbd; /* II CRB Register D */
ii_ibcr_u_t ibcr;
ii_icmr_u_t icmr;
+ ii_ieclr_u_t ieclr;
BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
+ /* Clear IBLS0/1 error bits */
+ ieclr.ii_ieclr_regval = 0;
+ if (err_nodepda->bte_if[0].bh_error != BTE_SUCCESS)
+ ieclr.ii_ieclr_fld_s.i_e_bte_0 = 1;
+ if (err_nodepda->bte_if[1].bh_error != BTE_SUCCESS)
+ ieclr.ii_ieclr_fld_s.i_e_bte_1 = 1;
+ REMOTE_HUB_S(nasid, IIO_IECLR, ieclr.ii_ieclr_regval);
+
/* Reinitialize both BTE state machines. */
ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR);
ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
#include <asm/sn/pda.h>
#include <asm/sn/sn2/shubio.h>
#include <asm/nodedata.h>
-#include <asm/delay.h>
#include <linux/bootmem.h>
#include <linux/string.h>
bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
{
u64 transfer_size;
+ u64 transfer_stat;
struct bteinfo_s *bte;
bte_result_t bte_status;
unsigned long irq_flags;
if (!(mode & BTE_WACQUIRE)) {
return BTEFAIL_NOTAVAIL;
}
-
- /* Wait until a bte is available. */
- udelay(1);
} while (1);
return BTE_SUCCESS;
}
- while (*bte->most_rcnt_na == -1UL) {
+ while ((transfer_stat = *bte->most_rcnt_na) == -1UL) {
}
BTE_PRINTKV((" Delay Done. IBLS = 0x%lx, most_rcnt_na = 0x%lx\n",
BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
- if (*bte->most_rcnt_na & IBLS_ERROR) {
- bte_status = *bte->most_rcnt_na & ~IBLS_ERROR;
+ if (transfer_stat & IBLS_ERROR) {
+ bte_status = transfer_stat & ~IBLS_ERROR;
*bte->most_rcnt_na = 0L;
} else {
bte_status = BTE_SUCCESS;
}
u8
-sn_irq_to_vector(u8 irq)
+sn_irq_to_vector(unsigned int irq)
{
return(irq);
}
*oemdata_size = 0;
vfree(*oemdata);
*oemdata = NULL;
- if (efi_guidcmp(guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0)
+ if (efi_guidcmp(guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0 ||
+ efi_guidcmp(guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID) == 0)
return sn_platform_plat_specific_err_print(sect_header, oemdata, oemdata_size);
return 0;
}
return 1;
}
-void bvme6000_reset()
+void bvme6000_reset(void)
{
volatile PitRegsPtr pit = (PitRegsPtr)BVME_PIT_BASE;
_060_real_lock_page:
move.l %d2,-(%sp)
| load sfc/dfc
- moveq #5,%d0
tst.b %d0
jne 1f
moveq #1,%d0
-1: movec.l %dfc,%d2
+ jra 2f
+1: moveq #5,%d0
+2: movec.l %dfc,%d2
movec.l %d0,%dfc
movec.l %d0,%sfc
}
#endif
+ if (CPU_IS_060) {
+ u32 pcr;
+
+ asm (".chip 68060; movec %%pcr,%0; .chip 68k"
+ : "=d" (pcr));
+ if (((pcr >> 8) & 0xff) <= 5) {
+ printk("Enabling workaround for errata I14\n");
+ asm (".chip 68060; movec %0,%%pcr; .chip 68k"
+ : : "d" (pcr | 0x20));
+ }
+ }
+
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) &_etext;
init_mm.end_data = (unsigned long) &_edata;
/*
* user process trying to return with weird frame format
*/
-#if DEBUG
+#ifdef DEBUG
printk("user process returning with weird frame format\n");
#endif
goto badframe;
/*
* user process trying to return with weird frame format
*/
-#if DEBUG
+#ifdef DEBUG
printk("user process returning with weird frame format\n");
#endif
goto badframe;
if (regs->stkadj) {
struct pt_regs *tregs =
(struct pt_regs *)((ulong)regs + regs->stkadj);
-#if DEBUG
+#ifdef DEBUG
printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
/* This must be copied with decreasing addresses to
if (regs->stkadj) {
struct pt_regs *tregs =
(struct pt_regs *)((ulong)regs + regs->stkadj);
-#if DEBUG
+#ifdef DEBUG
printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
/* This must be copied with decreasing addresses to
unsigned short ssw = fp->un.fmtb.ssw;
extern unsigned long _sun3_map_test_start, _sun3_map_test_end;
-#if DEBUG
+#ifdef DEBUG
if (ssw & (FC | FB))
printk ("Instruction fault at %#010lx\n",
ssw & FC ?
unsigned short mmusr;
unsigned long addr, errorcode;
unsigned short ssw = fp->un.fmtb.ssw;
-#if DEBUG
+#ifdef DEBUG
unsigned long desc;
printk ("pid = %x ", current->pid);
if (ssw & DF) {
addr = fp->un.fmtb.daddr;
-#if DEBUG
+#ifdef DEBUG
asm volatile ("ptestr %3,%2@,#7,%0\n\t"
"pmove %%psr,%1@"
: "=a&" (desc)
#endif
mmusr = temp;
-#if DEBUG
+#ifdef DEBUG
printk("mmusr is %#x for addr %#lx in task %p\n",
mmusr, addr, current);
printk("descriptor address is %#lx, contents %#lx\n",
: "a" (&tlong));
printk("tt1 is %#lx\n", tlong);
#endif
-#if DEBUG
+#ifdef DEBUG
printk("Unknown SIGSEGV - 1\n");
#endif
die_if_kernel("Oops",&fp->ptregs,mmusr);
should still create the ATC entry. */
goto create_atc_entry;
-#if DEBUG
+#ifdef DEBUG
asm volatile ("ptestr #1,%2@,#7,%0\n\t"
"pmove %%psr,%1@"
: "=a&" (desc)
else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
printk ("invalid insn access at %#lx from pc %#lx\n",
addr, fp->ptregs.pc);
-#if DEBUG
+#ifdef DEBUG
printk("Unknown SIGSEGV - 2\n");
#endif
die_if_kernel("Oops",&fp->ptregs,mmusr);
if (user_mode(&fp->ptregs))
current->thread.esp0 = (unsigned long) fp;
-#if DEBUG
+#ifdef DEBUG
printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
#endif
#endif
default:
die_if_kernel("bad frame format",&fp->ptregs,0);
-#if DEBUG
+#ifdef DEBUG
printk("Unknown SIGSEGV - 4\n");
#endif
force_sig(SIGSEGV, current);
printk ("\n");
}
-extern void show_stack(struct task_struct *task, unsigned long *stack)
+void show_stack(struct task_struct *task, unsigned long *stack)
{
unsigned long *endstack;
int i;
* csum_partial_copy_from_user.
*/
+#include <linux/module.h>
#include <net/checksum.h>
/*
#endif
if (irq < VIA1_SOURCE_BASE) {
- return cpu_free_irq(irq, dev_id);
+ cpu_free_irq(irq, dev_id);
+ return;
}
if (irq >= NUM_MAC_SOURCES) {
static inline void free_io_area(void *addr)
{
- return vfree((void *)(PAGE_MASK & (unsigned long)addr));
+ vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
#else
return 0;
}
-#if DEBUG_INVALID_PTOV
+#ifdef DEBUG_INVALID_PTOV
int mm_inv_cnt = 5;
#endif
voff += m68k_memory[i].size;
} while (++i < m68k_num_memory);
-#if DEBUG_INVALID_PTOV
+#ifdef DEBUG_INVALID_PTOV
if (mm_inv_cnt > 0) {
mm_inv_cnt--;
printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
return 1;
}
-void mvme147_reset()
+void mvme147_reset(void)
{
printk ("\r\n\nCalled mvme147_reset\r\n");
m147_pcc->watchdog = 0x0a; /* Clear timer */
return 1;
}
-void mvme16x_reset()
+void mvme16x_reset(void)
{
printk ("\r\n\nCalled mvme16x_reset\r\n"
"\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r");
}
#endif
-void q40_reset()
+void q40_reset(void)
{
halted=1;
printk ("\n\n*******************************************\n"
Q40_LED_ON();
while(1) ;
}
-void q40_halt()
+void q40_halt(void)
{
halted=1;
printk ("\n\n*******************\n"
return 0;
}
-unsigned int q40_get_ss()
+unsigned int q40_get_ss(void)
{
return bcd2bin(Q40_RTC_SECS);
}
goto out;
if (pos < 0)
goto out;
+ ret = -ESPIPE;
+ if (!(file->f_mode & FMODE_PREAD))
+ goto out;
ret = read(file, buf, count, &pos);
if (ret > 0)
dnotify_parent(file->f_dentry, DN_ACCESS);
if (pos < 0)
goto out;
+ ret = -ESPIPE;
+ if (!(file->f_mode & FMODE_PWRITE))
+ goto out;
+
ret = write(file, buf, count, &pos);
if (ret > 0)
dnotify_parent(file->f_dentry, DN_MODIFY);
/* And the same for proc */
int proc_dolasatstring(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int r;
down(&lasat_info_sem);
- r = proc_dostring(table, write, filp, buffer, lenp);
+ r = proc_dostring(table, write, filp, buffer, lenp, ppos);
if ( (!write) || r) {
up(&lasat_info_sem);
return r;
/* proc function to write EEPROM after changing int entry */
int proc_dolasatint(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int r;
down(&lasat_info_sem);
- r = proc_dointvec(table, write, filp, buffer, lenp);
+ r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
if ( (!write) || r) {
up(&lasat_info_sem);
return r;
#ifdef CONFIG_DS1603
/* proc function to read/write RealTime Clock */
int proc_dolasatrtc(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int r;
down(&lasat_info_sem);
if (rtctmp < 0)
rtctmp = 0;
}
- r = proc_dointvec(table, write, filp, buffer, lenp);
+ r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
if ( (!write) || r) {
up(&lasat_info_sem);
return r;
static char proc_lasat_ipbuf[32];
/* Parsing of IP address */
int proc_lasat_ip(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int len;
unsigned int ip;
char *p, c;
if (!table->data || !table->maxlen || !*lenp ||
- (filp->f_pos && !write)) {
+ (*ppos && !write)) {
*lenp = 0;
return 0;
}
return -EFAULT;
}
proc_lasat_ipbuf[len] = 0;
- filp->f_pos += *lenp;
+ *ppos += *lenp;
/* Now see if we can convert it to a valid IP */
ip = in_aton(proc_lasat_ipbuf);
*(unsigned int *)(table->data) = ip;
len++;
}
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
}
update_bcastaddr();
up(&lasat_info_sem);
}
int proc_lasat_eeprom_value(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int r;
down(&lasat_info_sem);
- r = proc_dointvec(table, write, filp, buffer, lenp);
+ r = proc_dointvec(table, write, filp, buffer, lenp, ppos);
if ( (!write) || r) {
up(&lasat_info_sem);
return r;
struct net_device *dev;
struct scc_enet_private *cep;
int i, j, err;
- void * dpaddr;
+ uint dp_offset;
unsigned char *eap;
unsigned long mem_addr;
bd_t *bd;
* These are relative offsets in the DP ram address space.
* Initialize base addresses for the buffer descriptors.
*/
- dpaddr = cpm2_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
- ep->sen_genscc.scc_rbase = cpm2_dpram_offset(dpaddr);
- cep->rx_bd_base = (cbd_t *)dpaddr;
+ dp_offset = cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
+ ep->sen_genscc.scc_rbase = dp_offset;
+ cep->rx_bd_base = (cbd_t *)cpm_dpram_addr(dp_offset);
- dpaddr = cpm2_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
- ep->sen_genscc.scc_tbase = cpm2_dpram_offset(dpaddr);
- cep->tx_bd_base = (cbd_t *)dpaddr;
+ dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
+ ep->sen_genscc.scc_tbase = dp_offset;
+ cep->tx_bd_base = (cbd_t *)cpm_dpram_addr(dp_offset);
cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
cep->cur_rx = cep->rx_bd_base;
#define BRG_UART_CLK_DIV16 (BRG_UART_CLK/16)
void
-m8xx_cpm_setbrg(uint brg, uint rate)
+cpm_setbrg(uint brg, uint rate)
{
volatile uint *bp;
* with the processor and the microcode patches applied / activated.
* But the following should be at least safe.
*/
- rh_attach_region(&cpm_dpmem_info, cp->cp_dpmem + CPM_DATAONLY_BASE,
- CPM_DATAONLY_SIZE);
+ rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
}
/*
* Now it returns the actuall physical address of that area.
* use m8xx_cpm_dpram_offset() to get the index
*/
-void *m8xx_cpm_dpalloc(int size)
+uint cpm_dpalloc(uint size, uint align)
{
void *start;
unsigned long flags;
spin_lock_irqsave(&cpm_dpmem_lock, flags);
+ cpm_dpmem_info.alignment = align;
start = rh_alloc(&cpm_dpmem_info, size, "commproc");
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
- return start;
+ return (uint)start;
}
-EXPORT_SYMBOL(m8xx_cpm_dpalloc);
+EXPORT_SYMBOL(cpm_dpalloc);
-int m8xx_cpm_dpfree(void *addr)
+int cpm_dpfree(uint offset)
{
int ret;
unsigned long flags;
spin_lock_irqsave(&cpm_dpmem_lock, flags);
- ret = rh_free(&cpm_dpmem_info, addr);
+ ret = rh_free(&cpm_dpmem_info, (void *)offset);
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
return ret;
}
-EXPORT_SYMBOL(m8xx_cpm_dpfree);
+EXPORT_SYMBOL(cpm_dpfree);
-void *m8xx_cpm_dpalloc_fixed(void *addr, int size)
+uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
{
void *start;
unsigned long flags;
spin_lock_irqsave(&cpm_dpmem_lock, flags);
- start = rh_alloc_fixed(&cpm_dpmem_info, addr, size, "commproc");
+ cpm_dpmem_info.alignment = align;
+ start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
- return start;
+ return (uint)start;
}
-EXPORT_SYMBOL(m8xx_cpm_dpalloc_fixed);
+EXPORT_SYMBOL(cpm_dpalloc_fixed);
-void m8xx_cpm_dpdump(void)
+void cpm_dpdump(void)
{
rh_dump(&cpm_dpmem_info);
}
-EXPORT_SYMBOL(m8xx_cpm_dpdump);
+EXPORT_SYMBOL(cpm_dpdump);
-int m8xx_cpm_dpram_offset(void *addr)
-{
- return (u_char *)addr - ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem;
-}
-EXPORT_SYMBOL(m8xx_cpm_dpram_offset);
-
-void *m8xx_cpm_dpram_addr(int offset)
+void *cpm_dpram_addr(uint offset)
{
return ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem + offset;
}
-EXPORT_SYMBOL(m8xx_cpm_dpram_addr);
+EXPORT_SYMBOL(cpm_dpram_addr);
static int mixer_open(struct inode *inode, struct file *file)
{
mixer.busy = 1;
- return 0;
+ return nonseekable_open(inode, file);
}
sound_set_format(AFMT_MU_LAW);
}
- return 0;
+ return nonseekable_open(inode, file);
err_out_nobusy:
if (file->f_mode & FMODE_WRITE) {
len += sprintf(buffer+len, "\tsq.active = %d sq.syncing = %d\n",
sq.active, sq.syncing);
state.len = len;
- return 0;
+ return nonseekable_open(inode, file);
}
int __init tdm8xx_sound_init(void)
{
int i, has_sound;
- uint dp_addr, dp_mem;
+ uint dp_offset;
volatile uint *sirp;
volatile cbd_t *bdp;
volatile cpm8xx_t *cp;
/* We need to allocate a transmit and receive buffer
* descriptors from dual port ram.
*/
- dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * numReadBufs);
- dp_addr = m8xx_cpm_dpram_offset(dp_mem);
+ dp_addr = cpm_dpalloc(sizeof(cbd_t) * numReadBufs, 8);
/* Set the physical address of the host memory
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
bdp = (cbd_t *)&cp->cp_dpmem[dp_addr];
- up->smc_rbase = dp_mem;
+ up->smc_rbase = dp_offset;
rx_cur = rx_base = (cbd_t *)bdp;
for (i=0; i<(numReadBufs-1); i++) {
/* Now, do the same for the transmit buffers.
*/
- dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * numBufs);
- dp_addr = m8xx_cpm_dpram_offset(dp_mem);
+ dp_offset = cpm_dpalloc(sizeof(cbd_t) * numBufs, 8);
bdp = (cbd_t *)&cp->cp_dpmem[dp_addr];
- up->smc_tbase = dp_mem;
+ up->smc_tbase = dp_offset;
tx_cur = tx_base = (cbd_t *)bdp;
for (i=0; i<(numBufs-1); i++) {
struct net_device *dev;
struct scc_enet_private *cep;
int i, j, k, err;
- void *dp_mem;
- unsigned int dp_addr;
+ uint dp_offset;
unsigned char *eap, *ba;
dma_addr_t mem_addr;
bd_t *bd;
* These are relative offsets in the DP ram address space.
* Initialize base addresses for the buffer descriptors.
*/
- dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE);
- dp_addr = m8xx_cpm_dpram_offset(dp_mem);
- ep->sen_genscc.scc_rbase = dp_mem;
- cep->rx_bd_base = (cbd_t *)&cp->cp_dpmem[dp_addr];
-
- dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE);
- dp_addr = m8xx_cpm_dpram_offset(dp_mem);
- ep->sen_genscc.scc_tbase = dp_mem;
- cep->tx_bd_base = (cbd_t *)&cp->cp_dpmem[dp_addr];
+ dp_offset = cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
+ ep->sen_genscc.scc_rbase = dp_offset;
+ cep->rx_bd_base = cpm_dpram_addr(dp_offset);
+
+ dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
+ ep->sen_genscc.scc_tbase = dp_offset;
+ cep->tx_bd_base = cpm_dpram_addr(dp_offset);
cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
cep->cur_rx = cep->rx_bd_base;
{
struct serial_state * state;
ser_info_t *info;
- uint mem_addr, dp_addr, dp_mem, iobits;
+ uint mem_addr, iobits, dp_offset;
int i, j, idx;
ushort chan;
volatile cbd_t *bdp;
* descriptors from dual port ram, and a character
* buffer area from host mem.
*/
- dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * RX_NUM_FIFO);
- dp_addr = m8xx_cpm_dpram_offset(dp_mem);
+ dp_offset = cpm_dpalloc(sizeof(cbd_t) * RX_NUM_FIFO, 8);
/* Allocate space for FIFOs in the host memory.
*/
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
- bdp = (cbd_t *)&cp->cp_dpmem[dp_addr];
+ bdp = (cbd_t *)&cp->cp_dpmem[dp_offset];
info->rx_cur = info->rx_bd_base = (cbd_t *)bdp;
for (j=0; j<(RX_NUM_FIFO-1); j++) {
if (info->state->smc_scc_num & NUM_IS_SCC) {
scp = &cp->cp_scc[idx];
sup = (scc_uart_t *)&cp->cp_dparam[state->port];
- sup->scc_genscc.scc_rbase = dp_mem;
+ sup->scc_genscc.scc_rbase = dp_offset;
}
else {
sp = &cp->cp_smc[idx];
up = (smc_uart_t *)&cp->cp_dparam[state->port];
- up->smc_rbase = dp_mem;
+ up->smc_rbase = dp_offset;
}
- dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * TX_NUM_FIFO);
- dp_addr = m8xx_cpm_dpram_offset(dp_mem);
+ dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_NUM_FIFO, 8);
/* Allocate space for FIFOs in the host memory.
*/
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
- bdp = (cbd_t *)&cp->cp_dpmem[dp_addr];
+ bdp = (cbd_t *)&cp->cp_dpmem[dp_offset];
info->tx_cur = info->tx_bd_base = (cbd_t *)bdp;
for (j=0; j<(TX_NUM_FIFO-1); j++) {
bdp->cbd_sc = (BD_SC_WRAP | BD_SC_INTRPT);
if (info->state->smc_scc_num & NUM_IS_SCC) {
- sup->scc_genscc.scc_tbase = dp_mem;
+ sup->scc_genscc.scc_tbase = dp_offset;
/* Set up the uart parameters in the
* parameter ram.
cp->cp_simode &= ~(0xffff << (idx * 16));
cp->cp_simode |= (i << ((idx * 16) + 12));
- up->smc_tbase = dp_mem;
+ up->smc_tbase = dp_offset;
/* Set up the uart parameters in the
* parameter ram.
static int __init serial_console_setup(struct console *co, char *options)
{
struct serial_state *ser;
- uint mem_addr, dp_addr, dp_mem, bidx, idx;
+ uint mem_addr, bidx, idx, dp_offset;
ushort chan;
volatile cbd_t *bdp;
volatile cpm8xx_t *cp;
* memory yet because vm allocator isn't initialized
* during this early console init.
*/
- dp_mem = m8xx_cpm_dpalloc(8);
- dp_addr = m8xx_cpm_dpram_offset(dp_mem);
- mem_addr = (uint)(&cpmp->cp_dpmem[dp_addr]);
+ dp_offset = cpm_dpalloc(8, 8);
+ mem_addr = (uint)(&cpmp->cp_dpmem[dp_offset]);
/* Allocate space for two buffer descriptors in the DP ram.
*/
- dp_mem = m8xx_cpm_dpalloc(sizeof(cbd_t) * 2);
- dp_addr = m8xx_cpm_dpram_offset(dp_mem);
+ dp_offset = cpm_dpalloc(sizeof(cbd_t) * 2, 8);
/* Set the physical address of the host memory buffers in
* the buffer descriptors.
*/
- bdp = (cbd_t *)&cp->cp_dpmem[dp_addr];
+ bdp = (cbd_t *)&cp->cp_dpmem[dp_offset];
bdp->cbd_bufaddr = iopa(mem_addr);
(bdp+1)->cbd_bufaddr = iopa(mem_addr+4);
*/
if (ser->smc_scc_num & NUM_IS_SCC) {
- sup->scc_genscc.scc_rbase = dp_mem;
- sup->scc_genscc.scc_tbase = dp_mem + sizeof(cbd_t);
+ sup->scc_genscc.scc_rbase = dp_offset;
+ sup->scc_genscc.scc_tbase = dp_offset + sizeof(cbd_t);
/* Set up the uart parameters in the
* parameter ram.
}
else {
- up->smc_rbase = dp_mem; /* Base of receive buffer desc. */
- up->smc_tbase = dp_mem+sizeof(cbd_t); /* Base of xmt buffer desc. */
+ up->smc_rbase = dp_offset; /* Base of receive buffer desc. */
+ up->smc_tbase = dp_offset+sizeof(cbd_t); /* Base of xmt buffer desc. */
up->smc_rfcr = SMC_EB;
up->smc_tfcr = SMC_EB;
default 6xx
config 6xx
- bool "6xx/7xx/74xx/8260"
+ bool "6xx/7xx/74xx/52xx/8260"
help
There are four types of PowerPC chips supported. The more common
types (601, 603, 604, 740, 750, 7400), the Motorola embedded
- versions (821, 823, 850, 855, 860, 8260), the IBM embedded versions
- (403 and 405) and the high end 64 bit Power processors (POWER 3,
- POWER4, and IBM 970 also known as G5)
+ versions (821, 823, 850, 855, 860, 52xx, 8260), the IBM embedded
+ versions (403 and 405) and the high end 64 bit Power processors
+ (POWER 3, POWER4, and IBM 970 also known as G5)
Unless you are building a kernel for one of the embedded processor
systems, 64 bit IBM RS/6000 or an Apple G5, choose 6xx.
Note that the kernel runs in 32-bit mode even on 64-bit chips.
- Also note that because the 82xx family has a 603e core, specific
- support for that chipset is asked later on.
+ Also note that because the 52xx & 82xx family has a 603e core,
+ specific support for that chipset is asked later on.
config 40x
bool "40x"
fly. This is a nice method to save battery power on notebooks,
because the lower the clock speed, the less power the CPU consumes.
- For more information, take a look at linux/Documentation/cpu-freq or
+ For more information, take a look at <file:Documentation/cpu-freq> or
at <http://www.brodo.de/cpufreq/>
If in doubt, say N.
More information is available at:
<http://linux-apus.sourceforge.net/>.
-config KATANA
- bool "Artesyn-Katana"
-
-config DMV182
- bool "Dy-4 SVME/DMV-182"
-
config WILLOW
bool "Cogent-Willow"
bool "Force-PowerPMC250"
config EV64260
- bool "Marvell-EV64260BP"
- help
- Select EV64260 if configuring of a Marvell (formerly Galileo)
- EV64260BP Evaluation platofm.
+ bool "Galileo-EV-64260-BP"
config SPRUCE
bool "IBM-Spruce"
config SBS8260
bool "SBS8260"
-config RPX6
+config RPX8260
bool "RPXSUPER"
config TQM8260
config ADS8272
bool "ADS8272"
+config LITE5200
+ bool "Freescale LITE5200 / (IceCube)"
+ select PPC_MPC52xx
+ help
+ Support for the LITE5200 dev board for the MPC5200 from Freescale.
+ This is for the LITE5200 version 2.0 board. Don't know if it changes
+ much but it's only been tested on this board version. I think this
+ board is also known as IceCube.
+
endchoice
config PQ2ADS
bool
depends on 8xx || 8260
default y
+
+config PPC_MPC52xx
+ bool
config 8260
bool "CPM2 Support" if WILLOW
depends on 6xx
- default y if TQM8260 || RPXSUPER || EST8260 || SBS8260 || SBC82xx
+ default y if TQM8260 || RPX8260 || EST8260 || SBS8260 || SBC82xx
help
The MPC8260 is a typical embedded CPU made by Motorola. Selecting
this option means that you wish to build a kernel for a machine with
config CPM2
bool
- depends on 8260
+ depends on 8260 || MPC8560
default y
help
The CPM2 (Communications Processor Module) is a coprocessor on
depends on PPC_PMAC || PPC_CHRP
default y
-menu "Set bridge options"
- depends on MV64X60
-
-config MV64X60_BASE
- hex "Set bridge base used by firmware"
- default "0xf1000000"
- help
- A firmware can leave the base address of the bridge's registers at
- a non-standard location. If so, set this value to reflect the
- address of that non-standard location.
-
-config MV64X60_NEW_BASE
- hex "Set bridge base used by kernel"
- default "0xf1000000"
- help
- If the current base address of the bridge's registers is not where
- you want it, set this value to the address that you want it moved to.
-
-endmenu
-
config PPC_GEN550
bool
- depends on SANDPOINT || MCPN765 || SPRUCE || PPLUS || PCORE || PRPMC750 || K2 || PRPMC800 || (EV64260 && !MV64X60_MPSC) || DMV182
+ depends on SANDPOINT || MCPN765 || SPRUCE || PPLUS || PCORE || PRPMC750 || K2 || PRPMC800
default y
config FORCE
depends on EV64260
default y
-config MV64360
- bool
- depends on KATANA || DMV182
- default y
-
-config MV64X60
- bool
- depends on (GT64260 || MV64360)
- default y
-
config NONMONARCH_SUPPORT
bool "Enable Non-Monarch Support"
depends on PRPMC800
config FSL_OCP
bool
- depends on MPC10X_BRIDGE
+ depends on MPC10X_BRIDGE || PPC_MPC52xx
default y
config MPC10X_OPENPIC
depends on 8xx
default y
+config SERIAL_CONSOLE_BAUD
+ int
+ depends on EV64260
+ default "115200"
+
config PPCBUG_NVRAM
bool "Enable reading PPCBUG NVRAM during boot" if PPLUS || LOPEC
default y if PPC_PREP
an image of the device tree that the kernel copies from Open
Firmware. If unsure, say Y here.
-config PPC_RTAS
- bool "Support for RTAS (RunTime Abstraction Services) in /proc"
- depends on PPC_OF && PROC_FS
- ---help---
- When you use this option, you will be able to use RTAS from
- userspace.
-
- RTAS stands for RunTime Abstraction Services and should
- provide a portable way to access and set system information. This is
- commonly used on RS/6000 (pSeries) computers.
-
- You can access RTAS via the special proc file system entry rtas.
- Don't confuse this rtas entry with the one in /proc/device-tree/rtas
- which is readonly.
-
- If you don't know if you can use RTAS look into
- /proc/device-tree/rtas. If there are some entries, it is very likely
- that you will be able to use RTAS.
-
- You can do cool things with rtas. To print out information about
- various sensors in the system, just do a
-
- $ cat /proc/rtas/sensors
-
- or if you power off your machine at night but want it running when
- you enter your office at 7:45 am, do a
-
- # date -d 'tomorrow 7:30' +%s > /proc/rtas/poweron
-
- and shutdown.
-
- If unsure, say Y.
-
config PREP_RESIDUAL
bool "Support for PReP Residual Data"
depends on PPC_PREP
config KGDB
bool "Include kgdb kernel debugger"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && (BROKEN || PPC_GEN550 || 4xx)
select DEBUG_INFO
help
Include in-kernel hooks for kgdb, the Linux kernel source level
config SERIAL_TEXT_DEBUG
bool "Support for early boot texts over serial port"
- depends on 4xx || LOPEC || MV64X60 || PPLUS || PRPMC800 || PPC_GEN550
+ depends on 4xx || GT64260 || LOPEC || PPLUS || PRPMC800 || PPC_GEN550 || PPC_MPC52xx
config PPC_OCP
bool
- depends on IBM_OCP || FSL_OCP || MV64X60
+ depends on IBM_OCP || FSL_OCP
default y
endmenu
LDFLAGS_vmlinux := -Ttext $(KERNELLOAD) -Bstatic
CPPFLAGS += -Iarch/$(ARCH)
AFLAGS += -Iarch/$(ARCH)
-cflags-y += -Iarch/$(ARCH) -msoft-float -pipe \
+CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe \
-ffixed-r2 -Wno-uninitialized -mmultiple
CPP = $(CC) -E $(CFLAGS)
+CHECK := $(CHECK) -D__powerpc__=1
+
ifndef CONFIG_E500
-cflags-y += -mstring
+CFLAGS += -mstring
endif
-cflags-$(CONFIG_4xx) += -Wa,-m405
-cflags-$(CONFIG_E500) += -Wa,-me500
-cflags-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
+cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
+cpu-as-$(CONFIG_4xx) += -Wa,-m405
+cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
+cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec
+cpu-as-$(CONFIG_E500) += -Wa,-me500
-CFLAGS += $(cflags-y)
+AFLAGS += $(cpu-as-y)
+CFLAGS += $(cpu-as-y)
head-y := arch/ppc/kernel/head.o
head-$(CONFIG_8xx) := arch/ppc/kernel/head_8xx.o
else
NEW_AS := 0
endif
+# gcc-3.4 and binutils-2.14 are a fatal combination.
+GCC_VERSION := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
+BAD_GCC_AS := $(shell echo mftb 5 | $(AS) -mppc -many -o /dev/null >/dev/null 2>&1 && echo 0 || echo 1)
-ifneq ($(NEW_AS),0)
checkbin:
+ifeq ($(GCC_VERSION)$(BAD_GCC_AS),03041)
+ @echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '
+ @echo 'correctly with gcc-3.4 and your version of binutils.'
+ @echo '*** Please upgrade your binutils or downgrade your gcc'
+ @false
+endif
+ifneq ($(NEW_AS),0)
@echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '
@echo 'correctly with old versions of binutils.'
@echo '*** Please upgrade your binutils to ${GOODVER} or newer'
@false
-else
-checkbin:
- @true
endif
+ @true
CLEAN_FILES += include/asm-$(ARCH)/offsets.h \
arch/$(ARCH)/kernel/asm-offsets.s
void _vprintk(void(*putc)(const char), const char *fmt0, va_list ap);
unsigned char *ISA_io = NULL;
-#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) || \
- defined(CONFIG_SERIAL_MPSC_CONSOLE)
+#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) \
+ || defined(CONFIG_SERIAL_MPC52xx_CONSOLE)
extern unsigned long com_port;
extern int serial_tstc(unsigned long com_port);
int tstc(void)
{
-#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) || \
- defined(CONFIG_SERIAL_MPSC_CONSOLE)
+#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) \
+ || defined(CONFIG_SERIAL_MPC52xx_CONSOLE)
if(keyb_present)
return (CRT_tstc() || serial_tstc(com_port));
else
int getc(void)
{
while (1) {
-#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) || \
- defined(CONFIG_SERIAL_MPSC_CONSOLE)
+#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) \
+ || defined(CONFIG_SERIAL_MPC52xx_CONSOLE)
if (serial_tstc(com_port))
return (serial_getc(com_port));
#endif /* serial console */
{
int x,y;
-#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) || \
- defined(CONFIG_SERIAL_MPSC_CONSOLE)
+#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) \
+ || defined(CONFIG_SERIAL_MPC52xx_CONSOLE)
serial_putc(com_port, c);
if ( c == '\n' )
serial_putc(com_port, '\r');
y = orig_y;
while ( ( c = *s++ ) != '\0' ) {
-#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) || \
- defined(CONFIG_SERIAL_MPSC_CONSOLE)
+#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE) \
+ || defined(CONFIG_SERIAL_MPC52xx_CONSOLE)
serial_putc(com_port, c);
if ( c == '\n' ) serial_putc(com_port, '\r');
#endif /* serial console */
end-$(CONFIG_OCOTEA) := ocotea
entrypoint-$(CONFIG_OCOTEA) := 0x01000000
- extra.o-$(CONFIG_EV64260) := misc-ev64260.o
+ extra.o-$(CONFIG_EV64260) := direct.o misc-ev64260.o
end-$(CONFIG_EV64260) := ev64260
cacheflag-$(CONFIG_EV64260) := -include $(clear_L2_L3)
entrypoint-$(CONFIG_SPRUCE) := 0x00800000
misc-$(CONFIG_SPRUCE) += misc-spruce.o
+ zimage-$(CONFIG_LITE5200) := zImage-STRIPELF
+zimageinitrd-$(CONFIG_LITE5200) := zImage.initrd-STRIPELF
+ end-$(CONFIG_LITE5200) := lite5200
+ cacheflag-$(CONFIG_LITE5200) := -include $(clear_L2_L3)
+
+
# SMP images should have a '.smp' suffix.
end-$(CONFIG_SMP) := $(end-y).smp
boot-$(CONFIG_8260) += embed_config.o
boot-$(CONFIG_BSEIP) += iic.o
boot-$(CONFIG_MBX) += iic.o pci.o qspan_pci.o
-boot-$(CONFIG_MV64X60) += misc-mv64x60.o
-boot-$(CONFIG_DMV182) += mv64x60_stub.o
boot-$(CONFIG_RPXCLASSIC) += iic.o pci.o qspan_pci.o
boot-$(CONFIG_RPXLITE) += iic.o
# Different boards need different serial implementations.
-ifeq ($(CONFIG_SERIAL_CONSOLE),y)
+ifeq ($(CONFIG_SERIAL_CPM_CONSOLE),y)
boot-$(CONFIG_8xx) += m8xx_tty.o
boot-$(CONFIG_8260) += m8260_tty.o
endif
-boot-$(CONFIG_SERIAL_MPSC_CONSOLE) += mv64x60_tty.o
+boot-$(CONFIG_SERIAL_MPC52xx_CONSOLE) += mpc52xx_tty.o
+boot-$(CONFIG_GT64260_CONSOLE) += gt64260_tty.o
LIBS := $(common)/lib.a $(bootlib)/lib.a
ifeq ($(CONFIG_PPC_PREP),y)
#endif /* CONFIG_MBX */
#if defined(CONFIG_RPXLITE) || defined(CONFIG_RPXCLASSIC) || \
- defined(CONFIG_RPX6) || defined(CONFIG_EP405)
+ defined(CONFIG_RPX8260) || defined(CONFIG_EP405)
/* Helper functions for Embedded Planet boards.
*/
/* Because I didn't find anything that would do this.......
}
}
-#ifdef CONFIG_RPX6
+#ifdef CONFIG_RPX8260
static uint
rpx_baseten(u_char *cp)
{
}
#endif /* SBS8260 */
-#ifdef CONFIG_RPX6
+#ifdef CONFIG_RPX8260
void
embed_config(bd_t **bdp)
{
isync
#endif
-#if defined(CONFIG_MBX) || defined(CONFIG_RPX6) || defined(CONFIG_PPC_PREP)
+#if defined(CONFIG_MBX) || defined(CONFIG_RPX8260) || defined(CONFIG_PPC_PREP)
mr r29,r3 /* On the MBX860, r3 is the board info pointer.
* On the RPXSUPER, r3 points to the NVRAM
* configuration keys.
mr r3, r29
#endif
-#if defined(CONFIG_MBX) || defined(CONFIG_RPX6) || defined(CONFIG_PPC_PREP)
+#if defined(CONFIG_MBX) || defined(CONFIG_RPX8260) || defined(CONFIG_PPC_PREP)
mr r4,r29 /* put the board info pointer where the relocate
* routine will find it
*/
#endif
-#ifdef CONFIG_MV64X60
- /* mv64x60 specific hook to do things like moving register base, etc. */
- bl mv64x60_init
+#ifdef CONFIG_EV64260
+ /* Move 64260's base regs & CS window for external UART */
+ bl ev64260_init
#endif
/* Get the load address.
/* If defined, enables serial console. The value (1 through 4)
* should designate which SCC is used, but this isn't complete. Only
* SCC1 is known to work at this time.
+ * We're only linked if SERIAL_CPM_CONSOLE=y, so we only need to test
+ * SERIAL_CPM_SCC1.
*/
-#ifdef CONFIG_SCC_CONSOLE
+#ifdef CONFIG_SERIAL_CPM_SCC1
#define SCC_CONSOLE 1
#endif
unsigned long
serial_init(int ignored, bd_t *bd)
{
- volatile smc_t *sp;
- volatile smc_uart_t *up;
#ifdef SCC_CONSOLE
volatile scc_t *sccp;
volatile scc_uart_t *sup;
+#else
+ volatile smc_t *sp;
+ volatile smc_uart_t *up;
#endif
volatile cbd_t *tbdf, *rbdf;
volatile cpm2_map_t *ip;
{
volatile cbd_t *rbdf;
volatile char *buf;
- volatile smc_uart_t *up;
+#ifdef SCC_CONSOLE
volatile scc_uart_t *sup;
+#else
+ volatile smc_uart_t *up;
+#endif
volatile cpm2_map_t *ip;
int i, nc;
{
volatile cbd_t *tbdf;
volatile char *buf;
- volatile smc_uart_t *up;
+#ifdef SCC_CONSOLE
volatile scc_uart_t *sup;
+#else
+ volatile smc_uart_t *up;
+#endif
volatile cpm2_map_t *ip;
- extern bd_t *board_info;
ip = (cpm2_map_t *)CPM_MAP_ADDR;
#ifdef SCC_CONSOLE
serial_tstc(void *ignored)
{
volatile cbd_t *rbdf;
- volatile smc_uart_t *up;
+#ifdef SCC_CONSOLE
volatile scc_uart_t *sup;
+#else
+ volatile smc_uart_t *up;
+#endif
volatile cpm2_map_t *ip;
ip = (cpm2_map_t *)CPM_MAP_ADDR;
* initialize the serial console port.
*/
embed_config(&bp);
-#if defined(CONFIG_SERIAL_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
+#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
com_port = serial_init(0, bp);
#endif
rec = (struct bi_record *)((unsigned long)rec + rec->size);
}
puts("Now booting the kernel\n");
+#if defined(CONFIG_SERIAL_CPM_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
serial_close(com_port);
+#endif
return (unsigned long)hold_residual;
}
/*
* arch/ppc/boot/simple/misc-ev64260.S
- *
+ *
* Host bridge init code for the Marvell/Galileo EV-64260-BP evaluation board
* with a GT64260 onboard.
*
* Author: Mark Greer <mgreer@mvista.com>
*
- * Copyright 2001 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * 2001 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
*/
#include <asm/ppc_asm.h>
-#include <asm/processor.h>
#include <asm/cache.h>
-#include <asm/mv64x60_defs.h>
+#include <asm/gt64260_defs.h>
+
#include <platforms/ev64260.h>
- .globl mv64x60_board_init
-mv64x60_board_init:
- /* DINK doesn't enable 745x timebase, so enable here (Adrian Cox) */
- mfspr r25,PVR
- srwi r25,r25,16
- cmplwi r25,(PVR_7450 >> 16)
- bne 1f
- mfspr r25,HID0
- oris r25,r25,(HID0_TBEN >> 16)
- mtspr HID0,r25
-1:
-#if (CONFIG_MV64X60_NEW_BASE != CONFIG_MV64X60_BASE)
+ .globl ev64260_init
+ev64260_init:
+ li r20,0
li r23,20
- /*
- * Change the CS2 window for the UART so that the bootloader
- * can do I/O thru the UARTs.
- */
- addis r25,0,CONFIG_MV64X60_NEW_BASE@h
- ori r25,r25,MV64x60_CPU2DEV_2_BASE
+ /* Relocate galileo's regs */
+ addis r25,0,GT64260_INTERNAL_SPACE_DEFAULT_ADDR@h
+ ori r25,r25,GT64260_INTERNAL_SPACE_DECODE
+ lwbrx r26,0,(r25)
+ lis r24,0xffff
+ and r26,r26,r24
+ addis r24,0,EV64260_BRIDGE_REG_BASE@h
+ srw r24,r24,r23
+ or r26,r26,r24
+ stwbrx r26,0,(r25)
+ sync
+
+ /* Wait for write to take effect */
+ addis r25,0,EV64260_BRIDGE_REG_BASE@h
+ ori r25,r25,GT64260_INTERNAL_SPACE_DECODE
+1: lwbrx r24,0,(r25)
+ cmpw r24,r26
+ bne 1b
+
+ /* Change CS2 (UARTS on device module) window */
+ addis r25,0,EV64260_BRIDGE_REG_BASE@h
+ ori r25,r25,GT64260_CPU_CS_DECODE_2_BOT
addis r26,0,EV64260_UART_BASE@h
srw r26,r26,r23
stwbrx r26,0,(r25)
sync
- addis r25,0,CONFIG_MV64X60_NEW_BASE@h
- ori r25,r25,MV64x60_CPU2DEV_2_SIZE
+ addis r25,0,EV64260_BRIDGE_REG_BASE@h
+ ori r25,r25,GT64260_CPU_CS_DECODE_2_TOP
addis r26,0,EV64260_UART_END@h
srw r26,r26,r23
stwbrx r26,0,(r25)
sync
-#endif
- blr
-
-#if defined(CONFIG_SERIAL_MPSC_CONSOLE)
-.data
- .globl mv64x60_console_baud
-mv64x60_console_baud:
-.long EV64260_DEFAULT_BAUD
- .globl mv64x60_mpsc_clk_src
-mv64x60_mpsc_clk_src:
-.long EV64260_MPSC_CLK_SRC
-
- .globl mv64x60_mpsc_clk_freq
-mv64x60_mpsc_clk_freq:
-.long EV64260_MPSC_CLK_FREQ
-#endif
+ blr
* user to edit the cmdline or not.
*/
#if (defined(CONFIG_SERIAL_8250_CONSOLE) || defined(CONFIG_VGA_CONSOLE)) \
- && !defined(CONFIG_GEMINI) || defined(CONFIG_SERIAL_MPSC_CONSOLE)
+ && !defined(CONFIG_GEMINI)
#define INTERACTIVE_CONSOLE 1
#endif
unsigned long initrd_loc, TotalMemory = 0;
serial_fixups();
-#if defined(CONFIG_SERIAL_8250_CONSOLE) || defined(CONFIG_SERIAL_MPSC_CONSOLE)
+#ifdef CONFIG_SERIAL_8250_CONSOLE
com_port = serial_init(0, NULL);
#endif
puts("\n");
puts("Uncompressing Linux...");
- gunzip(0, 0x400000, zimage_start, &zimage_size);
+ gunzip(NULL, 0x400000, zimage_start, &zimage_size);
puts("done.\n");
/* get the bi_rec address */
#include <sys/stat.h>
#include <unistd.h>
#include <netinet/in.h>
+#include <stdint.h>
/* This gets tacked on the front of the image. There are also a few
* bytes allocated after the _start label used by the boot rom (see
* head.S for details).
*/
typedef struct boot_block {
- unsigned long bb_magic; /* 0x0052504F */
- unsigned long bb_dest; /* Target address of the image */
- unsigned long bb_num_512blocks; /* Size, rounded-up, in 512 byte blks */
- unsigned long bb_debug_flag; /* Run debugger or image after load */
- unsigned long bb_entry_point; /* The image address to start */
- unsigned long bb_checksum; /* 32 bit checksum including header */
- unsigned long reserved[2];
+ uint32_t bb_magic; /* 0x0052504F */
+ uint32_t bb_dest; /* Target address of the image */
+ uint32_t bb_num_512blocks; /* Size, rounded-up, in 512 byte blks */
+ uint32_t bb_debug_flag; /* Run debugger or image after load */
+ uint32_t bb_entry_point; /* The image address to start */
+ uint32_t bb_checksum; /* 32 bit checksum including header */
+ uint32_t reserved[2];
} boot_block_t;
#define IMGBLK 512
CONFIG_MMU=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_HAVE_DEC_LOCK=y
-CONFIG_PPC=y
-CONFIG_PPC32=y
-CONFIG_GENERIC_NVRAM=y
#
# Code maturity level options
#
CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
-CONFIG_STANDALONE=y
-CONFIG_BROKEN_ON_SMP=y
#
# General setup
#
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
-# CONFIG_AUDIT is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_HOTPLUG is not set
-# CONFIG_IKCONFIG is not set
# CONFIG_EMBEDDED is not set
-CONFIG_KALLSYMS=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
#
# Loadable module support
CONFIG_KMOD=y
#
-# Processor
+# Platform support
#
+CONFIG_PPC=y
+CONFIG_PPC32=y
CONFIG_6xx=y
# CONFIG_40x is not set
-# CONFIG_44x is not set
# CONFIG_POWER3 is not set
-# CONFIG_POWER4 is not set
# CONFIG_8xx is not set
-CONFIG_ALTIVEC=y
-# CONFIG_TAU is not set
-# CONFIG_CPU_FREQ is not set
-CONFIG_PPC_STD_MMU=y
#
-# Platform options
+# IBM 4xx options
#
+# CONFIG_8260 is not set
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_PPC_STD_MMU=y
# CONFIG_PPC_MULTIPLATFORM is not set
# CONFIG_APUS is not set
-# CONFIG_KATANA is not set
-# CONFIG_WILLOW is not set
+# CONFIG_WILLOW_2 is not set
# CONFIG_PCORE is not set
# CONFIG_POWERPMC250 is not set
CONFIG_EV64260=y
# CONFIG_K2 is not set
# CONFIG_PAL4 is not set
# CONFIG_GEMINI is not set
-# CONFIG_EST8260 is not set
-# CONFIG_SBS8260 is not set
-# CONFIG_RPX6 is not set
-# CONFIG_TQM8260 is not set
-
-#
-# Set bridge base address
-#
-CONFIG_MV64X60_BASE=0xf1000000
-CONFIG_MV64X60_NEW_BASE=0xfbe00000
-CONFIG_PPC_GEN550=y
CONFIG_GT64260=y
-CONFIG_MV64X60=y
CONFIG_SERIAL_CONSOLE_BAUD=115200
# CONFIG_SMP is not set
# CONFIG_PREEMPT is not set
-# CONFIG_HIGHMEM is not set
-CONFIG_KERNEL_ELF=y
-CONFIG_BINFMT_ELF=y
-CONFIG_BINFMT_MISC=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0,115200 ip=on"
+CONFIG_ALTIVEC=y
+CONFIG_TAU=y
+# CONFIG_TAU_INT is not set
+# CONFIG_TAU_AVERAGE is not set
+# CONFIG_CPU_FREQ is not set
#
-# Bus options
+# General setup
#
-CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_HIGHMEM is not set
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
+CONFIG_KCORE_ELF=y
+CONFIG_BINFMT_ELF=y
+CONFIG_KERNEL_ELF=y
+CONFIG_BINFMT_MISC=y
CONFIG_PCI_LEGACY_PROC=y
CONFIG_PCI_NAMES=y
+# CONFIG_HOTPLUG is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+# CONFIG_PPC601_SYNC_FIX is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,115200 ip=on"
#
# Advanced setup
CONFIG_TASK_SIZE=0x80000000
CONFIG_BOOT_LOAD=0x00800000
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-
#
# Memory Technology Devices (MTD)
#
# CONFIG_MTD is not set
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
#
# Plug and Play support
#
+# CONFIG_PNP is not set
#
# Block devices
# CONFIG_BLK_DEV_DAC960 is not set
# CONFIG_BLK_DEV_UMEM is not set
CONFIG_BLK_DEV_LOOP=y
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
# CONFIG_BLK_DEV_NBD is not set
-# CONFIG_BLK_DEV_CARMEL is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_LBD is not set
#
-# ATA/ATAPI/MFM/RLL support
+# Multi-device support (RAID and LVM)
#
-# CONFIG_IDE is not set
+# CONFIG_MD is not set
#
-# SCSI device support
+# ATA/IDE/MFM/RLL support
#
-# CONFIG_SCSI is not set
+# CONFIG_IDE is not set
#
-# Multi-device support (RAID and LVM)
+# SCSI support
#
-# CONFIG_MD is not set
+# CONFIG_SCSI is not set
#
# Fusion MPT device support
#
#
-# IEEE 1394 (FireWire) support
+# IEEE 1394 (FireWire) support (EXPERIMENTAL)
#
# CONFIG_IEEE1394 is not set
#
# CONFIG_I2O is not set
-#
-# Macintosh device drivers
-#
-
#
# Networking support
#
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
# CONFIG_NETLINK_DEV is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_NET_IPGRE is not set
# CONFIG_IP_MROUTE is not set
# CONFIG_ARPD is not set
+# CONFIG_INET_ECN is not set
CONFIG_SYN_COOKIES=y
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_IP_NF_CONNTRACK is not set
+# CONFIG_IP_NF_QUEUE is not set
+# CONFIG_IP_NF_IPTABLES is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
+# CONFIG_IP_NF_COMPAT_IPFWADM is not set
# CONFIG_IPV6 is not set
-# CONFIG_NETFILTER is not set
+# CONFIG_XFRM_USER is not set
#
# SCTP Configuration (EXPERIMENTAL)
#
+CONFIG_IPV6_SCTP__=y
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
+# CONFIG_LLC is not set
# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
+# CONFIG_BRIDGE is not set
# CONFIG_X25 is not set
# CONFIG_LAPB is not set
# CONFIG_NET_DIVERT is not set
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
#
# ARCnet devices
#
# CONFIG_ARCNET is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
#
# Ethernet (10 or 100Mbit)
#
CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
+# CONFIG_MII is not set
# CONFIG_OAKNET is not set
# CONFIG_HAPPYMEAL is not set
# CONFIG_SUNGEM is not set
#
# Tulip family network device support
#
-CONFIG_NET_TULIP=y
-# CONFIG_DE2104X is not set
-CONFIG_TULIP=y
-# CONFIG_TULIP_MWI is not set
-# CONFIG_TULIP_MMIO is not set
-# CONFIG_TULIP_NAPI is not set
-# CONFIG_DE4X5 is not set
-# CONFIG_WINBOND_840 is not set
-# CONFIG_DM9102 is not set
+# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
# CONFIG_B44 is not set
-# CONFIG_FORCEDETH is not set
# CONFIG_DGRS is not set
CONFIG_EEPRO100=y
# CONFIG_EEPRO100_PIO is not set
# Ethernet (10000 Mbit)
#
# CONFIG_IXGB is not set
-# CONFIG_S2IO is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
#
-# Token Ring devices
+# Wireless LAN (non-hamradio)
#
-# CONFIG_TR is not set
+# CONFIG_NET_RADIO is not set
#
-# Wireless LAN (non-hamradio)
+# Token Ring devices (depends on LLC=y)
#
-# CONFIG_NET_RADIO is not set
+# CONFIG_RCPCI is not set
+# CONFIG_SHAPER is not set
#
# Wan interfaces
#
# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
+
+#
+# Amateur Radio support
+#
+# CONFIG_HAMRADIO is not set
+
+#
+# IrDA (infrared) support
+#
+# CONFIG_IRDA is not set
#
# ISDN subsystem
#
-# CONFIG_ISDN is not set
+# CONFIG_ISDN_BOOL is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
#
-# Telephony Support
+# Old CD-ROM drivers (not SCSI, not IDE)
#
-# CONFIG_PHONE is not set
+# CONFIG_CD_NO_IDESCSI is not set
#
# Input device support
#
-CONFIG_INPUT=y
+# CONFIG_INPUT is not set
#
# Userland interfaces
#
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
#
# Input I/O drivers
# CONFIG_GAMEPORT is not set
CONFIG_SOUND_GAMEPORT=y
# CONFIG_SERIO is not set
-# CONFIG_SERIO_I8042 is not set
#
# Input Device Drivers
#
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
+
+#
+# Macintosh device drivers
+#
#
# Character devices
#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
# CONFIG_SERIAL_NONSTANDARD is not set
#
#
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=4
# CONFIG_SERIAL_8250_EXTENDED is not set
#
# Non-8250 serial port support
#
-# CONFIG_SERIAL_MPSC is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
+CONFIG_UNIX98_PTY_COUNT=256
+
+#
+# I2C support
+#
+CONFIG_I2C=m
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+CONFIG_I2C_CHARDEV=m
+
+#
+# I2C Hardware Sensors Mainboard support
+#
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C Hardware Sensors Chip support
+#
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_I2C_SENSOR is not set
+
+#
+# Mice
+#
+# CONFIG_BUSMOUSE is not set
# CONFIG_QIC02_TAPE is not set
#
# CONFIG_AGP is not set
# CONFIG_DRM is not set
# CONFIG_RAW_DRIVER is not set
-
-#
-# I2C support
-#
-# CONFIG_I2C is not set
-
-#
-# Misc devices
-#
+# CONFIG_HANGCHECK_TIMER is not set
#
# Multimedia devices
#
# CONFIG_DVB is not set
-#
-# Graphics support
-#
-# CONFIG_FB is not set
-
-#
-# Console display driver support
-#
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_MDA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-
-#
-# USB support
-#
-# CONFIG_USB is not set
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-
#
# File systems
#
# Pseudo filesystems
#
CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
CONFIG_DEVFS_FS=y
# CONFIG_DEVFS_MOUNT is not set
# CONFIG_DEVFS_DEBUG is not set
+CONFIG_DEVPTS_FS=y
# CONFIG_DEVPTS_FS_XATTR is not set
CONFIG_TMPFS=y
-# CONFIG_HUGETLB_PAGE is not set
CONFIG_RAMFS=y
#
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
# CONFIG_NFSD is not set
CONFIG_ROOT_NFS=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
# CONFIG_EXPORTFS is not set
CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_SUNRPC_GSS is not set
# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
# CONFIG_AFS_FS is not set
#
CONFIG_MSDOS_PARTITION=y
#
-# Native Language Support
+# Sound
#
-# CONFIG_NLS is not set
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB is not set
+# CONFIG_USB_GADGET is not set
+
+#
+# Bluetooth support
+#
+# CONFIG_BT is not set
#
# Library routines
#
-CONFIG_CRC32=y
-# CONFIG_LIBCRC32C is not set
+# CONFIG_CRC32 is not set
#
# Kernel hacking
#
# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_KALLSYMS is not set
# CONFIG_SERIAL_TEXT_DEBUG is not set
-CONFIG_PPC_OCP=y
#
# Security options
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
-# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_ATKBD=y
# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
#
# Non-8250 serial port support
#
-# CONFIG_SERIAL_CORE is not set
-# CONFIG_SERIAL_PMACZILOG is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_PMACZILOG=y
# CONFIG_SERIAL_PMACZILOG_CONSOLE is not set
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
# Makefile for the linux kernel.
#
-ifdef CONFIG_PPC64BRIDGE
-EXTRA_AFLAGS := -Wa,-mppc64bridge
-endif
-ifdef CONFIG_4xx
-EXTRA_AFLAGS := -Wa,-m405
-endif
-ifdef CONFIG_E500
-EXTRA_AFLAGS := -Wa,-me500
-endif
-
extra-$(CONFIG_PPC_STD_MMU) := head.o
extra-$(CONFIG_40x) := head_4xx.o
extra-$(CONFIG_44x) := head_44x.o
/* All of the bits we have to set.....
*/
- ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_BTIC | HID0_LRSTK
+ ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_LRSTK
+BEGIN_FTR_SECTION
+ ori r11,r11,HID0_BTIC
+END_FTR_SECTION_IFCLR(CPU_FTR_NO_BTIC)
BEGIN_FTR_SECTION
oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
#endif
/* We need to mark all pages as being coherent if we're SMP or we
- * have a 754x and an MPC107 host bridge. */
+ * have a 754x and an MPC107 host bridge.
+ */
#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE)
#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT
#else
CPU_FTR_COMMON |
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450,
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NEED_COHERENT,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_L3_DISABLE_NAP,
+ CPU_FTR_L3_DISABLE_NAP | CPU_FTR_NEED_COHERENT,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_COMMON |
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR,
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
+ CPU_FTR_NEED_COHERENT,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_COMMON |
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS,
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS |
+ CPU_FTR_NEED_COHERENT,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_L3_DISABLE_NAP | CPU_FTR_HAS_HIGH_BATS,
+ CPU_FTR_L3_DISABLE_NAP | CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_HAS_HIGH_BATS,
+ CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
+ COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+ 32, 32,
+ __setup_cpu_745x
+ },
+ { /* 7447/7457 Rev 1.0 */
+ 0xffffffff, 0x80020100, "7447/7457",
+ CPU_FTR_COMMON |
+ CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
+ CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
+ CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
+ COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
+ 32, 32,
+ __setup_cpu_745x
+ },
+ { /* 7447/7457 Rev 1.1 */
+ 0xffffffff, 0x80020101, "7447/7457",
+ CPU_FTR_COMMON |
+ CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
+ CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
+ CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
},
- { /* 7457 */
- 0xffff0000, 0x80020000, "7457",
+ { /* 7447/7457 Rev 1.2 and later */
+ 0xffff0000, 0x80020000, "7447/7457",
CPU_FTR_COMMON |
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_HAS_HIGH_BATS,
+ CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_HAS_HIGH_BATS,
+ CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
32, 32,
__setup_cpu_603
},
- { /* 8280 is a G2_LE (603e core, plus some) */
- 0x7fff0000, 0x00820000, "8280",
+ { /* All G2_LE (603e core, plus some) have the same pvr */
+ 0x7fff0000, 0x00820000, "G2_LE",
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_CAN_DOZE | CPU_FTR_USE_TB |
CPU_FTR_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
COMMON_PPC,
no_page:
return NULL;
}
+EXPORT_SYMBOL(__dma_alloc_coherent);
/*
* free a page as defined by the above mapping.
__func__, vaddr);
dump_stack();
}
-EXPORT_SYMBOL(dma_free_coherent);
+EXPORT_SYMBOL(__dma_free_coherent);
/*
* Initialise the consistent memory allocation.
rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
ori r1,r1,0xe14 /* clear out reserved bits and M */
andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
- mtspr RPA,r1
+ mtspr SPRN_RPA,r1
mfspr r3,IMISS
tlbli r3
mfspr r3,SRR1 /* Need to restore CR0 */
rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
ori r1,r1,0xe14 /* clear out reserved bits and M */
andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
- mtspr RPA,r1
+ mtspr SPRN_RPA,r1
mfspr r3,DMISS
tlbld r3
mfspr r3,SRR1 /* Need to restore CR0 */
rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
li r1,0xe15 /* clear out reserved bits and M */
andc r1,r3,r1 /* PP = user? 2: 0 */
- mtspr RPA,r1
+ mtspr SPRN_RPA,r1
mfspr r3,DMISS
tlbld r3
mfspr r3,SRR1 /* Need to restore CR0 */
tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
- ori r3,r3,PPC44x_TLB_TS /* Translation state 1 */
-
- li r0,1 /* TLB slot 1 */
-
- tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
- tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
- tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
-
/* Force context change */
isync
#endif /* CONFIG_SERIAL_TEXT_DEBUG */
evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
li r4,THREAD_ACC
evstddx evr6, r4, r3 /* save off accumulator */
+ mfspr r6,SPRN_SPEFSCR
+ stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */
beq 1f
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r3,MSR_SPE@h
cache_bitmask |= (1<<i);
return (void *)(&malloc_cache[i]);
}
- return 0;
+ return NULL;
}
void irq_kfree(void *ptr)
if (!shared) {
desc->depth = 0;
desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
- unmask_irq(irq);
+ if (desc->handler) {
+ if (desc->handler->startup)
+ desc->handler->startup(irq);
+ else if (desc->handler->enable)
+ desc->handler->enable(irq);
+ }
}
spin_unlock_irqrestore(&desc->lock,flags);
int i;
/* create /proc/irq */
- root_irq_dir = proc_mkdir("irq", 0);
+ root_irq_dir = proc_mkdir("irq", NULL);
/* create /proc/irq/prof_cpu_mask */
entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
struct pci_dev* dev;
unsigned int *class_code, *reg;
- class_code = (unsigned int *) get_property(node, "class-code", 0);
+ class_code = (unsigned int *) get_property(node, "class-code", NULL);
if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
continue;
- reg = (unsigned int *)get_property(node, "reg", 0);
+ reg = (unsigned int *)get_property(node, "reg", NULL);
if (!reg)
continue;
dev = pci_find_slot(pci_bus, ((reg[0] >> 8) & 0xff));
continue;
make_one_node_map(node, hose->first_busno);
}
- of_prop_map = get_property(find_path_device("/"), "pci-OF-bus-map", 0);
+ of_prop_map = get_property(find_path_device("/"), "pci-OF-bus-map", NULL);
if (of_prop_map)
memcpy(of_prop_map, pci_to_OF_bus_map, pci_bus_count);
#ifdef DEBUG
* a fake root for all functions of a multi-function device,
* we go down them as well.
*/
- class_code = (unsigned int *) get_property(node, "class-code", 0);
+ class_code = (unsigned int *) get_property(node, "class-code", NULL);
if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
strcmp(node->name, "multifunc-device"))
unsigned int *reg;
u8* fdata = (u8*)data;
- reg = (unsigned int *) get_property(node, "reg", 0);
+ reg = (unsigned int *) get_property(node, "reg", NULL);
if (reg && ((reg[0] >> 8) & 0xff) == fdata[1]
&& ((reg[0] >> 16) & 0xff) == fdata[0])
return 1;
if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child,
find_OF_pci_device_filter, (void *)node))
return -ENODEV;
- reg = (unsigned int *) get_property(node, "reg", 0);
+ reg = (unsigned int *) get_property(node, "reg", NULL);
if (!reg)
return -ENODEV;
*bus = (reg[0] >> 16) & 0xff;
} else {
/* error condition */
}
- debugger_fault_handler = 0;
+ debugger_fault_handler = NULL;
*buf = 0;
return buf;
}
} else {
/* error condition */
}
- debugger_fault_handler = 0;
+ debugger_fault_handler = NULL;
return mem;
}
} else {
/* error condition */
}
- debugger_fault_handler = 0;
+ debugger_fault_handler = NULL;
return (numChars);
}
#include <linux/ctype.h>
#include <linux/threads.h>
#include <linux/smp_lock.h>
+#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <asm/system.h>
#include <asm/reg.h>
-static ssize_t ppc_htab_read(struct file * file, char __user * buf,
- size_t count, loff_t *ppos);
+static int ppc_htab_show(struct seq_file *m, void *v);
static ssize_t ppc_htab_write(struct file * file, const char __user * buffer,
size_t count, loff_t *ppos);
-static long long ppc_htab_lseek(struct file * file, loff_t offset, int orig);
int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp);
+ void __user *buffer, size_t *lenp, loff_t *ppos);
extern PTE *Hash, *Hash_end;
extern unsigned long Hash_size, Hash_mask;
extern unsigned int primary_pteg_full;
extern unsigned int htab_hash_searches;
+static int ppc_htab_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ppc_htab_show, NULL);
+}
+
struct file_operations ppc_htab_operations = {
- .llseek = ppc_htab_lseek,
- .read = ppc_htab_read,
- .write = ppc_htab_write,
+ .open = ppc_htab_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = ppc_htab_write,
+ .release = single_release,
};
static char *pmc1_lookup(unsigned long mmcr0)
* is _REALLY_ slow (see the nested for loops below) but nothing
* in here should be really timing critical. -- Cort
*/
-static ssize_t ppc_htab_read(struct file * file, char __user * buf,
- size_t count, loff_t *ppos)
+static int ppc_htab_show(struct seq_file *m, void *v)
{
unsigned long mmcr0 = 0, pmc1 = 0, pmc2 = 0;
- int n = 0;
#if defined(CONFIG_PPC_STD_MMU) && !defined(CONFIG_PPC64BRIDGE)
unsigned int kptes = 0, uptes = 0;
PTE *ptr;
#endif /* CONFIG_PPC_STD_MMU */
- char buffer[512];
-
- if (count < 0)
- return -EINVAL;
if (cur_cpu_spec[0]->cpu_features & CPU_FTR_604_PERF_MON) {
mmcr0 = mfspr(SPRN_MMCR0);
pmc1 = mfspr(SPRN_PMC1);
pmc2 = mfspr(SPRN_PMC2);
- n += sprintf( buffer + n,
+ seq_printf(m,
"604 Performance Monitoring\n"
"MMCR0\t\t: %08lx %s%s ",
mmcr0,
( mmcr0>>28 & 0x2 ) ? "(user mode counted)" : "",
( mmcr0>>28 & 0x4 ) ? "(kernel mode counted)" : "");
- n += sprintf( buffer + n,
+ seq_printf(m,
"\nPMC1\t\t: %08lx (%s)\n"
"PMC2\t\t: %08lx (%s)\n",
pmc1, pmc1_lookup(mmcr0),
#ifdef CONFIG_PPC_STD_MMU
/* if we don't have a htab */
- if ( Hash_size == 0 )
- {
- n += sprintf( buffer + n, "No Hash Table used\n");
- goto return_string;
+ if ( Hash_size == 0 ) {
+ seq_printf(m, "No Hash Table used\n");
+ return 0;
}
#ifndef CONFIG_PPC64BRIDGE
}
#endif
- n += sprintf( buffer + n,
+ seq_printf(m,
"PTE Hash Table Information\n"
"Size\t\t: %luKb\n"
"Buckets\t\t: %lu\n"
#endif
);
- n += sprintf( buffer + n,
+ seq_printf(m,
"Reloads\t\t: %lu\n"
"Preloads\t: %lu\n"
"Searches\t: %u\n"
"Evicts\t\t: %lu\n",
htab_reloads, htab_preloads, htab_hash_searches,
primary_pteg_full, htab_evicts);
-return_string:
#endif /* CONFIG_PPC_STD_MMU */
- n += sprintf( buffer + n,
+ seq_printf(m,
"Non-error misses: %lu\n"
"Error misses\t: %lu\n",
pte_misses, pte_errors);
- if (*ppos >= strlen(buffer))
- return 0;
- if (n > strlen(buffer) - *ppos)
- n = strlen(buffer) - *ppos;
- if (n > count)
- n = count;
- if (copy_to_user(buf, buffer + *ppos, n))
- return -EFAULT;
- *ppos += n;
- return n;
+ return 0;
}
/*
unsigned long tmp;
char buffer[16];
- if ( current->uid != 0 )
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (strncpy_from_user(buffer, ubuffer, 15))
return -EFAULT;
#endif /* CONFIG_PPC_STD_MMU */
}
-
-static long long
-ppc_htab_lseek(struct file * file, loff_t offset, int orig)
-{
- long long ret = -EINVAL;
-
- lock_kernel();
- switch (orig) {
- case 0:
- file->f_pos = offset;
- ret = file->f_pos;
- break;
- case 1:
- file->f_pos += offset;
- ret = file->f_pos;
- }
- unlock_kernel();
- return ret;
-}
-
int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer_arg, size_t *lenp)
+ void __user *buffer_arg, size_t *lenp, loff_t *ppos)
{
int vleft, first=1, len, left, val;
char __user *buffer = (char __user *) buffer_arg;
if (!(cur_cpu_spec[0]->cpu_features & CPU_FTR_L2CR))
return -EFAULT;
- if ( /*!table->maxlen ||*/ (filp->f_pos && !write)) {
+ if ( /*!table->maxlen ||*/ (*ppos && !write)) {
*lenp = 0;
return 0;
}
}
if (!write && !first && left) {
- if(put_user('\n', (char *) buffer))
+ if(put_user('\n', (char __user *) buffer))
return -EFAULT;
left--, buffer++;
}
if (write) {
- p = (char *) buffer;
+ char __user *s = (char __user *) buffer;
while (left) {
char c;
- if(get_user(c, p++))
+ if(get_user(c, s++))
return -EFAULT;
if (!isspace(c))
break;
if (write && first)
return -EINVAL;
*lenp -= left;
- filp->f_pos += *lenp;
+ *ppos += *lenp;
return 0;
}
regs->gpr[1] = sp;
regs->msr = MSR_USER;
if (last_task_used_math == current)
- last_task_used_math = 0;
+ last_task_used_math = NULL;
if (last_task_used_altivec == current)
- last_task_used_altivec = 0;
+ last_task_used_altivec = NULL;
memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
current->thread.fpscr = 0;
#ifdef CONFIG_ALTIVEC
#endif
else
val = __unpack_fe01(tsk->thread.fpexc_mode);
- return put_user(val, (unsigned int *) adr);
+ return put_user(val, (unsigned int __user *) adr);
}
int sys_clone(unsigned long clone_flags, unsigned long usp,
/*
* Get contents of AltiVec register state in task TASK
*/
-static inline int get_vrregs(unsigned long *data, struct task_struct *task)
+static inline int get_vrregs(unsigned long __user *data, struct task_struct *task)
{
int i, j;
/*
* Write contents of AltiVec register state into task TASK.
*/
-static inline int set_vrregs(struct task_struct *task, unsigned long *data)
+static inline int set_vrregs(struct task_struct *task, unsigned long __user *data)
{
int i, j;
ret = -EIO;
if (copied != sizeof(tmp))
break;
- ret = put_user(tmp,(unsigned long *) data);
+ ret = put_user(tmp,(unsigned long __user *) data);
break;
}
preempt_enable();
tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
}
- ret = put_user(tmp,(unsigned long *) data);
+ ret = put_user(tmp,(unsigned long __user *) data);
break;
}
if (child->thread.regs->msr & MSR_VEC)
giveup_altivec(child);
preempt_enable();
- ret = get_vrregs((unsigned long *)data, child);
+ ret = get_vrregs((unsigned long __user *)data, child);
break;
case PTRACE_SETVRREGS:
if (child->thread.regs->msr & MSR_VEC)
giveup_altivec(child);
preempt_enable();
- ret = set_vrregs(child, (unsigned long *)data);
+ ret = set_vrregs(child, (unsigned long __user *)data);
break;
#endif
#ifdef CONFIG_SPE
/* Get the child spe register state. */
if (child->thread.regs->msr & MSR_SPE)
giveup_spe(child);
- ret = get_evrregs((unsigned long *)data, child);
+ ret = get_evrregs((unsigned long __user *)data, child);
break;
case PTRACE_SETEVRREGS:
* of register state from memory */
if (child->thread.regs->msr & MSR_SPE)
giveup_spe(child);
- ret = set_evrregs(child, (unsigned long *)data);
+ ret = set_evrregs(child, (unsigned long __user *)data);
break;
#endif
}
__setup("l2cr=", ppc_setup_l2cr);
-#ifdef CONFIG_NVRAM
+#ifdef CONFIG_GENERIC_NVRAM
/* Generic nvram hooks used by drivers/char/gen_nvram.c */
unsigned char nvram_read_byte(int addr)
#ifdef CONFIG_XMON
xmon_map_scc();
if (strstr(cmd_line, "xmon"))
- xmon(0);
+ xmon(NULL);
#endif /* CONFIG_XMON */
if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab);
* altivec/spe instructions at some point.
*/
static int
-save_user_regs(struct pt_regs *regs, struct mcontext *frame, int sigret)
+save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, int sigret)
{
/* save general and floating-point registers */
CHECK_FULL_REGS(regs);
* significant bits of a vector, we "cheat" and stuff VRSAVE in the
* most significant bits of that same vector. --BenH
*/
- if (__put_user(current->thread.vrsave, (u32 *)&frame->mc_vregs[32]))
+ if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
return 1;
#endif /* CONFIG_ALTIVEC */
memset(¤t->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
/* Always get VRSAVE back */
- if (__get_user(current->thread.vrsave, (u32 *)&sr->mc_vregs[32]))
+ if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
return 1;
#endif /* CONFIG_ALTIVEC */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
sigset_t set;
- struct mcontext *mcp;
+ struct mcontext __user *mcp;
if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(set))
|| __get_user(mcp, &ucp->uc_regs))
if (new_ctx == NULL)
return 0;
if (verify_area(VERIFY_READ, new_ctx, sizeof(*new_ctx))
- || __get_user(tmp, (u8 *) new_ctx)
- || __get_user(tmp, (u8 *) (new_ctx + 1) - 1))
+ || __get_user(tmp, (u8 __user *) new_ctx)
+ || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
return -EFAULT;
/*
/* create a stack frame for the caller of the handler */
newsp -= __SIGNAL_FRAMESIZE;
- if (verify_area(VERIFY_WRITE, (void *) newsp, origsp - newsp))
+ if (verify_area(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
goto badframe;
#if _NSIG != 64
set.sig[1] = sigctx._unused[3];
restore_sigmask(&set);
- sr = (struct mcontext *) sigctx.regs;
+ sr = (struct mcontext __user *) sigctx.regs;
if (verify_area(VERIFY_READ, sr, sizeof(*sr))
|| restore_user_regs(regs, sr, 1))
goto badframe;
break;
case SEMTIMEDOP:
ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
- second, (const struct timespec *) fifth);
+ second, (const struct timespec __user *) fifth);
break;
case SEMGET:
ret = sys_semget (first, second, third);
if (!ptr)
break;
if ((ret = verify_area (VERIFY_READ, ptr, sizeof(long)))
- || (ret = get_user(fourth.__pad, (void *__user *)ptr)))
+ || (ret = get_user(fourth.__pad, (void __user *__user *)ptr)))
break;
ret = sys_semctl (first, second, third, fourth);
break;
* sys_select() with the appropriate args. -- Cort
*/
int
-ppc_select(int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval *tvp)
+ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
{
if ( (unsigned long)n >= 4096 )
{
unsigned long __user *buffer = (unsigned long __user *)n;
if (verify_area(VERIFY_READ, buffer, 5*sizeof(unsigned long))
|| __get_user(n, buffer)
- || __get_user(inp, ((fd_set **)(buffer+1)))
- || __get_user(outp, ((fd_set **)(buffer+2)))
- || __get_user(exp, ((fd_set **)(buffer+3)))
- || __get_user(tvp, ((struct timeval **)(buffer+4))))
+ || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
+ || __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
+ || __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
+ || __get_user(tvp, ((struct timeval __user * __user *)(buffer+4))))
return -EFAULT;
}
return sys_select(n, inp, outp, exp, tvp);
info.si_signo = signr;
info.si_errno = 0;
info.si_code = code;
- info.si_addr = (void *) addr;
+ info.si_addr = (void __user *) addr;
force_sig_info(signr, &info, current);
}
unsigned int va, vb, vc, vd;
vector128 *vrs;
- if (get_user(instr, (unsigned int *) regs->nip))
+ if (get_user(instr, (unsigned int __user *) regs->nip))
return -EFAULT;
if ((instr >> 26) != 4)
return -EINVAL; /* not an altivec instruction */
/* Alignment must be a power of two */
if ((alignment & (alignment - 1)) != 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
info->alignment = alignment;
/* Validate size */
if (size <= 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
/* The region must be aligned */
s = (unsigned long)start;
e = e & ~m;
if (assure_empty(info, 1) < 0)
- return NULL;
+ return ERR_PTR(-ENOMEM);
blk = NULL;
list_for_each(l, &info->free_list) {
}
if (blk == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* Perfect fit */
if (bs == s && be == e) {
/* Validate size */
if (size <= 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
/* Align to configured alignment */
size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
if (assure_empty(info, 1) < 0)
- return NULL;
+ return ERR_PTR(-ENOMEM);
blk = NULL;
list_for_each(l, &info->free_list) {
}
if (blk == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* Just fits */
if (blk->size == size) {
/* Validate size */
if (size <= 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
/* The region must be aligned */
s = (unsigned long)start;
e = e & ~m;
if (assure_empty(info, 2) < 0)
- return NULL;
+ return ERR_PTR(-ENOMEM);
blk = NULL;
list_for_each(l, &info->free_list) {
}
if (blk == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* Perfect fit */
if (bs == s && be == e) {
}
/*
- * Configure PPC44x TLB for AS0 exception processing.
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
*/
-static void __init
-ppc44x_tlb_config(void)
+void __init MMU_init_hw(void)
+{
+ flush_instruction_cache();
+}
+
+unsigned long __init mmu_mapin_ram(void)
{
unsigned int pinned_tlbs = 1;
int i;
unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC44x_PIN_SIZE;
ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
}
-}
-
-/*
- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
- */
-void __init MMU_init_hw(void)
-{
- flush_instruction_cache();
-
- ppc44x_tlb_config();
-}
-
-/* TODO: Add large page lowmem mapping support */
-unsigned long __init mmu_mapin_ram(void)
-{
- unsigned long v, s, f = _PAGE_GUARDED;
- phys_addr_t p;
-
- v = KERNELBASE;
- p = PPC_MEMSTART;
-
- for (s = 0; s < total_lowmem; s += PAGE_SIZE) {
- if ((char *) v >= _stext && (char *) v < etext)
- f |= _PAGE_RAM_TEXT;
- else
- f |= _PAGE_RAM;
- map_page(v, p, f);
- v += PAGE_SIZE;
- p += PAGE_SIZE;
- }
-
- if (ppc_md.progress)
- ppc_md.progress("MMU:mmu_mapin_ram done", 0x401);
- return s;
+ return total_lowmem;
}
# Makefile for the linux ppc-specific parts of the memory manager.
#
-ifdef CONFIG_PPC64BRIDGE
-EXTRA_AFLAGS := -Wa,-mppc64bridge
-endif
-
obj-y := fault.o init.o mem_pieces.o \
mmu_context.o pgtable.o
{
unsigned int inst;
- if (get_user(inst, (unsigned int *)regs->nip))
+ if (get_user(inst, (unsigned int __user *)regs->nip))
return 0;
/* check for 1 in the rA field */
if (((inst >> 16) & 0x1f) != 1)
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = code;
- info.si_addr = (void *) address;
+ info.si_addr = (void __user *) address;
force_sig_info(SIGSEGV, &info, current);
return 0;
}
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
- info.si_addr = (void *)address;
+ info.si_addr = (void __user *)address;
force_sig_info (SIGBUS, &info, current);
if (!user_mode(regs))
return SIGBUS;
depends on ASH || BUBINGA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
default y
+config PPC4xx_DMA
+ bool "PPC4xx DMA controller support"
+ depends on 4xx
+
+config PPC4xx_EDMA
+ bool
+ depends on !STB03xxx && PPC4xx_DMA
+ default y
+
config PM
bool "Power Management support (EXPERIMENTAL)"
depends on 4xx && EXPERIMENTAL
#define UART0_IO_BASE (u8 *) 0xE0000200
#define UART1_IO_BASE (u8 *) 0xE0000300
-#define BASE_BAUD 33000000/3/16
+/* external Epson SG-615P */
+#define BASE_BAUD 691200
#define STD_UART_OP(num) \
{ 0, BASE_BAUD, 0, UART##num##_INT, \
default MPC8540_ADS
config MPC8540_ADS
- bool "MPC8540ADS"
+ bool "Freescale MPC8540 ADS"
help
This option enables support for the MPC 8540 ADS evaluation board.
+config MPC8555_CDS
+ bool "Freescale MPC8555 CDS"
+ help
+ This option enables support for the MPC8555 CDS evaluation board.
+
+config MPC8560_ADS
+ bool "Freescale MPC8560 ADS"
+ help
+ This option enables support for the MPC 8560 ADS evaluation board.
+
config SBC8560
bool "WindRiver PowerQUICC III SBC8560"
help
depends on MPC8540_ADS
default y
+config MPC8555
+ bool
+ depends on MPC8555_CDS
+ default y
+
config MPC8560
bool
- depends on SBC8560
+ depends on SBC8560 || MPC8560_ADS
+ default y
+
+config 85xx_PCI2
+ bool "Support for 2nd PCI host controller"
+ depends on MPC8555_CDS
default y
config FSL_OCP
config PPC_GEN550
bool
- depends on MPC8540 || SBC8560
+ depends on MPC8540 || SBC8560 || MPC8555
default y
endmenu
#
obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads_common.o mpc8540_ads.o
+obj-$(CONFIG_MPC8555_CDS) += mpc85xx_cds_common.o
+obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads_common.o mpc8560_ads.o
obj-$(CONFIG_SBC8560) += sbc85xx.o sbc8560.o
obj-$(CONFIG_MPC8540) += mpc8540.o
+obj-$(CONFIG_MPC8555) += mpc8555.o
obj-$(CONFIG_MPC8560) += mpc8560.o
#include <linux/serial.h>
#include <linux/tty.h> /* for linux/serial_core.h */
#include <linux/serial_core.h>
+#include <linux/initrd.h>
#include <linux/module.h>
#include <asm/system.h>
#define __MACH_MPC8540ADS_H__
#include <linux/config.h>
-#include <linux/serial.h>
#include <linux/initrd.h>
#include <syslib/ppc85xx_setup.h>
#include <platforms/85xx/mpc85xx_ads_common.h>
#include <linux/serial.h>
#include <linux/tty.h> /* for linux/serial_core.h */
#include <linux/serial_core.h>
+#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/initrd.h>
#define __MACH_SBC8560_H__
#include <linux/config.h>
-#include <linux/serial.h>
#include <platforms/85xx/sbc85xx.h>
+
+#define CPM_MAP_ADDR (CCSRBAR + MPC85xx_CPM_OFFSET)
#ifdef CONFIG_SERIAL_MANY_PORTS
#define RS_TABLE_SIZE 64
# Makefile for the linux kernel.
#
-ifdef CONFIG_PPC64BRIDGE
-EXTRA_AFLAGS := -Wa,-mppc64bridge
-endif
-ifdef CONFIG_40x
-EXTRA_AFLAGS := -Wa,-m405
-endif
-
# Extra CFLAGS so we don't have to do relative includes
CFLAGS_pmac_setup.o += -Iarch/$(ARCH)/mm
obj-$(CONFIG_CPU_FREQ_PMAC) += pmac_cpufreq.o
endif
obj-$(CONFIG_PMAC_BACKLIGHT) += pmac_backlight.o
-obj-$(CONFIG_PPC_RTAS) += error_log.o proc_rtas.o
obj-$(CONFIG_PREP_RESIDUAL) += residual.o
obj-$(CONFIG_ADIR) += adir_setup.o adir_pic.o adir_pci.o
obj-$(CONFIG_EST8260) += est8260_setup.o
obj-$(CONFIG_PQ2ADS) += pq2ads_setup.o
obj-$(CONFIG_TQM8260) += tqm8260_setup.o
-obj-$(CONFIG_EV64260) += ev64260.o
-obj-$(CONFIG_DMV182) += dmv182.o
+obj-$(CONFIG_EV64260) += ev64260_setup.o
obj-$(CONFIG_GEMINI) += gemini_pci.o gemini_setup.o gemini_prom.o
obj-$(CONFIG_K2) += k2.o
obj-$(CONFIG_LOPEC) += lopec_setup.o lopec_pci.o
obj-$(CONFIG_PPLUS) += pplus.o
obj-$(CONFIG_PRPMC750) += prpmc750.o
obj-$(CONFIG_PRPMC800) += prpmc800.o
+obj-$(CONFIG_RPX8260) += rpx8260.o
obj-$(CONFIG_SANDPOINT) += sandpoint.o
obj-$(CONFIG_SBC82xx) += sbc82xx.o
obj-$(CONFIG_SPRUCE) += spruce.o
+obj-$(CONFIG_LITE5200) += lite5200.o mpc5200.o
ifeq ($(CONFIG_SMP),y)
obj-$(CONFIG_PPC_PMAC) += pmac_smp.o
/*
* arch/ppc/platforms/ev64260.h
- *
+ *
* Definitions for Marvell/Galileo EV-64260-BP Evaluation Board.
*
* Author: Mark A. Greer <mgreer@mvista.com>
*
- * 2001-2002 (c) MontaVista, Software, Inc. This file is licensed under
+ * 2001 (c) MontaVista, Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
/*
- * The MV64x60 has 2 PCI buses each with 1 window from the CPU bus to
+ * The GT64260 has 2 PCI buses each with 1 window from the CPU bus to
* PCI I/O space and 4 windows from the CPU bus to PCI MEM space.
* We'll only use one PCI MEM window on each PCI bus.
- *
- * This is the CPU physical memory map (windows must be at least 1MB and start
- * on a boundary that is a multiple of the window size):
- *
- * 0xfc000000-0xffffffff - External FLASH on device module
- * 0xfbf00000-0xfbffffff - Embedded (on board) FLASH
- * 0xfbe00000-0xfbefffff - GT64260 Registers (preferably)
- * but really a config option
- * 0xfbd00000-0xfbdfffff - External SRAM on device module
- * 0xfbc00000-0xfbcfffff - TODC chip on device module
- * 0xfbb00000-0xfbbfffff - External UART on device module
- * 0xa2000000-0xfbafffff - <hole>
- * 0xa1000000-0xa1ffffff - PCI 1 I/O (defined in gt64260.h)
- * 0xa0000000-0xa0ffffff - PCI 0 I/O (defined in gt64260.h)
- * 0x90000000-0x9fffffff - PCI 1 MEM (defined in gt64260.h)
- * 0x80000000-0x8fffffff - PCI 0 MEM (defined in gt64260.h)
*/
#ifndef __PPC_PLATFORMS_EV64260_H
#define __PPC_PLATFORMS_EV64260_H
-#ifndef MAX
-#define MAX(a,b) (((a) > (b)) ? (a) : (b))
-#endif
-
-/*
- * CPU Physical Memory Map setup.
- */
-#define EV64260_EXT_FLASH_BASE 0xfc000000
-#define EV64260_EMB_FLASH_BASE 0xfbf00000
-#define EV64260_EXT_SRAM_BASE 0xfbd00000
-#define EV64260_TODC_BASE 0xfbc00000
-#define EV64260_UART_BASE 0xfbb00000
+#define EV64260_BRIDGE_REG_BASE 0xf8000000
+#define EV64260_BRIDGE_REG_BASE_TO_TOP 0x08000000U
-#define EV64260_EXT_FLASH_SIZE_ACTUAL 0x04000000 /* <= 64MB Extern FLASH */
-#define EV64260_EMB_FLASH_SIZE_ACTUAL 0x00080000 /* 512KB of Embed FLASH */
-#define EV64260_EXT_SRAM_SIZE_ACTUAL 0x00100000 /* 1MB SDRAM */
-#define EV64260_TODC_SIZE_ACTUAL 0x00000020 /* 32 bytes for TODC */
-#define EV64260_UART_SIZE_ACTUAL 0x00000040 /* 64 bytes for DUART */
-
-#define EV64260_EXT_FLASH_SIZE MAX(GT64260_WINDOW_SIZE_MIN, \
- EV64260_EXT_FLASH_SIZE_ACTUAL)
-#define EV64260_EMB_FLASH_SIZE MAX(GT64260_WINDOW_SIZE_MIN, \
- EV64260_EMB_FLASH_SIZE_ACTUAL)
-#define EV64260_EXT_SRAM_SIZE MAX(GT64260_WINDOW_SIZE_MIN, \
- EV64260_EXT_SRAM_SIZE_ACTUAL)
-#define EV64260_TODC_SIZE MAX(GT64260_WINDOW_SIZE_MIN, \
- EV64260_TODC_SIZE_ACTUAL)
-#if 0 /* XXXX blows up assembler in bootloader */
-#define EV64260_UART_SIZE MAX(GT64260_WINDOW_SIZE_MIN, \
- EV64260_UART_SIZE_ACTUAL)
-#else
-#define EV64260_UART_SIZE GT64260_WINDOW_SIZE_MIN
-#endif
-#define EV64260_UART_END ((EV64260_UART_BASE + \
- EV64260_UART_SIZE - 1) & 0xfff00000)
-
-/*
- * Board-specific IRQ info
- */
-#define EV64260_UART_0_IRQ 85
-#define EV64260_UART_1_IRQ 86
-#define EV64260_PCI_0_IRQ 91
-#define EV64260_PCI_1_IRQ 93
+#define EV64260_TODC_BASE 0xfc800000
+#define EV64260_TODC_LEN 0x00800000
+#define EV64260_TODC_END (EV64260_TODC_BASE + \
+ EV64260_TODC_LEN - 1)
-/*
- * Serial port setup.
- */
-#define EV64260_DEFAULT_BAUD 115200
-
-#if defined(CONFIG_SERIAL_MPSC_CONSOLE)
-#define SERIAL_PORT_DFNS
-
-#define EV64260_MPSC_CLK_SRC 8 /* TCLK */
-#define EV64260_MPSC_CLK_FREQ 100000000 /* 100MHz clk */
-#else
+#define EV64260_UART_BASE 0xfd000000
+#define EV64260_UART_LEN 0x00800000
+#define EV64260_UART_END (EV64260_UART_BASE + \
+ EV64260_UART_LEN - 1)
+/* Serial driver setup. */
#define EV64260_SERIAL_0 (EV64260_UART_BASE + 0x20)
#define EV64260_SERIAL_1 EV64260_UART_BASE
-#define BASE_BAUD (EV64260_DEFAULT_BAUD * 2)
+#define BASE_BAUD ( 3686400 / 16 )
#ifdef CONFIG_SERIAL_MANY_PORTS
#define RS_TABLE_SIZE 64
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF|ASYNC_SKIP_TEST)
#endif
+#if !defined(CONFIG_GT64260_CONSOLE)
/* Required for bootloader's ns16550.c code */
#define STD_SERIAL_PORT_DFNS \
- { 0, BASE_BAUD, EV64260_SERIAL_0, EV64260_UART_0_IRQ, STD_COM_FLAGS, \
- iomem_base: (u8 *)EV64260_SERIAL_0, /* ttyS0 */ \
- iomem_reg_shift: 2, \
- io_type: SERIAL_IO_MEM },
-
-#if 0
- { 1, BASE_BAUD, EV64260_SERIAL_1, EV64260_UART_1_IRQ, STD_COM_FLAGS, \
- iomem_base: (u8 *)EV64260_SERIAL_1, /* ttyS1 */ \
+ { 0, BASE_BAUD, EV64260_SERIAL_0, 85, STD_COM_FLAGS, /* ttyS0 */\
+ iomem_base: (u8 *)EV64260_SERIAL_0, \
iomem_reg_shift: 2, \
io_type: SERIAL_IO_MEM },
-#endif
#define SERIAL_PORT_DFNS \
STD_SERIAL_PORT_DFNS
+#else
+#define SERIAL_PORT_DFNS
#endif
+
#endif /* __PPC_PLATFORMS_EV64260_H */
static int __pmac pmac_cpufreq_init_7447A(struct device_node *cpunode)
{
struct device_node *volt_gpio_np;
+ u32 *reg;
/* OF only reports the high frequency */
hi_freq = cur_freq;
return 1;
}
- u32 *reg = (u32 *)get_property(volt_gpio_np, "reg", NULL);
+ reg = (u32 *)get_property(volt_gpio_np, "reg", NULL);
voltage_gpio = *reg;
set_speed_proc = dfs_set_cpu_speed;
int len;
/* For PCI<->PCI bridges or CardBus bridges, we go down */
- class_code = (unsigned int *) get_property(node, "class-code", 0);
+ class_code = (unsigned int *) get_property(node, "class-code", NULL);
if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
continue;
* (iBook, G4, new IMacs, and all the recent Apple machines).
* It contains 3 controllers in one ASIC.
*
- * The U3 is the bridge used on G5 machines. It contains on
+ * The U3 is the bridge used on G5 machines. It contains an
* AGP bus which is dealt with the old UniNorth access routines
- * and an HyperTransport bus which uses its own set of access
+ * and a HyperTransport bus which uses its own set of access
* functions.
*/
continue;
if (0x0035 != *prop)
continue;
- prop = (u32 *)get_property(nec, "reg", 0);
+ prop = (u32 *)get_property(nec, "reg", NULL);
if (prop == NULL)
continue;
devfn = (prop[0] >> 8) & 0xff;
* any of the 0xfxxxxxxx "fine" memory regions to /ht.
* We need to fix that sooner or later by either parsing all child "ranges"
* properties or figuring out the U3 address space decoding logic and
- * then read it's configuration register (if any).
+ * then read its configuration register (if any).
*/
hose->io_base_phys = 0xf4000000 + 0x00400000;
hose->io_base_virt = ioremap(hose->io_base_phys, 0x00400000);
* default, gmac is not powered up, and so will be absent
* from the kernel initial PCI lookup.
*
- * Should be replaced by 2.4 new PCI mecanisms and really
- * regiser the device.
+ * Should be replaced by 2.4 new PCI mechanisms and really
+ * register the device.
*/
pci_read_config_word(dev, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
+/* When an irq gets requested for the first client, if it's an
+ * edge interrupt, we clear any previous one on the controller
+ */
+static unsigned int __pmac pmac_startup_irq(unsigned int irq_nr)
+{
+ unsigned long bit = 1UL << (irq_nr & 0x1f);
+ int i = irq_nr >> 5;
+
+ if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0)
+ out_le32(&pmac_irq_hw[i]->ack, bit);
+ set_bit(irq_nr, ppc_cached_irq_mask);
+ pmac_set_irq_mask(irq_nr, 0);
+
+ return 0;
+}
+
static void __pmac pmac_mask_irq(unsigned int irq_nr)
{
clear_bit(irq_nr, ppc_cached_irq_mask);
struct hw_interrupt_type pmac_pic = {
- " PMAC-PIC ",
- NULL,
- NULL,
- pmac_unmask_irq,
- pmac_mask_irq,
- pmac_mask_and_ack_irq,
- pmac_end_irq,
- NULL
+ .typename = " PMAC-PIC ",
+ .startup = pmac_startup_irq,
+ .enable = pmac_unmask_irq,
+ .disable = pmac_mask_irq,
+ .ack = pmac_mask_and_ack_irq,
+ .end = pmac_end_irq,
};
struct hw_interrupt_type gatwick_pic = {
- " GATWICK ",
- NULL,
- NULL,
- pmac_unmask_irq,
- pmac_mask_irq,
- pmac_mask_and_ack_irq,
- pmac_end_irq,
- NULL
+ .typename = " GATWICK ",
+ .startup = pmac_startup_irq,
+ .enable = pmac_unmask_irq,
+ .disable = pmac_mask_irq,
+ .ack = pmac_mask_and_ack_irq,
+ .end = pmac_end_irq,
};
static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
nmi_irq = pswitch->intrs[0].line;
openpic_init_nmi_irq(nmi_irq);
request_irq(nmi_irq, xmon_irq, 0,
- "NMI - XMON", 0);
+ "NMI - XMON", NULL);
}
}
#endif /* CONFIG_XMON */
for ( i = max_real_irqs ; i < max_irqs ; i++ )
irq_desc[i].handler = &gatwick_pic;
request_irq( irq_cascade, gatwick_action, SA_INTERRUPT,
- "cascade", 0 );
+ "cascade", NULL );
}
printk("System has %d possible interrupts\n", max_irqs);
if (max_irqs != max_real_irqs)
max_real_irqs);
#ifdef CONFIG_XMON
- request_irq(20, xmon_irq, 0, "NMI - XMON", 0);
+ request_irq(20, xmon_irq, 0, "NMI - XMON", NULL);
#endif /* CONFIG_XMON */
}
/* reset the entry point so if we get another intr we won't
* try to startup again */
out_be32(psurge_start, 0x100);
- if (request_irq(30, psurge_primary_intr, SA_INTERRUPT, "primary IPI", 0))
+ if (request_irq(30, psurge_primary_intr, SA_INTERRUPT, "primary IPI", NULL))
printk(KERN_ERR "Couldn't get primary IPI interrupt");
}
}
/* Check the first PCI device to see if it is a Raven. */
- early_read_config_dword(0, 0, 0, PCI_VENDOR_ID, &devid);
+ early_read_config_dword(NULL, 0, 0, PCI_VENDOR_ID, &devid);
switch (devid & 0xffff0000) {
case MPIC_RAVEN_ID:
/* Read the memory base register. */
- early_read_config_dword(0, 0, 0, PCI_BASE_ADDRESS_1, &pci_membase);
+ early_read_config_dword(NULL, 0, 0, PCI_BASE_ADDRESS_1, &pci_membase);
if (pci_membase == 0) {
OpenPIC_Addr = NULL;
irq_desc[i].handler = &i8259_pic;
/* If we have a Raven PCI bridge or a Hawk PCI bridge / Memory
* controller, we poll (as they have a different int-ack address). */
- early_read_config_dword(0, 0, 0, PCI_VENDOR_ID, &pci_viddid);
+ early_read_config_dword(NULL, 0, 0, PCI_VENDOR_ID, &pci_viddid);
pci_did = (pci_viddid & 0xffff0000) >> 16;
if (((pci_viddid & 0xffff) == PCI_VENDOR_ID_MOTOROLA)
&& ((pci_did == PCI_DEVICE_ID_MOTOROLA_RAVEN)
!(n--) ) return res->Devices+i;
#undef Dev
}
- return 0;
+ return NULL;
}
PPC_DEVICE __init *residual_find_device_id(unsigned long BusMask,
!(n--) ) return res->Devices+i;
#undef Dev
}
- return 0;
+ return NULL;
}
PnP_TAG_PACKET *PnP_find_packet(unsigned char *p,
int n)
{
unsigned mask, masked_tag, size;
- if(!p) return 0;
+ if(!p) return NULL;
if (tag_type(packet_tag)) mask=0xff; else mask=0xF8;
masked_tag = packet_tag&mask;
for(; *p != END_TAG; p+=size) {
else
size=tag_small_count(*p)+1;
}
- return 0; /* not found */
+ return NULL; /* not found */
}
PnP_TAG_PACKET __init *PnP_find_small_vendor_packet(unsigned char *p,
return (PnP_TAG_PACKET *) p;
next = 1;
};
- return 0; /* not found */
+ return NULL; /* not found */
}
PnP_TAG_PACKET __init *PnP_find_large_vendor_packet(unsigned char *p,
return (PnP_TAG_PACKET *) p;
next = 1;
};
- return 0; /* not found */
+ return NULL; /* not found */
}
#ifdef CONFIG_PROC_PREPRESIDUAL
# Makefile for the linux kernel.
#
-ifdef CONFIG_PPC64BRIDGE
-EXTRA_AFLAGS := -Wa,-mppc64bridge
-endif
-ifdef CONFIG_4xx
-EXTRA_AFLAGS := -Wa,-m405
-endif
-ifdef CONFIG_E500
-EXTRA_AFLAGS := -Wa,-me500
-endif
-
CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
obj-$(CONFIG_4xx) += ppc4xx_pic.o
obj-$(CONFIG_40x) += ppc4xx_setup.o
obj-$(CONFIG_GEN_RTC) += todc_time.o
-obj-$(CONFIG_KGDB) += ppc4xx_kgdb.o
+obj-$(CONFIG_PPC4xx_DMA) += ppc4xx_dma.o
+obj-$(CONFIG_PPC4xx_EDMA) += ppc4xx_sgdma.o
ifeq ($(CONFIG_40x),y)
obj-$(CONFIG_KGDB) += ppc4xx_kgdb.o
obj-$(CONFIG_PCI) += indirect_pci.o pci_auto.o ppc405_pci.o
obj-$(CONFIG_ADIR) += i8259.o indirect_pci.o pci_auto.o \
todc_time.o
obj-$(CONFIG_EBONY) += indirect_pci.o pci_auto.o todc_time.o
-obj-$(CONFIG_EV64260) += indirect_pci.o todc_time.o pci_auto.o
-obj-$(CONFIG_DMV182) += indirect_pci.o todc_time.o pci_auto.o
+obj-$(CONFIG_EV64260) += gt64260_common.o gt64260_pic.o \
+ indirect_pci.o todc_time.o pci_auto.o
obj-$(CONFIG_GEMINI) += open_pic.o indirect_pci.o
-obj-$(CONFIG_GT64260) += gt64260_pic.o
obj-$(CONFIG_K2) += i8259.o indirect_pci.o todc_time.o \
pci_auto.o
obj-$(CONFIG_LOPEC) += i8259.o pci_auto.o todc_time.o
open_pic.o i8259.o hawk_common.o
obj-$(CONFIG_MENF1) += todc_time.o i8259.o mpc10x_common.o \
pci_auto.o indirect_pci.o
-obj-$(CONFIG_MV64360) += mv64360_pic.o
-obj-$(CONFIG_MV64X60) += mv64x60.o mv64x60_ocp.o
obj-$(CONFIG_MVME5100) += open_pic.o todc_time.o indirect_pci.o \
i8259.o pci_auto.o hawk_common.o
obj-$(CONFIG_OCOTEA) += indirect_pci.o pci_auto.o todc_time.o
obj-$(CONFIG_SBC82xx) += todc_time.o
obj-$(CONFIG_SPRUCE) += cpc700_pic.o indirect_pci.o pci_auto.o \
todc_time.o
-obj-$(CONFIG_8260) += m8260_setup.o cpm2_pic.o
+obj-$(CONFIG_8260) += m8260_setup.o
obj-$(CONFIG_PCI_8260) += m8260_pci.o indirect_pci.o
obj-$(CONFIG_8260_PCI9) += m8260_pci_erratum9.o
-obj-$(CONFIG_CPM2) += cpm2_common.o
+obj-$(CONFIG_CPM2) += cpm2_common.o cpm2_pic.o
ifeq ($(CONFIG_PPC_GEN550),y)
obj-$(CONFIG_KGDB) += gen550_kgdb.o gen550_dbg.o
obj-$(CONFIG_SERIAL_TEXT_DEBUG) += gen550_dbg.o
ifeq ($(CONFIG_85xx),y)
obj-$(CONFIG_PCI) += indirect_pci.o pci_auto.o
endif
+obj-$(CONFIG_PPC_MPC52xx) += mpc52xx_setup.o mpc52xx_pic.o
*/
cpm2_map_t *cpm2_immr;
+#define CPM_MAP_SIZE (0x40000) /* 256k - the PQ3 reserves this amount
+ of space for CPM as it is larger
+ than on PQ2 */
+
void
cpm2_reset(void)
{
- cpm2_immr = (cpm2_map_t *)CPM_MAP_ADDR;
+ cpm2_immr = (cpm2_map_t *)ioremap(CPM_MAP_ADDR, CPM_MAP_SIZE);
/* Reclaim the DP memory for our use.
*/
* oversampled clock.
*/
void
-cpm2_setbrg(uint brg, uint rate)
+cpm_setbrg(uint brg, uint rate)
{
volatile uint *bp;
static void cpm2_dpinit(void)
{
- void *dprambase = &((cpm2_map_t *)CPM_MAP_ADDR)->im_dprambase;
-
spin_lock_init(&cpm_dpmem_lock);
/* initialize the info header */
* varies with the processor and the microcode patches activated.
* But the following should be at least safe.
*/
- rh_attach_region(&cpm_dpmem_info, dprambase + CPM_DATAONLY_BASE,
+ rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE,
CPM_DATAONLY_SIZE);
}
-/* This function used to return an index into the DPRAM area.
- * Now it returns the actuall physical address of that area.
- * use cpm2_dpram_offset() to get the index
+/* This function returns an index into the DPRAM area.
*/
-void *cpm2_dpalloc(uint size, uint align)
+uint cpm_dpalloc(uint size, uint align)
{
void *start;
unsigned long flags;
start = rh_alloc(&cpm_dpmem_info, size, "commproc");
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
- return start;
+ return (uint)start;
}
-EXPORT_SYMBOL(cpm2_dpalloc);
+EXPORT_SYMBOL(cpm_dpalloc);
-int cpm2_dpfree(void *addr)
+int cpm_dpfree(uint offset)
{
int ret;
unsigned long flags;
spin_lock_irqsave(&cpm_dpmem_lock, flags);
- ret = rh_free(&cpm_dpmem_info, addr);
+ ret = rh_free(&cpm_dpmem_info, (void *)offset);
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
return ret;
}
-EXPORT_SYMBOL(cpm2_dpfree);
+EXPORT_SYMBOL(cpm_dpfree);
/* not sure if this is ever needed */
-void *cpm2_dpalloc_fixed(void *addr, uint size, uint align)
+uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
{
void *start;
unsigned long flags;
spin_lock_irqsave(&cpm_dpmem_lock, flags);
cpm_dpmem_info.alignment = align;
- start = rh_alloc_fixed(&cpm_dpmem_info, addr, size, "commproc");
+ start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
- return start;
+ return (uint)start;
}
-EXPORT_SYMBOL(cpm2_dpalloc_fixed);
+EXPORT_SYMBOL(cpm_dpalloc_fixed);
-void cpm2_dpdump(void)
+void cpm_dpdump(void)
{
rh_dump(&cpm_dpmem_info);
}
-EXPORT_SYMBOL(cpm2_dpdump);
-
-uint cpm2_dpram_offset(void *addr)
-{
- return (uint)((u_char *)addr -
- ((uint)((cpm2_map_t *)CPM_MAP_ADDR)->im_dprambase));
-}
-EXPORT_SYMBOL(cpm2_dpram_offset);
+EXPORT_SYMBOL(cpm_dpdump);
-void *cpm2_dpram_addr(int offset)
+void *cpm_dpram_addr(uint offset)
{
- return (void *)&((cpm2_map_t *)CPM_MAP_ADDR)->im_dprambase[offset];
+ return (void *)&cpm2_immr->im_dprambase[offset];
}
-EXPORT_SYMBOL(cpm2_dpram_addr);
+EXPORT_SYMBOL(cpm_dpram_addr);
*
* Interrupt controller support for Galileo's GT64260.
*
- * Author: Chris Zankel <source@mvista.com>
+ * Author: Chris Zankel <chris@mvista.com>
* Modified by: Mark A. Greer <mgreer@mvista.com>
*
* Based on sources from Rabeeh Khoury / Galileo Technology
#include <asm/io.h>
#include <asm/system.h>
#include <asm/irq.h>
-#include <asm/ocp.h>
-#include <asm/mv64x60.h>
+#include <asm/gt64260.h>
/* ========================== forward declaration ========================== */
u32 gt64260_irq_base = 0; /* GT64260 handles the next 96 IRQs from here */
-static mv64x60_handle_t base_bh;
-static mv64x60_handle_t ic_bh;
-
/* gt64260_init_irq()
*
* This function initializes the interrupt controller. It assigns
__init void
gt64260_init_irq(void)
{
- struct ocp_def *def;
int i;
-/* XXXX extract reg base, irq base from ocp */
-/* XXXX rewrite read/write macros to not use 'bh'?? */
-/* XXXX Have to use ocp b/c can pass arg to this routine */
-
if ( ppc_md.progress ) ppc_md.progress("gt64260_init_irq: enter", 0x0);
- if ((def = ocp_get_one_device(OCP_VENDOR_MARVELL, OCP_FUNC_HB,
- OCP_ANY_INDEX)) == NULL) {
- /* XXXX SCREAM */
- return;
- }
- base_bh.v_base = (u32)ioremap(def->paddr, 0x10000); /* XXXX */
-
- if ((def = ocp_get_one_device(OCP_VENDOR_MARVELL, OCP_FUNC_PIC,
- OCP_ANY_INDEX)) == NULL) {
- /* XXXX SCREAM */
- return;
- }
- ic_bh.v_base = (u32)ioremap(def->paddr, 0x1000); /* XXXX */
-
ppc_cached_irq_mask[0] = 0;
ppc_cached_irq_mask[1] = 0x0f000000; /* Enable GPP intrs */
ppc_cached_irq_mask[2] = 0;
/* disable all interrupts and clear current interrupts */
- mv64x60_write(&base_bh, MV64x60_GPP_INTR_MASK, ppc_cached_irq_mask[2]);
- mv64x60_write(&base_bh, MV64x60_GPP_INTR_CAUSE,0);
- mv64x60_write(&ic_bh, GT64260_IC_CPU_INTR_MASK_LO, ppc_cached_irq_mask[0]);
- mv64x60_write(&ic_bh, GT64260_IC_CPU_INTR_MASK_HI, ppc_cached_irq_mask[1]);
+ gt_write(GT64260_GPP_INTR_MASK, ppc_cached_irq_mask[2]);
+ gt_write(GT64260_GPP_INTR_CAUSE,0);
+ gt_write(GT64260_IC_CPU_INTR_MASK_LO, ppc_cached_irq_mask[0]);
+ gt_write(GT64260_IC_CPU_INTR_MASK_HI, ppc_cached_irq_mask[1]);
/* use the gt64260 for all (possible) interrupt sources */
for( i = gt64260_irq_base; i < (gt64260_irq_base + 96); i++ ) {
}
-/*
- * gt64260_get_irq()
+/* gt64260_get_irq()
*
* This function returns the lowest interrupt number of all interrupts that
* are currently asserted.
int irq;
int irq_gpp;
- irq = mv64x60_read(&ic_bh, GT64260_IC_MAIN_CAUSE_LO);
+ irq = gt_read(GT64260_IC_MAIN_CAUSE_LO);
irq = __ilog2((irq & 0x3dfffffe) & ppc_cached_irq_mask[0]);
if (irq == -1) {
- irq = mv64x60_read(&ic_bh, GT64260_IC_MAIN_CAUSE_HI);
+ irq = gt_read(GT64260_IC_MAIN_CAUSE_HI);
irq = __ilog2((irq & 0x0f000db7) & ppc_cached_irq_mask[1]);
if (irq == -1) {
irq = -2; /* bogus interrupt, should never happen */
} else {
if (irq >= 24) {
- irq_gpp = mv64x60_read(&base_bh, MV64x60_GPP_INTR_CAUSE);
+ irq_gpp = gt_read(GT64260_GPP_INTR_CAUSE);
irq_gpp = __ilog2(irq_gpp &
ppc_cached_irq_mask[2]);
irq = -2;
} else {
irq = irq_gpp + 64;
- mv64x60_write(&base_bh, MV64x60_GPP_INTR_CAUSE, ~(1<<(irq-64)));
+ gt_write(GT64260_GPP_INTR_CAUSE, ~(1<<(irq-64)));
}
} else {
irq += 32;
static void
gt64260_unmask_irq(unsigned int irq)
{
- /* XXXX
- printk("XXXX: *** unmask irq: %d\n", irq);
- */
irq -= gt64260_irq_base;
if (irq > 31) {
if (irq > 63) {
/* unmask GPP irq */
- mv64x60_write(&base_bh, MV64x60_GPP_INTR_MASK,
+ gt_write(GT64260_GPP_INTR_MASK,
ppc_cached_irq_mask[2] |= (1<<(irq-64)));
} else {
/* mask high interrupt register */
- mv64x60_write(&ic_bh, GT64260_IC_CPU_INTR_MASK_HI,
+ gt_write(GT64260_IC_CPU_INTR_MASK_HI,
ppc_cached_irq_mask[1] |= (1<<(irq-32)));
}
} else {
/* mask low interrupt register */
- mv64x60_write(&ic_bh, GT64260_IC_CPU_INTR_MASK_LO,
+ gt_write(GT64260_IC_CPU_INTR_MASK_LO,
ppc_cached_irq_mask[0] |= (1<<irq));
}
}
static void
gt64260_mask_irq(unsigned int irq)
{
- /* XXXX
- printk("XXXX: *** mask irq: %d\n", irq);
- */
irq -= gt64260_irq_base;
if (irq > 31) {
if (irq > 63) {
/* mask GPP irq */
- mv64x60_write(&base_bh, MV64x60_GPP_INTR_MASK,
+ gt_write(GT64260_GPP_INTR_MASK,
ppc_cached_irq_mask[2] &= ~(1<<(irq-64)));
} else {
/* mask high interrupt register */
- mv64x60_write(&ic_bh, GT64260_IC_CPU_INTR_MASK_HI,
+ gt_write(GT64260_IC_CPU_INTR_MASK_HI,
ppc_cached_irq_mask[1] &= ~(1<<(irq-32)));
}
} else {
/* mask low interrupt register */
- mv64x60_write(&ic_bh, GT64260_IC_CPU_INTR_MASK_LO,
+ gt_write(GT64260_IC_CPU_INTR_MASK_LO,
ppc_cached_irq_mask[0] &= ~(1<<irq));
}
volatile cpm2_map_t *immap = cpm2_immr;
/* allocate IDMA dpram */
- dpram_offset = cpm2_dpalloc(sizeof(idma_dpram_t), 64);
- idma_dpram =
- (volatile idma_dpram_t *)&immap->im_dprambase[dpram_offset];
+ dpram_offset = cpm_dpalloc(sizeof(idma_dpram_t), 64);
+ idma_dpram = cpm_dpram_addr(dpram_offset);
/* initialize the IDMA parameter RAM */
memset((void *)idma_dpram, 0, sizeof(idma_dpram_t));
* Externally called, however, it takes an IPI number (0...OPENPIC_NUM_IPI)
* and not a system-wide interrupt number
*/
-void openpic_cause_IPI(u_int ipi, u_int cpumask)
+void openpic_cause_IPI(u_int ipi, cpumask_t cpumask)
{
+ cpumask_t phys;
DECL_THIS_CPU;
CHECK_THIS_CPU;
check_arg_ipi(ipi);
+ phys = physmask(cpumask);
openpic_write(&OpenPIC->THIS_CPU.IPI_Dispatch(ipi),
- physmask(cpumask));
+ cpus_addr(physmask(cpumask))[0]);
}
void openpic_request_IPIs(void)
/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset,
openpic_ipi_action, SA_INTERRUPT,
- "IPI0 (call function)", 0);
+ "IPI0 (call function)", NULL);
request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset+1,
openpic_ipi_action, SA_INTERRUPT,
- "IPI1 (reschedule)", 0);
+ "IPI1 (reschedule)", NULL);
request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset+2,
openpic_ipi_action, SA_INTERRUPT,
- "IPI2 (invalidate tlb)", 0);
+ "IPI2 (invalidate tlb)", NULL);
request_irq(OPENPIC_VEC_IPI+open_pic_irq_offset+3,
openpic_ipi_action, SA_INTERRUPT,
- "IPI3 (xmon break)", 0);
+ "IPI3 (xmon break)", NULL);
for ( i = 0; i < OPENPIC_NUM_IPI ; i++ )
openpic_enable_ipi(OPENPIC_VEC_IPI+open_pic_irq_offset+i);
spin_lock(&openpic_setup_lock);
#ifdef CONFIG_IRQ_ALL_CPUS
- cpu_set(smp_hw_index[smp_processor_id()], mask);
+ cpu_set(smp_hw_index[smp_processor_id()], msk);
/* let the openpic know we want intrs. default affinity
* is 0xffffffff until changed via /proc
void
smp_openpic_message_pass(int target, int msg, unsigned long data, int wait)
{
+ cpumask_t mask = CPU_MASK_ALL;
/* make sure we're sending something that translates to an IPI */
if (msg > 0x3) {
printk("SMP %d: smp_message_pass: unknown msg %d\n",
}
switch (target) {
case MSG_ALL:
- openpic_cause_IPI(msg, 0xffffffff);
+ openpic_cause_IPI(msg, mask);
break;
case MSG_ALL_BUT_SELF:
- openpic_cause_IPI(msg,
- 0xffffffff & ~(1 << smp_processor_id()));
+ cpu_clear(smp_processor_id(), mask);
+ openpic_cause_IPI(msg, mask);
break;
default:
- openpic_cause_IPI(msg, 1<<target);
+ openpic_cause_IPI(msg, cpumask_of_cpu(target));
break;
}
}
/*
- * Author: Pete Popov <ppopov@mvista.com> or source@mvista.com
+ * arch/ppc/kernel/ppc4xx_dma.c
*
- * arch/ppc/kernel/ppc405_dma.c
+ * IBM PPC4xx DMA engine core library
*
- * 2000 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
+ * Copyright 2000-2004 MontaVista Software Inc.
*
- * IBM 405 DMA Controller Functions
+ * Cleaned up and converted to new DCR access
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * Original code by Armin Kuster <akuster@mvista.com>
+ * and Pete Popov <ppopov@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/config.h>
#include <linux/kernel.h>
-#include <asm/system.h>
-#include <asm/io.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <asm/ppc405_dma.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/ppc4xx_dma.h>
+
+ppc_dma_ch_t dma_channels[MAX_PPC4xx_DMA_CHANNELS];
+
+int
+ppc4xx_get_dma_status(void)
+{
+ return (mfdcr(DCRN_DMASR));
+}
+
+void
+ppc4xx_set_src_addr(int dmanr, phys_addr_t src_addr)
+{
+ if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+ printk("set_src_addr: bad channel: %d\n", dmanr);
+ return;
+ }
+
+#ifdef PPC4xx_DMA64BIT
+ mtdcr(DCRN_DMASAH0 + dmanr*2, (u32)(src_addr >> 32));
+#else
+ mtdcr(DCRN_DMASA0 + dmanr*2, (u32)src_addr);
+#endif
+}
+
+void
+ppc4xx_set_dst_addr(int dmanr, phys_addr_t dst_addr)
+{
+ if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+ printk("set_dst_addr: bad channel: %d\n", dmanr);
+ return;
+ }
+
+#ifdef PPC4xx_DMA64BIT
+ mtdcr(DCRN_DMADAH0 + dmanr*2, (u32)(dst_addr >> 32));
+#else
+ mtdcr(DCRN_DMADA0 + dmanr*2, (u32)dst_addr);
+#endif
+}
+
+void
+ppc4xx_enable_dma(unsigned int dmanr)
+{
+ unsigned int control;
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+ unsigned int status_bits[] = { DMA_CS0 | DMA_TS0 | DMA_CH0_ERR,
+ DMA_CS1 | DMA_TS1 | DMA_CH1_ERR,
+ DMA_CS2 | DMA_TS2 | DMA_CH2_ERR,
+ DMA_CS3 | DMA_TS3 | DMA_CH3_ERR};
+
+ if (p_dma_ch->in_use) {
+ printk("enable_dma: channel %d in use\n", dmanr);
+ return;
+ }
+
+ if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+ printk("enable_dma: bad channel: %d\n", dmanr);
+ return;
+ }
+
+ if (p_dma_ch->mode == DMA_MODE_READ) {
+ /* peripheral to memory */
+ ppc4xx_set_src_addr(dmanr, 0);
+ ppc4xx_set_dst_addr(dmanr, p_dma_ch->addr);
+ } else if (p_dma_ch->mode == DMA_MODE_WRITE) {
+ /* memory to peripheral */
+ ppc4xx_set_src_addr(dmanr, p_dma_ch->addr);
+ ppc4xx_set_dst_addr(dmanr, 0);
+ }
+
+ /* for other xfer modes, the addresses are already set */
+ control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+
+ control &= ~(DMA_TM_MASK | DMA_TD); /* clear all mode bits */
+ if (p_dma_ch->mode == DMA_MODE_MM) {
+ /* software initiated memory to memory */
+ control |= DMA_ETD_OUTPUT | DMA_TCE_ENABLE;
+ }
+
+ mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+ /*
+ * Clear the CS, TS, RI bits for the channel from DMASR. This
+ * has been observed to happen correctly only after the mode and
+ * ETD/DCE bits in DMACRx are set above. Must do this before
+ * enabling the channel.
+ */
+
+ mtdcr(DCRN_DMASR, status_bits[dmanr]);
+
+ /*
+ * For device-paced transfers, Terminal Count Enable apparently
+ * must be on, and this must be turned on after the mode, etc.
+ * bits are cleared above (at least on Redwood-6).
+ */
+
+ if ((p_dma_ch->mode == DMA_MODE_MM_DEVATDST) ||
+ (p_dma_ch->mode == DMA_MODE_MM_DEVATSRC))
+ control |= DMA_TCE_ENABLE;
+
+ /*
+ * Now enable the channel.
+ */
+
+ control |= (p_dma_ch->mode | DMA_CE_ENABLE);
+
+ mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+ p_dma_ch->in_use = 1;
+}
+
+void
+ppc4xx_disable_dma(unsigned int dmanr)
+{
+ unsigned int control;
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+ if (!p_dma_ch->in_use) {
+ printk("disable_dma: channel %d not in use\n", dmanr);
+ return;
+ }
+
+ if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+ printk("disable_dma: bad channel: %d\n", dmanr);
+ return;
+ }
+
+ control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+ control &= ~DMA_CE_ENABLE;
+ mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+ p_dma_ch->in_use = 0;
+}
+
+/*
+ * Sets the dma mode for single DMA transfers only.
+ * For scatter/gather transfers, the mode is passed to the
+ * alloc_dma_handle() function as one of the parameters.
+ *
+ * The mode is simply saved and used later. This allows
+ * the driver to call set_dma_mode() and set_dma_addr() in
+ * any order.
+ *
+ * Valid mode values are:
+ *
+ * DMA_MODE_READ peripheral to memory
+ * DMA_MODE_WRITE memory to peripheral
+ * DMA_MODE_MM memory to memory
+ * DMA_MODE_MM_DEVATSRC device-paced memory to memory, device at src
+ * DMA_MODE_MM_DEVATDST device-paced memory to memory, device at dst
+ */
+int
+ppc4xx_set_dma_mode(unsigned int dmanr, unsigned int mode)
+{
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+ if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+ printk("set_dma_mode: bad channel 0x%x\n", dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ p_dma_ch->mode = mode;
+
+ return DMA_STATUS_GOOD;
+}
+
+/*
+ * Sets the DMA Count register. Note that 'count' is in bytes.
+ * However, the DMA Count register counts the number of "transfers",
+ * where each transfer is equal to the bus width. Thus, count
+ * MUST be a multiple of the bus width.
+ */
+void
+ppc4xx_set_dma_count(unsigned int dmanr, unsigned int count)
+{
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+#ifdef DEBUG_4xxDMA
+ {
+ int error = 0;
+ switch (p_dma_ch->pwidth) {
+ case PW_8:
+ break;
+ case PW_16:
+ if (count & 0x1)
+ error = 1;
+ break;
+ case PW_32:
+ if (count & 0x3)
+ error = 1;
+ break;
+ case PW_64:
+ if (count & 0x7)
+ error = 1;
+ break;
+ default:
+ printk("set_dma_count: invalid bus width: 0x%x\n",
+ p_dma_ch->pwidth);
+ return;
+ }
+ if (error)
+ printk
+ ("Warning: set_dma_count count 0x%x bus width %d\n",
+ count, p_dma_ch->pwidth);
+ }
+#endif
+ count = count >> p_dma_ch->shift;
+
+ mtdcr(DCRN_DMACT0 + (dmanr * 0x8), count);
+}
/*
- * Function prototypes
+ * Returns the number of bytes left to be transferred.
+ * After a DMA transfer, this should return zero.
+ * Reading this while a DMA transfer is still in progress will return
+ * unpredictable results.
*/
+int
+ppc4xx_get_dma_residue(unsigned int dmanr)
+{
+ unsigned int count;
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-int hw_init_dma_channel(unsigned int, ppc_dma_ch_t *);
-int init_dma_channel(unsigned int);
-int get_channel_config(unsigned int, ppc_dma_ch_t *);
-int set_channel_priority(unsigned int, unsigned int);
-unsigned int get_peripheral_width(unsigned int);
-int alloc_dma_handle(sgl_handle_t *, unsigned int, unsigned int);
-void free_dma_handle(sgl_handle_t);
+ if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+ printk("ppc4xx_get_dma_residue: bad channel 0x%x\n", dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+ count = mfdcr(DCRN_DMACT0 + (dmanr * 0x8));
-ppc_dma_ch_t dma_channels[MAX_405GP_DMA_CHANNELS];
+ return (count << p_dma_ch->shift);
+}
+
+/*
+ * Sets the DMA address for a memory to peripheral or peripheral
+ * to memory transfer. The address is just saved in the channel
+ * structure for now and used later in enable_dma().
+ */
+void
+ppc4xx_set_dma_addr(unsigned int dmanr, phys_addr_t addr)
+{
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+ if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+ printk("ppc4xx_set_dma_addr: bad channel: %d\n", dmanr);
+ return;
+ }
+
+#ifdef DEBUG_4xxDMA
+ {
+ int error = 0;
+ switch (p_dma_ch->pwidth) {
+ case PW_8:
+ break;
+ case PW_16:
+ if ((unsigned) addr & 0x1)
+ error = 1;
+ break;
+ case PW_32:
+ if ((unsigned) addr & 0x3)
+ error = 1;
+ break;
+ case PW_64:
+ if ((unsigned) addr & 0x7)
+ error = 1;
+ break;
+ default:
+ printk("ppc4xx_set_dma_addr: invalid bus width: 0x%x\n",
+ p_dma_ch->pwidth);
+ return;
+ }
+ if (error)
+ printk("Warning: ppc4xx_set_dma_addr addr 0x%x bus width %d\n",
+ addr, p_dma_ch->pwidth);
+ }
+#endif
+
+ /* save dma address and program it later after we know the xfer mode */
+ p_dma_ch->addr = addr;
+}
+
+/*
+ * Sets both DMA addresses for a memory to memory transfer.
+ * For memory to peripheral or peripheral to memory transfers
+ * the function set_dma_addr() should be used instead.
+ */
+void
+ppc4xx_set_dma_addr2(unsigned int dmanr, phys_addr_t src_dma_addr,
+ phys_addr_t dst_dma_addr)
+{
+ if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+ printk("ppc4xx_set_dma_addr2: bad channel: %d\n", dmanr);
+ return;
+ }
+
+#ifdef DEBUG_4xxDMA
+ {
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+ int error = 0;
+ switch (p_dma_ch->pwidth) {
+ case PW_8:
+ break;
+ case PW_16:
+ if (((unsigned) src_dma_addr & 0x1) ||
+ ((unsigned) dst_dma_addr & 0x1)
+ )
+ error = 1;
+ break;
+ case PW_32:
+ if (((unsigned) src_dma_addr & 0x3) ||
+ ((unsigned) dst_dma_addr & 0x3)
+ )
+ error = 1;
+ break;
+ case PW_64:
+ if (((unsigned) src_dma_addr & 0x7) ||
+ ((unsigned) dst_dma_addr & 0x7)
+ )
+ error = 1;
+ break;
+ default:
+ printk("ppc4xx_set_dma_addr2: invalid bus width: 0x%x\n",
+ p_dma_ch->pwidth);
+ return;
+ }
+ if (error)
+ printk
+ ("Warning: ppc4xx_set_dma_addr2 src 0x%x dst 0x%x bus width %d\n",
+ src_dma_addr, dst_dma_addr, p_dma_ch->pwidth);
+ }
+#endif
+
+ ppc4xx_set_src_addr(dmanr, src_dma_addr);
+ ppc4xx_set_dst_addr(dmanr, dst_dma_addr);
+}
+
+/*
+ * Enables the channel interrupt.
+ *
+ * If performing a scatter/gather transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list. Otherwise, interrupts will not be enabled, if
+ * they were previously disabled.
+ */
+int
+ppc4xx_enable_dma_interrupt(unsigned int dmanr)
+{
+ unsigned int control;
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+ if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+ printk("ppc4xx_enable_dma_interrupt: bad channel: %d\n", dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ p_dma_ch->int_enable = 1;
+
+ control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+ control |= DMA_CIE_ENABLE; /* Channel Interrupt Enable */
+ mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+ return DMA_STATUS_GOOD;
+}
+
+/*
+ * Disables the channel interrupt.
+ *
+ * If performing a scatter/gather transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list. Otherwise, interrupts will not be disabled, if
+ * they were previously enabled.
+ */
+int
+ppc4xx_disable_dma_interrupt(unsigned int dmanr)
+{
+ unsigned int control;
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+ if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+ printk("ppc4xx_disable_dma_interrupt: bad channel: %d\n", dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ p_dma_ch->int_enable = 0;
+
+ control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+ control &= ~DMA_CIE_ENABLE; /* Channel Interrupt Enable */
+ mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+ return DMA_STATUS_GOOD;
+}
/*
* Configures a DMA channel, including the peripheral bus width, if a
* called from platform specific init code. The driver should not need to
* call this function.
*/
-int hw_init_dma_channel(unsigned int dmanr, ppc_dma_ch_t *p_init)
+int
+ppc4xx_init_dma_channel(unsigned int dmanr, ppc_dma_ch_t * p_init)
{
- unsigned int polarity;
- uint32_t control = 0;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
-
-#ifdef DEBUG_405DMA
- if (!p_init) {
- printk("hw_init_dma_channel: NULL p_init\n");
- return DMA_STATUS_NULL_POINTER;
- }
- if (dmanr >= MAX_405GP_DMA_CHANNELS) {
- printk("hw_init_dma_channel: bad channel %d\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-#endif
+ unsigned int polarity;
+ uint32_t control = 0;
+ ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+ DMA_MODE_READ = (unsigned long) DMA_TD; /* Peripheral to Memory */
+ DMA_MODE_WRITE = 0; /* Memory to Peripheral */
+
+ if (!p_init) {
+ printk("ppc4xx_init_dma_channel: NULL p_init\n");
+ return DMA_STATUS_NULL_POINTER;
+ }
+
+ if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+ printk("ppc4xx_init_dma_channel: bad channel %d\n", dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
#if DCRN_POL > 0
- polarity = mfdcr(DCRN_POL);
+ polarity = mfdcr(DCRN_POL);
#else
- polarity = 0;
+ polarity = 0;
#endif
- /* Setup the control register based on the values passed to
- * us in p_init. Then, over-write the control register with this
- * new value.
- */
-
- control |= (
- SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */
- SET_DMA_BEN(p_init->buffer_enable) | /* buffer enable */
- SET_DMA_ETD(p_init->etd_output) | /* end of transfer pin */
- SET_DMA_TCE(p_init->tce_enable) | /* terminal count enable */
- SET_DMA_PL(p_init->pl) | /* peripheral location */
- SET_DMA_DAI(p_init->dai) | /* dest addr increment */
- SET_DMA_SAI(p_init->sai) | /* src addr increment */
- SET_DMA_PRIORITY(p_init->cp) | /* channel priority */
- SET_DMA_PW(p_init->pwidth) | /* peripheral/bus width */
- SET_DMA_PSC(p_init->psc) | /* peripheral setup cycles */
- SET_DMA_PWC(p_init->pwc) | /* peripheral wait cycles */
- SET_DMA_PHC(p_init->phc) | /* peripheral hold cycles */
- SET_DMA_PREFETCH(p_init->pf) /* read prefetch */
- );
-
- switch (dmanr) {
- case 0:
- /* clear all polarity signals and then "or" in new signal levels */
- polarity &= ~(DMAReq0_ActiveLow | DMAAck0_ActiveLow | EOT0_ActiveLow);
- polarity |= p_dma_ch->polarity;
-#if DCRN_POL > 0
- mtdcr(DCRN_POL, polarity);
-#endif
- mtdcr(DCRN_DMACR0, control);
- break;
- case 1:
- polarity &= ~(DMAReq1_ActiveLow | DMAAck1_ActiveLow | EOT1_ActiveLow);
- polarity |= p_dma_ch->polarity;
-#if DCRN_POL > 0
- mtdcr(DCRN_POL, polarity);
-#endif
- mtdcr(DCRN_DMACR1, control);
- break;
- case 2:
- polarity &= ~(DMAReq2_ActiveLow | DMAAck2_ActiveLow | EOT2_ActiveLow);
- polarity |= p_dma_ch->polarity;
-#if DCRN_POL > 0
- mtdcr(DCRN_POL, polarity);
-#endif
- mtdcr(DCRN_DMACR2, control);
- break;
- case 3:
- polarity &= ~(DMAReq3_ActiveLow | DMAAck3_ActiveLow | EOT3_ActiveLow);
- polarity |= p_dma_ch->polarity;
+ /* Setup the control register based on the values passed to
+ * us in p_init. Then, over-write the control register with this
+ * new value.
+ */
+ control |= SET_DMA_CONTROL;
+
+ /* clear all polarity signals and then "or" in new signal levels */
+ polarity &= ~GET_DMA_POLARITY(dmanr);
+ polarity |= p_dma_ch->polarity;
#if DCRN_POL > 0
- mtdcr(DCRN_POL, polarity);
+ mtdcr(DCRN_POL, polarity);
#endif
- mtdcr(DCRN_DMACR3, control);
- break;
- default:
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- /* save these values in our dma channel structure */
- memcpy(p_dma_ch, p_init, sizeof(ppc_dma_ch_t));
-
- /*
- * The peripheral width values written in the control register are:
- * PW_8 0
- * PW_16 1
- * PW_32 2
- * PW_64 3
- *
- * Since the DMA count register takes the number of "transfers",
- * we need to divide the count sent to us in certain
- * functions by the appropriate number. It so happens that our
- * right shift value is equal to the peripheral width value.
- */
- p_dma_ch->shift = p_init->pwidth;
-
- /*
- * Save the control word for easy access.
- */
- p_dma_ch->control = control;
-
- mtdcr(DCRN_DMASR, 0xffffffff); /* clear status register */
- return DMA_STATUS_GOOD;
+ mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+ /* save these values in our dma channel structure */
+ memcpy(p_dma_ch, p_init, sizeof (ppc_dma_ch_t));
+
+ /*
+ * The peripheral width values written in the control register are:
+ * PW_8 0
+ * PW_16 1
+ * PW_32 2
+ * PW_64 3
+ *
+ * Since the DMA count register takes the number of "transfers",
+ * we need to divide the count sent to us in certain
+ * functions by the appropriate number. It so happens that our
+ * right shift value is equal to the peripheral width value.
+ */
+ p_dma_ch->shift = p_init->pwidth;
+
+ /*
+ * Save the control word for easy access.
+ */
+ p_dma_ch->control = control;
+
+ mtdcr(DCRN_DMASR, 0xffffffff); /* clear status register */
+ return DMA_STATUS_GOOD;
}
-
-
-
/*
* This function returns the channel configuration.
*/
-int get_channel_config(unsigned int dmanr, ppc_dma_ch_t *p_dma_ch)
+int
+ppc4xx_get_channel_config(unsigned int dmanr, ppc_dma_ch_t * p_dma_ch)
{
- unsigned int polarity;
- unsigned int control;
+ unsigned int polarity;
+ unsigned int control;
+
+ if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+ printk("ppc4xx_get_channel_config: bad channel %d\n", dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
#if DCRN_POL > 0
- polarity = mfdcr(DCRN_POL);
+ polarity = mfdcr(DCRN_POL);
#else
- polarity = 0;
+ polarity = 0;
#endif
- switch (dmanr) {
- case 0:
- p_dma_ch->polarity =
- polarity & (DMAReq0_ActiveLow | DMAAck0_ActiveLow | EOT0_ActiveLow);
- control = mfdcr(DCRN_DMACR0);
- break;
- case 1:
- p_dma_ch->polarity =
- polarity & (DMAReq1_ActiveLow | DMAAck1_ActiveLow | EOT1_ActiveLow);
- control = mfdcr(DCRN_DMACR1);
- break;
- case 2:
- p_dma_ch->polarity =
- polarity & (DMAReq2_ActiveLow | DMAAck2_ActiveLow | EOT2_ActiveLow);
- control = mfdcr(DCRN_DMACR2);
- break;
- case 3:
- p_dma_ch->polarity =
- polarity & (DMAReq3_ActiveLow | DMAAck3_ActiveLow | EOT3_ActiveLow);
- control = mfdcr(DCRN_DMACR3);
- break;
- default:
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- p_dma_ch->cp = GET_DMA_PRIORITY(control);
- p_dma_ch->pwidth = GET_DMA_PW(control);
- p_dma_ch->psc = GET_DMA_PSC(control);
- p_dma_ch->pwc = GET_DMA_PWC(control);
- p_dma_ch->phc = GET_DMA_PHC(control);
- p_dma_ch->pf = GET_DMA_PREFETCH(control);
- p_dma_ch->int_enable = GET_DMA_CIE_ENABLE(control);
- p_dma_ch->shift = GET_DMA_PW(control);
-
- return DMA_STATUS_GOOD;
+ p_dma_ch->polarity = polarity & GET_DMA_POLARITY(dmanr);
+ control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+
+ p_dma_ch->cp = GET_DMA_PRIORITY(control);
+ p_dma_ch->pwidth = GET_DMA_PW(control);
+ p_dma_ch->psc = GET_DMA_PSC(control);
+ p_dma_ch->pwc = GET_DMA_PWC(control);
+ p_dma_ch->phc = GET_DMA_PHC(control);
+ p_dma_ch->ce = GET_DMA_CE_ENABLE(control);
+ p_dma_ch->int_enable = GET_DMA_CIE_ENABLE(control);
+ p_dma_ch->shift = GET_DMA_PW(control);
+
+#ifdef CONFIG_PPC4xx_EDMA
+ p_dma_ch->pf = GET_DMA_PREFETCH(control);
+#else
+ p_dma_ch->ch_enable = GET_DMA_CH(control);
+ p_dma_ch->ece_enable = GET_DMA_ECE(control);
+ p_dma_ch->tcd_disable = GET_DMA_TCD(control);
+#endif
+ return DMA_STATUS_GOOD;
}
/*
* PRIORITY_HIGH
*
*/
-int set_channel_priority(unsigned int dmanr, unsigned int priority)
+int
+ppc4xx_set_channel_priority(unsigned int dmanr, unsigned int priority)
{
- unsigned int control;
-
-#ifdef DEBUG_405DMA
- if ( (priority != PRIORITY_LOW) &&
- (priority != PRIORITY_MID_LOW) &&
- (priority != PRIORITY_MID_HIGH) &&
- (priority != PRIORITY_HIGH)) {
- printk("set_channel_priority: bad priority: 0x%x\n", priority);
- }
-#endif
+ unsigned int control;
- switch (dmanr) {
- case 0:
- control = mfdcr(DCRN_DMACR0);
- control|= SET_DMA_PRIORITY(priority);
- mtdcr(DCRN_DMACR0, control);
- break;
- case 1:
- control = mfdcr(DCRN_DMACR1);
- control|= SET_DMA_PRIORITY(priority);
- mtdcr(DCRN_DMACR1, control);
- break;
- case 2:
- control = mfdcr(DCRN_DMACR2);
- control|= SET_DMA_PRIORITY(priority);
- mtdcr(DCRN_DMACR2, control);
- break;
- case 3:
- control = mfdcr(DCRN_DMACR3);
- control|= SET_DMA_PRIORITY(priority);
- mtdcr(DCRN_DMACR3, control);
- break;
- default:
-#ifdef DEBUG_405DMA
- printk("set_channel_priority: bad channel: %d\n", dmanr);
-#endif
- return DMA_STATUS_BAD_CHANNEL;
- }
- return DMA_STATUS_GOOD;
-}
+ if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+ printk("ppc4xx_set_channel_priority: bad channel %d\n", dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ if ((priority != PRIORITY_LOW) &&
+ (priority != PRIORITY_MID_LOW) &&
+ (priority != PRIORITY_MID_HIGH) && (priority != PRIORITY_HIGH)) {
+ printk("ppc4xx_set_channel_priority: bad priority: 0x%x\n", priority);
+ }
+ control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+ control |= SET_DMA_PRIORITY(priority);
+ mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+ return DMA_STATUS_GOOD;
+}
/*
* Returns the width of the peripheral attached to this channel. This assumes
*
* The function returns 0 on error.
*/
-unsigned int get_peripheral_width(unsigned int dmanr)
+unsigned int
+ppc4xx_get_peripheral_width(unsigned int dmanr)
{
- unsigned int control;
-
- switch (dmanr) {
- case 0:
- control = mfdcr(DCRN_DMACR0);
- break;
- case 1:
- control = mfdcr(DCRN_DMACR1);
- break;
- case 2:
- control = mfdcr(DCRN_DMACR2);
- break;
- case 3:
- control = mfdcr(DCRN_DMACR3);
- break;
- default:
-#ifdef DEBUG_405DMA
- printk("get_peripheral_width: bad channel: %d\n", dmanr);
-#endif
- return 0;
- }
- return(GET_DMA_PW(control));
-}
-
-
-
-
-/*
- * Create a scatter/gather list handle. This is simply a structure which
- * describes a scatter/gather list.
- *
- * A handle is returned in "handle" which the driver should save in order to
- * be able to access this list later. A chunk of memory will be allocated
- * to be used by the API for internal management purposes, including managing
- * the sg list and allocating memory for the sgl descriptors. One page should
- * be more than enough for that purpose. Perhaps it's a bit wasteful to use
- * a whole page for a single sg list, but most likely there will be only one
- * sg list per channel.
- *
- * Interrupt notes:
- * Each sgl descriptor has a copy of the DMA control word which the DMA engine
- * loads in the control register. The control word has a "global" interrupt
- * enable bit for that channel. Interrupts are further qualified by a few bits
- * in the sgl descriptor count register. In order to setup an sgl, we have to
- * know ahead of time whether or not interrupts will be enabled at the completion
- * of the transfers. Thus, enable_dma_interrupt()/disable_dma_interrupt() MUST
- * be called before calling alloc_dma_handle(). If the interrupt mode will never
- * change after powerup, then enable_dma_interrupt()/disable_dma_interrupt()
- * do not have to be called -- interrupts will be enabled or disabled based
- * on how the channel was configured after powerup by the hw_init_dma_channel()
- * function. Each sgl descriptor will be setup to interrupt if an error occurs;
- * however, only the last descriptor will be setup to interrupt. Thus, an
- * interrupt will occur (if interrupts are enabled) only after the complete
- * sgl transfer is done.
- */
-int alloc_dma_handle(sgl_handle_t *phandle, unsigned int mode, unsigned int dmanr)
-{
- sgl_list_info_t *psgl;
- dma_addr_t dma_addr;
- ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
- uint32_t sg_command;
- void *ret;
-
-#ifdef DEBUG_405DMA
- if (!phandle) {
- printk("alloc_dma_handle: null handle pointer\n");
- return DMA_STATUS_NULL_POINTER;
- }
- switch (mode) {
- case DMA_MODE_READ:
- case DMA_MODE_WRITE:
- case DMA_MODE_MM:
- case DMA_MODE_MM_DEVATSRC:
- case DMA_MODE_MM_DEVATDST:
- break;
- default:
- printk("alloc_dma_handle: bad mode 0x%x\n", mode);
- return DMA_STATUS_BAD_MODE;
- }
- if (dmanr >= MAX_405GP_DMA_CHANNELS) {
- printk("alloc_dma_handle: invalid channel 0x%x\n", dmanr);
- return DMA_STATUS_BAD_CHANNEL;
- }
-#endif
+ unsigned int control;
- /* Get a page of memory, which is zeroed out by pci_alloc_consistent() */
-
-/* wrong not a pci device - armin */
- /* psgl = (sgl_list_info_t *) pci_alloc_consistent(NULL, SGL_LIST_SIZE, &dma_addr);
-*/
-
- ret = consistent_alloc(GFP_ATOMIC |GFP_DMA, SGL_LIST_SIZE, &dma_addr);
- if (ret != NULL) {
- memset(ret, 0,SGL_LIST_SIZE );
- psgl = (sgl_list_info_t *) ret;
+ if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+ printk("ppc4xx_get_peripheral_width: bad channel %d\n", dmanr);
+ return DMA_STATUS_BAD_CHANNEL;
}
+ control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
- if (psgl == NULL) {
- *phandle = (sgl_handle_t)NULL;
- return DMA_STATUS_OUT_OF_MEMORY;
- }
-
- psgl->dma_addr = dma_addr;
- psgl->dmanr = dmanr;
-
- /*
- * Modify and save the control word. These word will get written to each sgl
- * descriptor. The DMA engine then loads this control word into the control
- * register every time it reads a new descriptor.
- */
- psgl->control = p_dma_ch->control;
- psgl->control &= ~(DMA_TM_MASK | DMA_TD); /* clear all "mode" bits first */
- psgl->control |= (mode | DMA_CH_ENABLE); /* save the control word along with the mode */
-
- if (p_dma_ch->int_enable) {
- psgl->control |= DMA_CIE_ENABLE; /* channel interrupt enabled */
- }
- else {
- psgl->control &= ~DMA_CIE_ENABLE;
- }
-
-#if DCRN_ASGC > 0
- sg_command = mfdcr(DCRN_ASGC);
- switch (dmanr) {
- case 0:
- sg_command |= SSG0_MASK_ENABLE;
- break;
- case 1:
- sg_command |= SSG1_MASK_ENABLE;
- break;
- case 2:
- sg_command |= SSG2_MASK_ENABLE;
- break;
- case 3:
- sg_command |= SSG3_MASK_ENABLE;
- break;
- default:
-#ifdef DEBUG_405DMA
- printk("alloc_dma_handle: bad channel: %d\n", dmanr);
-#endif
- free_dma_handle((sgl_handle_t)psgl);
- *phandle = (sgl_handle_t)NULL;
- return DMA_STATUS_BAD_CHANNEL;
- }
-
- mtdcr(DCRN_ASGC, sg_command); /* enable writing to this channel's sgl control bits */
-#else
- (void)sg_command;
-#endif
- psgl->sgl_control = SG_ERI_ENABLE | SG_LINK; /* sgl descriptor control bits */
-
- if (p_dma_ch->int_enable) {
- if (p_dma_ch->tce_enable)
- psgl->sgl_control |= SG_TCI_ENABLE;
- else
- psgl->sgl_control |= SG_ETI_ENABLE;
- }
-
- *phandle = (sgl_handle_t)psgl;
- return DMA_STATUS_GOOD;
-}
-
-
-
-/*
- * Destroy a scatter/gather list handle that was created by alloc_dma_handle().
- * The list must be empty (contain no elements).
- */
-void free_dma_handle(sgl_handle_t handle)
-{
- sgl_list_info_t *psgl = (sgl_list_info_t *)handle;
-
- if (!handle) {
-#ifdef DEBUG_405DMA
- printk("free_dma_handle: got NULL\n");
-#endif
- return;
- }
- else if (psgl->phead) {
-#ifdef DEBUG_405DMA
- printk("free_dma_handle: list not empty\n");
-#endif
- return;
- }
- else if (!psgl->dma_addr) { /* should never happen */
-#ifdef DEBUG_405DMA
- printk("free_dma_handle: no dma address\n");
-#endif
- return;
- }
-
- /* wrong not a PCI device -armin */
- /* pci_free_consistent(NULL, SGL_LIST_SIZE, (void *)psgl, psgl->dma_addr); */
- // free_pages((unsigned long)psgl, get_order(SGL_LIST_SIZE));
- consistent_free((void *)psgl);
-
-
+ return (GET_DMA_PW(control));
}
-EXPORT_SYMBOL(hw_init_dma_channel);
-EXPORT_SYMBOL(get_channel_config);
-EXPORT_SYMBOL(set_channel_priority);
-EXPORT_SYMBOL(get_peripheral_width);
-EXPORT_SYMBOL(alloc_dma_handle);
-EXPORT_SYMBOL(free_dma_handle);
+EXPORT_SYMBOL(ppc4xx_init_dma_channel);
+EXPORT_SYMBOL(ppc4xx_get_channel_config);
+EXPORT_SYMBOL(ppc4xx_set_channel_priority);
+EXPORT_SYMBOL(ppc4xx_get_peripheral_width);
EXPORT_SYMBOL(dma_channels);
+EXPORT_SYMBOL(ppc4xx_set_src_addr);
+EXPORT_SYMBOL(ppc4xx_set_dst_addr);
+EXPORT_SYMBOL(ppc4xx_set_dma_addr);
+EXPORT_SYMBOL(ppc4xx_set_dma_addr2);
+EXPORT_SYMBOL(ppc4xx_enable_dma);
+EXPORT_SYMBOL(ppc4xx_disable_dma);
+EXPORT_SYMBOL(ppc4xx_set_dma_mode);
+EXPORT_SYMBOL(ppc4xx_set_dma_count);
+EXPORT_SYMBOL(ppc4xx_get_dma_residue);
+EXPORT_SYMBOL(ppc4xx_enable_dma_interrupt);
+EXPORT_SYMBOL(ppc4xx_disable_dma_interrupt);
+EXPORT_SYMBOL(ppc4xx_get_dma_status);
pci->piwar2 = 0;
pci->piwar3 = 0;
- /* Setup 512M Phys:PCI 1:1 outbound mem window @ 0x80000000 */
+ /* Setup Phys:PCI 1:1 outbound mem window @ MPC85XX_PCI1_LOWER_MEM */
pci->potar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff;
pci->potear1 = 0x00000000;
pci->powbar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff;
- pci->powar1 = 0x8004401c; /* Enable, Mem R/W, 512M */
+ /* Enable, Mem R/W */
+ pci->powar1 = 0x80044000 |
+ (__ilog2(MPC85XX_PCI1_UPPER_MEM - MPC85XX_PCI1_LOWER_MEM + 1) - 1);
- /* Setup 16M outboud IO windows @ 0xe2000000 */
+	/* Setup outbound IO windows @ MPC85XX_PCI1_IO_BASE */
pci->potar2 = 0x00000000;
pci->potear2 = 0x00000000;
pci->powbar2 = (MPC85XX_PCI1_IO_BASE >> 12) & 0x000fffff;
- pci->powar2 = 0x80088017; /* Enable, IO R/W, 16M */
+ /* Enable, IO R/W */
+ pci->powar2 = 0x80088000 | (__ilog2(MPC85XX_PCI1_IO_SIZE) - 1);
/* Setup 2G inbound Memory Window @ 0 */
pci->pitar1 = 0x00000000;
extern int mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin);
extern int mpc85xx_exclude_device(u_char bus, u_char devfn);
-#if CONFIG_85xx_PCI2
+#ifdef CONFIG_85xx_PCI2
static void __init
mpc85xx_setup_pci2(struct pci_controller *hose)
{
pci = ioremap(binfo->bi_immr_base + MPC85xx_PCI2_OFFSET,
MPC85xx_PCI2_SIZE);
- early_read_config_word(hose, 0, 0, PCI_COMMAND, &temps);
+ early_read_config_word(hose, hose->bus_offset, 0, PCI_COMMAND, &temps);
temps |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
- early_write_config_word(hose, 0, 0, PCI_COMMAND, temps);
- early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
+ early_write_config_word(hose, hose->bus_offset, 0, PCI_COMMAND, temps);
+ early_write_config_byte(hose, hose->bus_offset, 0, PCI_LATENCY_TIMER, 0x80);
/* Disable all windows (except powar0 since its ignored) */
pci->powar1 = 0;
pci->piwar2 = 0;
pci->piwar3 = 0;
- /* Setup 512M Phys:PCI 1:1 outbound mem window @ 0xa0000000 */
+ /* Setup Phys:PCI 1:1 outbound mem window @ MPC85XX_PCI2_LOWER_MEM */
pci->potar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff;
pci->potear1 = 0x00000000;
pci->powbar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff;
- pci->powar1 = 0x8004401c; /* Enable, Mem R/W, 512M */
+ /* Enable, Mem R/W */
+	pci->powar1 = 0x80044000 |
+		(__ilog2(MPC85XX_PCI2_UPPER_MEM - MPC85XX_PCI2_LOWER_MEM + 1) - 1);
- /* Setup 16M outboud IO windows @ 0xe3000000 */
+	/* Setup outbound IO windows @ MPC85XX_PCI2_IO_BASE */
pci->potar2 = 0x00000000;
pci->potear2 = 0x00000000;
pci->powbar2 = (MPC85XX_PCI2_IO_BASE >> 12) & 0x000fffff;
- pci->powar2 = 0x80088017; /* Enable, IO R/W, 16M */
+ /* Enable, IO R/W */
+	pci->powar2 = 0x80088000 | (__ilog2(MPC85XX_PCI2_IO_SIZE) - 1);
/* Setup 2G inbound Memory Window @ 0 */
pci->pitar1 = 0x00000000;
#define __PPC_SYSLIB_PPC85XX_SETUP_H
#include <linux/config.h>
-#include <linux/serial.h>
#include <linux/init.h>
#include <asm/ppcboot.h>
/* All newworld pmac machines and CHRPs now use the interrupt tree */
for (np = allnodes; np != NULL; np = np->allnext) {
- if (get_property(np, "interrupt-parent", 0)) {
+ if (get_property(np, "interrupt-parent", NULL)) {
use_of_interrupt_tree = 1;
break;
}
struct device_node *child;
int *ip;
- np->name = get_property(np, "name", 0);
- np->type = get_property(np, "device_type", 0);
+ np->name = get_property(np, "name", NULL);
+ np->type = get_property(np, "device_type", NULL);
if (!np->name)
np->name = "<NULL>";
mem_start = finish_node_interrupts(np, mem_start);
/* Look for #address-cells and #size-cells properties. */
- ip = (int *) get_property(np, "#address-cells", 0);
+ ip = (int *) get_property(np, "#address-cells", NULL);
if (ip != NULL)
naddrc = *ip;
- ip = (int *) get_property(np, "#size-cells", 0);
+ ip = (int *) get_property(np, "#size-cells", NULL);
if (ip != NULL)
nsizec = *ip;
do {
if (np->parent)
np = np->parent;
- ip = (int *) get_property(np, "#address-cells", 0);
+ ip = (int *) get_property(np, "#address-cells", NULL);
if (ip != NULL)
return *ip;
} while (np->parent);
do {
if (np->parent)
np = np->parent;
- ip = (int *) get_property(np, "#size-cells", 0);
+ ip = (int *) get_property(np, "#size-cells", NULL);
if (ip != NULL)
return *ip;
} while (np->parent);
prevp = &np->next;
}
}
- *prevp = 0;
+ *prevp = NULL;
return head;
}
prevp = &np->next;
}
}
- *prevp = 0;
+ *prevp = NULL;
return head;
}
*prevp = np;
prevp = &np->next;
}
- *prevp = 0;
+ *prevp = NULL;
return head;
}
prevp = &np->next;
}
}
- *prevp = 0;
+ *prevp = NULL;
return head;
}
*lenp = pp->length;
return pp->value;
}
- return 0;
+ return NULL;
}
/*
static void * early_get_property(unsigned long base, unsigned long node,
char *prop);
-prom_entry prom __initdata = 0;
-ihandle prom_chosen __initdata = 0;
-ihandle prom_stdout __initdata = 0;
+prom_entry prom __initdata;
+ihandle prom_chosen __initdata;
+ihandle prom_stdout __initdata;
-char *prom_display_paths[FB_MAX] __initdata = { 0, };
+char *prom_display_paths[FB_MAX] __initdata;
phandle prom_display_nodes[FB_MAX] __initdata;
-unsigned int prom_num_displays __initdata = 0;
-char *of_stdout_device __initdata = 0;
-static ihandle prom_disp_node __initdata = 0;
+unsigned int prom_num_displays __initdata;
+char *of_stdout_device __initdata;
+static ihandle prom_disp_node __initdata;
unsigned int rtas_data; /* physical pointer */
unsigned int rtas_entry; /* physical pointer */
prom_args.args[i] = va_arg(list, void *);
va_end(list);
for (i = 0; i < nret; ++i)
- prom_args.args[i + nargs] = 0;
+ prom_args.args[i + nargs] = NULL;
prom(&prom_args);
return prom_args.args[nargs];
}
prom_args.args[i] = va_arg(list, void *);
va_end(list);
for (i = 0; i < nret; ++i)
- prom_args.args[i + nargs] = 0;
+ prom_args.args[i + nargs] = NULL;
prom(&prom_args);
for (i = 1; i < nret; ++i)
rets[i-1] = prom_args.args[nargs + i];
};
const unsigned char *clut;
- prom_disp_node = 0;
+ prom_disp_node = NULL;
- for (node = 0; prom_next_node(&node); ) {
+ for (node = NULL; prom_next_node(&node); ) {
type[0] = 0;
call_prom("getprop", 4, 1, node, "device_type",
type, sizeof(type));
}
allnextp = &allnodes;
mem_start = ALIGNUL(mem_start);
- new_start = inspect_node(root, 0, mem_start, mem_end, &allnextp);
- *allnextp = 0;
+ new_start = inspect_node(root, NULL, mem_start, mem_end, &allnextp);
+ *allnextp = NULL;
return new_start;
}
/* look for cpus */
*(unsigned long *)(0x0) = 0;
asm volatile("dcbf 0,%0": : "r" (0) : "memory");
- for (node = 0; prom_next_node(&node); ) {
+ for (node = NULL; prom_next_node(&node); ) {
type[0] = 0;
call_prom("getprop", 4, 1, node, "device_type",
type, sizeof(type));
prom_print("returning 0x");
prom_print_hex(phys);
prom_print("from prom_init\n");
- prom_stdout = 0;
+ prom_stdout = NULL;
return phys;
}
return (void *)((unsigned long)pp->value + base);
}
}
- return 0;
+ return NULL;
}
/* Is boot-info compatible ? */
boot_infos = PTRUNRELOC(bi);
if (!BOOT_INFO_IS_V2_COMPATIBLE(bi))
- bi->logicalDisplayBase = 0;
+ bi->logicalDisplayBase = NULL;
#ifdef CONFIG_BOOTX_TEXT
btext_init(bi);
/* The zero index is used to indicate the end of the list of
operands. */
#define UNUSED (0)
- { 0, 0, 0, 0, 0 },
+ { 0, 0, NULL, NULL, 0 },
/* The BA field in an XL form instruction. */
#define BA (1)
#define BA_MASK (0x1f << 16)
- { 5, 16, 0, 0, PPC_OPERAND_CR },
+ { 5, 16, NULL, NULL, PPC_OPERAND_CR },
/* The BA field in an XL form instruction when it must be the same
as the BT field in the same instruction. */
/* The BB field in an XL form instruction. */
#define BB (3)
#define BB_MASK (0x1f << 11)
- { 5, 11, 0, 0, PPC_OPERAND_CR },
+ { 5, 11, NULL, NULL, PPC_OPERAND_CR },
/* The BB field in an XL form instruction when it must be the same
as the BA field in the same instruction. */
/* The BF field in an X or XL form instruction. */
#define BF (11)
- { 3, 23, 0, 0, PPC_OPERAND_CR },
+ { 3, 23, NULL, NULL, PPC_OPERAND_CR },
/* An optional BF field. This is used for comparison instructions,
in which an omitted BF field is taken as zero. */
#define OBF (12)
- { 3, 23, 0, 0, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
+ { 3, 23, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
/* The BFA field in an X or XL form instruction. */
#define BFA (13)
- { 3, 18, 0, 0, PPC_OPERAND_CR },
+ { 3, 18, NULL, NULL, PPC_OPERAND_CR },
/* The BI field in a B form or XL form instruction. */
#define BI (14)
#define BI_MASK (0x1f << 16)
- { 5, 16, 0, 0, PPC_OPERAND_CR },
+ { 5, 16, NULL, NULL, PPC_OPERAND_CR },
/* The BO field in a B form instruction. Certain values are
illegal. */
/* The BT field in an X or XL form instruction. */
#define BT (17)
- { 5, 21, 0, 0, PPC_OPERAND_CR },
+ { 5, 21, NULL, NULL, PPC_OPERAND_CR },
/* The condition register number portion of the BI field in a B form
or XL form instruction. This is used for the extended
conditional branch mnemonics, which set the lower two bits of the
BI field. This field is optional. */
#define CR (18)
- { 3, 18, 0, 0, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
+ { 3, 18, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
/* The D field in a D form instruction. This is a displacement off
a register, and implies that the next operand is a register in
parentheses. */
#define D (19)
- { 16, 0, 0, 0, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED },
+ { 16, 0, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED },
/* The DS field in a DS form instruction. This is like D, but the
lower two bits are forced to zero. */
/* The FL1 field in a POWER SC form instruction. */
#define FL1 (21)
- { 4, 12, 0, 0, 0 },
+ { 4, 12, NULL, NULL, 0 },
/* The FL2 field in a POWER SC form instruction. */
#define FL2 (22)
- { 3, 2, 0, 0, 0 },
+ { 3, 2, NULL, NULL, 0 },
/* The FLM field in an XFL form instruction. */
#define FLM (23)
- { 8, 17, 0, 0, 0 },
+ { 8, 17, NULL, NULL, 0 },
/* The FRA field in an X or A form instruction. */
#define FRA (24)
#define FRA_MASK (0x1f << 16)
- { 5, 16, 0, 0, PPC_OPERAND_FPR },
+ { 5, 16, NULL, NULL, PPC_OPERAND_FPR },
/* The FRB field in an X or A form instruction. */
#define FRB (25)
#define FRB_MASK (0x1f << 11)
- { 5, 11, 0, 0, PPC_OPERAND_FPR },
+ { 5, 11, NULL, NULL, PPC_OPERAND_FPR },
/* The FRC field in an A form instruction. */
#define FRC (26)
#define FRC_MASK (0x1f << 6)
- { 5, 6, 0, 0, PPC_OPERAND_FPR },
+ { 5, 6, NULL, NULL, PPC_OPERAND_FPR },
/* The FRS field in an X form instruction or the FRT field in a D, X
or A form instruction. */
#define FRS (27)
#define FRT (FRS)
- { 5, 21, 0, 0, PPC_OPERAND_FPR },
+ { 5, 21, NULL, NULL, PPC_OPERAND_FPR },
/* The FXM field in an XFX instruction. */
#define FXM (28)
#define FXM_MASK (0xff << 12)
- { 8, 12, 0, 0, 0 },
+ { 8, 12, NULL, NULL, 0 },
/* The L field in a D or X form instruction. */
#define L (29)
- { 1, 21, 0, 0, PPC_OPERAND_OPTIONAL },
+ { 1, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
/* The LEV field in a POWER SC form instruction. */
#define LEV (30)
- { 7, 5, 0, 0, 0 },
+ { 7, 5, NULL, NULL, 0 },
/* The LI field in an I form instruction. The lower two bits are
forced to zero. */
/* The MB field in an M form instruction. */
#define MB (33)
#define MB_MASK (0x1f << 6)
- { 5, 6, 0, 0, 0 },
+ { 5, 6, NULL, NULL, 0 },
/* The ME field in an M form instruction. */
#define ME (34)
#define ME_MASK (0x1f << 1)
- { 5, 1, 0, 0, 0 },
+ { 5, 1, NULL, NULL, 0 },
/* The MB and ME fields in an M form instruction expressed a single
operand which is a bitmask indicating which bits to select. This
is a two operand form using PPC_OPERAND_NEXT. See the
description in opcode/ppc.h for what this means. */
#define MBE (35)
- { 5, 6, 0, 0, PPC_OPERAND_OPTIONAL | PPC_OPERAND_NEXT },
+ { 5, 6, NULL, NULL, PPC_OPERAND_OPTIONAL | PPC_OPERAND_NEXT },
{ 32, 0, insert_mbe, extract_mbe, 0 },
/* The MB or ME field in an MD or MDS form instruction. The high
/* The RA field in an D, DS, X, XO, M, or MDS form instruction. */
#define RA (40)
#define RA_MASK (0x1f << 16)
- { 5, 16, 0, 0, PPC_OPERAND_GPR },
+ { 5, 16, NULL, NULL, PPC_OPERAND_GPR },
/* The RA field in a D or X form instruction which is an updating
load, which means that the RA field may not be zero and may not
equal the RT field. */
#define RAL (41)
- { 5, 16, insert_ral, 0, PPC_OPERAND_GPR },
+ { 5, 16, insert_ral, NULL, PPC_OPERAND_GPR },
/* The RA field in an lmw instruction, which has special value
restrictions. */
#define RAM (42)
- { 5, 16, insert_ram, 0, PPC_OPERAND_GPR },
+ { 5, 16, insert_ram, NULL, PPC_OPERAND_GPR },
/* The RA field in a D or X form instruction which is an updating
store or an updating floating point load, which means that the RA
field may not be zero. */
#define RAS (43)
- { 5, 16, insert_ras, 0, PPC_OPERAND_GPR },
+ { 5, 16, insert_ras, NULL, PPC_OPERAND_GPR },
/* The RB field in an X, XO, M, or MDS form instruction. */
#define RB (44)
#define RB_MASK (0x1f << 11)
- { 5, 11, 0, 0, PPC_OPERAND_GPR },
+ { 5, 11, NULL, NULL, PPC_OPERAND_GPR },
/* The RB field in an X form instruction when it must be the same as
the RS field in the instruction. This is used for extended
#define RS (46)
#define RT (RS)
#define RT_MASK (0x1f << 21)
- { 5, 21, 0, 0, PPC_OPERAND_GPR },
+ { 5, 21, NULL, NULL, PPC_OPERAND_GPR },
/* The SH field in an X or M form instruction. */
#define SH (47)
#define SH_MASK (0x1f << 11)
- { 5, 11, 0, 0, 0 },
+ { 5, 11, NULL, NULL, 0 },
/* The SH field in an MD form instruction. This is split. */
#define SH6 (48)
/* The SI field in a D form instruction. */
#define SI (49)
- { 16, 0, 0, 0, PPC_OPERAND_SIGNED },
+ { 16, 0, NULL, NULL, PPC_OPERAND_SIGNED },
/* The SI field in a D form instruction when we accept a wide range
of positive values. */
#define SISIGNOPT (50)
- { 16, 0, 0, 0, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT },
+ { 16, 0, NULL, NULL, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT },
/* The SPR field in an XFX form instruction. This is flipped--the
lower 5 bits are stored in the upper 5 and vice- versa. */
/* The BAT index number in an XFX form m[ft]ibat[lu] instruction. */
#define SPRBAT (52)
#define SPRBAT_MASK (0x3 << 17)
- { 2, 17, 0, 0, 0 },
+ { 2, 17, NULL, NULL, 0 },
/* The SPRG register number in an XFX form m[ft]sprg instruction. */
#define SPRG (53)
#define SPRG_MASK (0x3 << 16)
- { 2, 16, 0, 0, 0 },
+ { 2, 16, NULL, NULL, 0 },
/* The SR field in an X form instruction. */
#define SR (54)
- { 4, 16, 0, 0, 0 },
+ { 4, 16, NULL, NULL, 0 },
/* The SV field in a POWER SC form instruction. */
#define SV (55)
- { 14, 2, 0, 0, 0 },
+ { 14, 2, NULL, NULL, 0 },
/* The TBR field in an XFX form instruction. This is like the SPR
field, but it is optional. */
/* The TO field in a D or X form instruction. */
#define TO (57)
#define TO_MASK (0x1f << 21)
- { 5, 21, 0, 0, 0 },
+ { 5, 21, NULL, NULL, 0 },
/* The U field in an X form instruction. */
#define U (58)
- { 4, 12, 0, 0, 0 },
+ { 4, 12, NULL, NULL, 0 },
/* The UI field in a D form instruction. */
#define UI (59)
- { 16, 0, 0, 0, 0 },
+ { 16, 0, NULL, NULL, 0 },
};
/* The functions used to insert and extract complicated operands. */
scc_initialized = 1;
if (via_modem) {
for (;;) {
- xmon_write(0, "ATE1V1\r", 7);
+ xmon_write(NULL, "ATE1V1\r", 7);
if (xmon_expect("OK", 5)) {
- xmon_write(0, "ATA\r", 4);
+ xmon_write(NULL, "ATA\r", 4);
if (xmon_expect("CONNECT", 40))
break;
}
- xmon_write(0, "+++", 3);
+ xmon_write(NULL, "+++", 3);
xmon_expect("OK", 3);
}
}
c = xmon_getchar();
if (c == -1) {
if (p == str)
- return 0;
+ return NULL;
break;
}
*p++ = c;
set_backlight_level(BACKLIGHT_MAX);
sync();
}
- debugger_fault_handler = 0;
+ debugger_fault_handler = NULL;
#endif /* CONFIG_PMAC_BACKLIGHT */
cmd = cmds(excp);
if (cmd == 's') {
insert_bpts();
}
xmon_leave();
- xmon_regs[smp_processor_id()] = 0;
+ xmon_regs[smp_processor_id()] = NULL;
#ifdef CONFIG_SMP
clear_bit(0, &got_xmon);
clear_bit(smp_processor_id(), &cpus_in_xmon);
for (i = 0; i < NBPTS; ++i, ++bp)
if (bp->enabled && pc == bp->address)
return bp;
- return 0;
+ return NULL;
}
static void
xmon_puts(sysmap);
sync();
}
- debugger_fault_handler = 0;
+ debugger_fault_handler = NULL;
}
else
printf("No System.map\n");
__delay(200);
n = size;
}
- debugger_fault_handler = 0;
+ debugger_fault_handler = NULL;
return n;
}
} else {
printf("*** Error writing address %x\n", adrs + n);
}
- debugger_fault_handler = 0;
+ debugger_fault_handler = NULL;
return n;
}
} else {
printf("*** %x exception occurred\n", fault_except);
}
- debugger_fault_handler = 0;
+ debugger_fault_handler = NULL;
}
/* Input scanning routines */
} while (cur);
sync();
}
- debugger_fault_handler = 0;
+ debugger_fault_handler = NULL;
termch = 0;
break;
}
*(ep++) = 0;
if (saddr)
*saddr = prev;
- debugger_fault_handler = 0;
+ debugger_fault_handler = NULL;
return rbuffer;
}
prev = next;
bail:
sync();
}
- debugger_fault_handler = 0;
+ debugger_fault_handler = NULL;
return NULL;
}
}
sync();
}
- debugger_fault_handler = 0;
+ debugger_fault_handler = NULL;
return result;
}
for handling hard and soft interrupts. This can help avoid
overflowing the process kernel stacks.
-endmenu
-
config SPINLINE
bool "Inline spinlock code at each call site"
depends on SMP && !PPC_SPLPAR && !PPC_ISERIES
If in doubt, say N.
+endmenu
+
source "security/Kconfig"
source "crypto/Kconfig"
* 2 of the License, or (at your option) any later version.
*/
#include <asm/ppc_asm.h>
-#include <asm/processor.h>
.globl __div64_32
__div64_32:
obj-$(CONFIG_PPC_OF) += of_device.o
-obj-$(CONFIG_PCI) += pci.o pci_dn.o pci_iommu.o
+pci-obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_pci_reset.o \
+ iSeries_IoMmTable.o
+pci-obj-$(CONFIG_PPC_PSERIES) += pci_dn.o pci_dma_direct.o
-ifdef CONFIG_PPC_ISERIES
-obj-$(CONFIG_PCI) += iSeries_pci.o iSeries_pci_reset.o \
- iSeries_IoMmTable.o
-else
-obj-$(CONFIG_PCI) += pci_dma_direct.o
-endif
+obj-$(CONFIG_PCI) += pci.o pci_iommu.o $(pci-obj-y)
obj-$(CONFIG_PPC_ISERIES) += iSeries_irq.o \
iSeries_VpdInfo.o XmPciLpEvent.o \
obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
+obj-$(CONFIG_HVCS) += hvcserver.o
obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \
pmac_time.o pmac_nvram.o pmac_low_i2c.o \
DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
- DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_next_rr));
+ DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
+ DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
+ DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
+ DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
+ DEFINE(PACASLBR3, offsetof(struct paca_struct, slb_r3));
+#ifdef CONFIG_HUGETLB_PAGE
+ DEFINE(PACAHTLBSEGS, offsetof(struct paca_struct, context.htlb_segs));
+#endif /* CONFIG_HUGETLB_PAGE */
DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
DEFINE(PACAPROFENABLED, offsetof(struct paca_struct, prof_enabled));
DEFINE(PACAPROFLEN, offsetof(struct paca_struct, prof_len));
cur_cpu_spec->firmware_features);
}
-void
-chrp_progress(char *s, unsigned short hex)
+void chrp_progress(char *s, unsigned short hex)
{
struct device_node *root;
int width, *p;
return;
if (max_width == 0) {
- if ( (root = find_path_device("/rtas")) &&
+ if ((root = find_path_device("/rtas")) &&
(p = (unsigned int *)get_property(root,
"ibm,display-line-length",
- NULL)) )
+ NULL)))
max_width = *p;
else
max_width = 0x10;
display_character = rtas_token("display-character");
set_indicator = rtas_token("set-indicator");
}
- if (display_character == RTAS_UNKNOWN_SERVICE) {
- /* use hex display */
- if (set_indicator == RTAS_UNKNOWN_SERVICE)
- return;
- rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
- return;
- }
- if(display_character == RTAS_UNKNOWN_SERVICE) {
+ if (display_character == RTAS_UNKNOWN_SERVICE) {
/* use hex display if available */
- if(set_indicator != RTAS_UNKNOWN_SERVICE)
+ if (set_indicator != RTAS_UNKNOWN_SERVICE)
rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
return;
}
spin_lock(&progress_lock);
- /* Last write ended with newline, but we didn't print it since
+ /*
+ * Last write ended with newline, but we didn't print it since
* it would just clear the bottom line of output. Print it now
* instead.
*
* If no newline is pending, print a CR to start output at the
* beginning of the line.
*/
- if(pending_newline) {
+ if (pending_newline) {
rtas_call(display_character, 1, 1, NULL, '\r');
rtas_call(display_character, 1, 1, NULL, '\n');
pending_newline = 0;
- } else
+ } else {
rtas_call(display_character, 1, 1, NULL, '\r');
+ }
width = max_width;
os = s;
while (*os) {
- if(*os == '\n' || *os == '\r') {
+ if (*os == '\n' || *os == '\r') {
/* Blank to end of line. */
- while(width-- > 0)
+ while (width-- > 0)
rtas_call(display_character, 1, 1, NULL, ' ');
/* If newline is the last character, save it
* until next call to avoid bumping up the
* display output.
*/
- if(*os == '\n' && !os[1]) {
+ if (*os == '\n' && !os[1]) {
pending_newline = 1;
spin_unlock(&progress_lock);
return;
/* RTAS wants CR-LF, not just LF */
- if(*os == '\n') {
+ if (*os == '\n') {
rtas_call(display_character, 1, 1, NULL, '\r');
rtas_call(display_character, 1, 1, NULL, '\n');
} else {
os++;
/* if we overwrite the screen length */
- if ( width <= 0 )
- while ( (*os != 0) && (*os != '\n') && (*os != '\r') )
+ if (width <= 0)
+ while ((*os != 0) && (*os != '\n') && (*os != '\r'))
os++;
}
/* Blank to end of line. */
- while ( width-- > 0 )
- rtas_call(display_character, 1, 1, NULL, ' ' );
+ while (width-- > 0)
+ rtas_call(display_character, 1, 1, NULL, ' ');
spin_unlock(&progress_lock);
}
{
struct eeh_early_enable_info *info = data;
int ret;
- char *status = get_property(dn, "status", 0);
- u32 *class_code = (u32 *)get_property(dn, "class-code", 0);
- u32 *vendor_id = (u32 *)get_property(dn, "vendor-id", 0);
- u32 *device_id = (u32 *)get_property(dn, "device-id", 0);
+ char *status = get_property(dn, "status", NULL);
+ u32 *class_code = (u32 *)get_property(dn, "class-code", NULL);
+ u32 *vendor_id = (u32 *)get_property(dn, "vendor-id", NULL);
+ u32 *device_id = (u32 *)get_property(dn, "device-id", NULL);
u32 *regs;
int enable;
/* Ok... see if this device supports EEH. Some do, some don't,
* and the only way to find out is to check each and every one. */
- regs = (u32 *)get_property(dn, "reg", 0);
+ regs = (u32 *)get_property(dn, "reg", NULL);
if (regs) {
/* First register entry is addr (00BBSS00) */
/* Try to enable eeh */
info.buid_lo = BUID_LO(buid);
info.buid_hi = BUID_HI(buid);
- traverse_pci_devices(phb, early_enable_eeh, NULL, &info);
+ traverse_pci_devices(phb, early_enable_eeh, &info);
}
if (eeh_subsystem_enabled) {
/* Build list of strings to match */
nstrs = 0;
- s = (char *)get_property(dn, "ibm,loc-code", 0);
+ s = (char *)get_property(dn, "ibm,loc-code", NULL);
if (s)
strs[nstrs++] = s;
sprintf(devname, "dev%04x:%04x", vendor_id, device_id);
*/
ld r11,.SYS_CALL_TABLE@toc(2)
andi. r10,r10,_TIF_32BIT
- beq- 15f
+ beq 15f
ld r11,.SYS_CALL_TABLE32@toc(2)
clrldi r3,r3,32
clrldi r4,r4,32
15:
slwi r0,r0,3
ldx r10,r11,r0 /* Fetch system call handler [ptr] */
- mtlr r10
- blrl /* Call handler */
+ mtctr r10
+ bctrl /* Call handler */
syscall_exit:
#ifdef SHOW_SYSCALLS
stdcx. r0,0,r1 /* to clear the reservation */
andi. r6,r8,MSR_PR
ld r4,_LINK(r1)
- beq 1f /* only restore r13 if */
+ beq- 1f /* only restore r13 if */
ld r13,GPR13(r1) /* returning to usermode */
1: ld r2,GPR2(r1)
ld r1,GPR1(r1)
mtspr SRR0,r7
mtspr SRR1,r8
rfid
+ b . /* prevent speculative execution */
syscall_enosys:
li r3,-ENOSYS
ld r1,GPR1(r1)
rfid
- b .
+ b . /* prevent speculative execution */
/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
mtspr SRR0,r5
mtspr SRR1,r6
rfid
+ b . /* prevent speculative execution */
_STATIC(rtas_return_loc)
/* relocation is off at this point */
mtspr SRR0,r3
mtspr SRR1,r4
rfid
+ b . /* prevent speculative execution */
_STATIC(rtas_restore_regs)
/* relocation is on at this point */
#define EX_R13 32
#define EX_SRR0 40
#define EX_DAR 48
+#define EX_LR 48 /* SLB miss saves LR, but not DAR */
#define EX_DSISR 56
#define EX_CCR 60
mtspr SRR0,r12; \
mfspr r12,SRR1; /* and SRR1 */ \
mtspr SRR1,r10; \
- rfid
+ rfid; \
+ b . /* prevent speculative execution */
/*
* This is the start of the interrupt handlers for iSeries
. = n; \
.globl label##_Pseries; \
label##_Pseries: \
+ HMT_MEDIUM; \
mtspr SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
#define STD_EXCEPTION_ISERIES(n, label, area) \
.globl label##_Iseries; \
label##_Iseries: \
+ HMT_MEDIUM; \
mtspr SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_ISERIES_1(area); \
EXCEPTION_PROLOG_ISERIES_2; \
#define MASKABLE_EXCEPTION_ISERIES(n, label) \
.globl label##_Iseries; \
label##_Iseries: \
+ HMT_MEDIUM; \
mtspr SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
lbz r10,PACAPROFENABLED(r13); \
. = 0x200
_MachineCheckPseries:
+ HMT_MEDIUM
mtspr SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, MachineCheck_common)
. = 0x300
.globl DataAccess_Pseries
DataAccess_Pseries:
+ HMT_MEDIUM
mtspr SPRG1,r13
BEGIN_FTR_SECTION
mtspr SPRG2,r12
. = 0x380
.globl DataAccessSLB_Pseries
DataAccessSLB_Pseries:
+ HMT_MEDIUM
mtspr SPRG1,r13
- mtspr SPRG2,r12
- mfspr r13,DAR
- mfcr r12
- srdi r13,r13,60
- cmpdi r13,0xc
- beq .do_slb_bolted_Pseries
- mtcrf 0x80,r12
- mfspr r12,SPRG2
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, DataAccessSLB_common)
+ mfspr r13,SPRG3 /* get paca address into r13 */
+ std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
+ std r10,PACA_EXSLB+EX_R10(r13)
+ std r11,PACA_EXSLB+EX_R11(r13)
+ std r12,PACA_EXSLB+EX_R12(r13)
+ std r3,PACASLBR3(r13)
+ mfspr r9,SPRG1
+ std r9,PACA_EXSLB+EX_R13(r13)
+ mfcr r9
+ clrrdi r12,r13,32 /* get high part of &label */
+ mfmsr r10
+ mfspr r11,SRR0 /* save SRR0 */
+ ori r12,r12,(.do_slb_miss)@l
+ ori r10,r10,MSR_IR|MSR_DR /* DON'T set RI for SLB miss */
+ mtspr SRR0,r12
+ mfspr r12,SRR1 /* and SRR1 */
+ mtspr SRR1,r10
+ mfspr r3,DAR
+ rfid
+ b . /* prevent speculative execution */
STD_EXCEPTION_PSERIES(0x400, InstructionAccess)
- STD_EXCEPTION_PSERIES(0x480, InstructionAccessSLB)
+
+ . = 0x480
+ .globl InstructionAccessSLB_Pseries
+InstructionAccessSLB_Pseries:
+ HMT_MEDIUM
+ mtspr SPRG1,r13
+ mfspr r13,SPRG3 /* get paca address into r13 */
+ std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
+ std r10,PACA_EXSLB+EX_R10(r13)
+ std r11,PACA_EXSLB+EX_R11(r13)
+ std r12,PACA_EXSLB+EX_R12(r13)
+ std r3,PACASLBR3(r13)
+ mfspr r9,SPRG1
+ std r9,PACA_EXSLB+EX_R13(r13)
+ mfcr r9
+ clrrdi r12,r13,32 /* get high part of &label */
+ mfmsr r10
+ mfspr r11,SRR0 /* save SRR0 */
+ ori r12,r12,(.do_slb_miss)@l
+ ori r10,r10,MSR_IR|MSR_DR /* DON'T set RI for SLB miss */
+ mtspr SRR0,r12
+ mfspr r12,SRR1 /* and SRR1 */
+ mtspr SRR1,r10
+ mr r3,r11 /* SRR0 is faulting address */
+ rfid
+ b . /* prevent speculative execution */
+
STD_EXCEPTION_PSERIES(0x500, HardwareInterrupt)
STD_EXCEPTION_PSERIES(0x600, Alignment)
STD_EXCEPTION_PSERIES(0x700, ProgramCheck)
. = 0xc00
.globl SystemCall_Pseries
SystemCall_Pseries:
+ HMT_MEDIUM
mr r9,r13
mfmsr r10
mfspr r13,SPRG3
mfspr r12,SRR1
mtspr SRR1,r10
rfid
+ b . /* prevent speculative execution */
STD_EXCEPTION_PSERIES(0xd00, SingleStep)
STD_EXCEPTION_PSERIES(0xe00, Trap_0e)
mfspr r12,SPRG2
EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
-_GLOBAL(do_slb_bolted_Pseries)
- mtcrf 0x80,r12
- mfspr r12,SPRG2
- EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_slb_bolted)
-
/* Space for the naca. Architected to be located at real address
* NACA_PHYS_ADDR. Various tools rely on this location being fixed.
.globl DataAccessSLB_Iseries
DataAccessSLB_Iseries:
mtspr SPRG1,r13 /* save r13 */
- mtspr SPRG2,r12
- mfspr r13,DAR
- mfcr r12
- srdi r13,r13,60
- cmpdi r13,0xc
- beq .do_slb_bolted_Iseries
- mtcrf 0x80,r12
- mfspr r12,SPRG2
- EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
- EXCEPTION_PROLOG_ISERIES_2
- b DataAccessSLB_common
-
-.do_slb_bolted_Iseries:
- mtcrf 0x80,r12
- mfspr r12,SPRG2
EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
- EXCEPTION_PROLOG_ISERIES_2
- b .do_slb_bolted
+ std r3,PACASLBR3(r13)
+ ld r11,PACALPPACA+LPPACASRR0(r13)
+ ld r12,PACALPPACA+LPPACASRR1(r13)
+ mfspr r3,DAR
+ b .do_slb_miss
STD_EXCEPTION_ISERIES(0x400, InstructionAccess, PACA_EXGEN)
- STD_EXCEPTION_ISERIES(0x480, InstructionAccessSLB, PACA_EXGEN)
+
+ .globl InstructionAccessSLB_Iseries
+InstructionAccessSLB_Iseries:
+ mtspr SPRG1,r13 /* save r13 */
+ EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+ std r3,PACASLBR3(r13)
+ ld r11,PACALPPACA+LPPACASRR0(r13)
+ ld r12,PACALPPACA+LPPACASRR1(r13)
+ mr r3,r11
+ b .do_slb_miss
+
MASKABLE_EXCEPTION_ISERIES(0x500, HardwareInterrupt)
STD_EXCEPTION_ISERIES(0x600, Alignment, PACA_EXGEN)
STD_EXCEPTION_ISERIES(0x700, ProgramCheck, PACA_EXGEN)
li r11,1
stb r11,PACALPPACA+LPPACADECRINT(r13)
lwz r12,PACADEFAULTDECR(r13)
- mtspr DEC,r12
+ mtspr SPRN_DEC,r12
/* fall through */
.globl HardwareInterrupt_Iseries_masked
ld r12,PACA_EXGEN+EX_R12(r13)
ld r13,PACA_EXGEN+EX_R13(r13)
rfid
+ b . /* prevent speculative execution */
#endif
/*
. = 0x8000
.globl SystemReset_FWNMI
SystemReset_FWNMI:
+ HMT_MEDIUM
mtspr SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, SystemReset_common)
.globl MachineCheck_FWNMI
MachineCheck_FWNMI:
+ HMT_MEDIUM
mtspr SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, MachineCheck_common)
REST_4GPRS(10, r1)
ld r1,GPR1(r1)
rfid
+ b . /* prevent speculative execution */
unrecov_fer:
bl .save_nvgprs
li r5,0x300
b .do_hash_page /* Try to handle as hpte fault */
- .align 7
- .globl DataAccessSLB_common
-DataAccessSLB_common:
- mfspr r10,DAR
- std r10,PACA_EXGEN+EX_DAR(r13)
- EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
- ld r3,PACA_EXGEN+EX_DAR(r13)
- std r3,_DAR(r1)
- bl .slb_allocate
- cmpdi r3,0 /* Check return code */
- beq fast_exception_return /* Return if we succeeded */
- li r5,0
- std r5,_DSISR(r1)
- b .handle_page_fault
-
.align 7
.globl InstructionAccess_common
InstructionAccess_common:
li r5,0x400
b .do_hash_page /* Try to handle as hpte fault */
- .align 7
- .globl InstructionAccessSLB_common
-InstructionAccessSLB_common:
- EXCEPTION_PROLOG_COMMON(0x480, PACA_EXGEN)
- ld r3,_NIP(r1) /* SRR0 = NIA */
- bl .slb_allocate
- or. r3,r3,r3 /* Check return code */
- beq+ fast_exception_return /* Return if we succeeded */
-
- ld r4,_NIP(r1)
- li r5,0
- std r4,_DAR(r1)
- std r5,_DSISR(r1)
- b .handle_page_fault
-
.align 7
.globl HardwareInterrupt_common
.globl HardwareInterrupt_entry
bl .local_irq_restore
b 11f
#else
- beq+ fast_exception_return /* Return from exception on success */
+ beq fast_exception_return /* Return from exception on success */
/* fall through */
#endif
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
+ b . /* prevent speculative execution */
/*
* r13 points to the PACA, r9 contains the saved CR,
* r11 and r12 contain the saved SRR0 and SRR1.
+ * r3 has the faulting address
* r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
* We assume we aren't going to take any exceptions during this procedure.
*/
-/* XXX note fix masking in get_kernel_vsid to match */
-_GLOBAL(do_slb_bolted)
+_GLOBAL(do_slb_miss)
+ mflr r10
+
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
+ std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
- /*
- * We take the next entry, round robin. Previously we tried
- * to find a free slot first but that took too long. Unfortunately
- * we dont have any LRU information to help us choose a slot.
- */
-
- /* r13 = paca */
-1: ld r10,PACASTABRR(r13)
- addi r9,r10,1
- cmpdi r9,SLB_NUM_ENTRIES
- blt+ 2f
- li r9,2 /* dont touch slot 0 or 1 */
-2: std r9,PACASTABRR(r13)
-
- /* r13 = paca, r10 = entry */
-
- /*
- * Never cast out the segment for our kernel stack. Since we
- * dont invalidate the ERAT we could have a valid translation
- * for the kernel stack during the first part of exception exit
- * which gets invalidated due to a tlbie from another cpu at a
- * non recoverable point (after setting srr0/1) - Anton
- */
- slbmfee r9,r10
- srdi r9,r9,27
- /*
- * Use paca->ksave as the value of the kernel stack pointer,
- * because this is valid at all times.
- * The >> 27 (rather than >> 28) is so that the LSB is the
- * valid bit - this way we check valid and ESID in one compare.
- * In order to completely close the tiny race in the context
- * switch (between updating r1 and updating paca->ksave),
- * we check against both r1 and paca->ksave.
- */
- srdi r11,r1,27
- ori r11,r11,1
- cmpd r11,r9
- beq- 1b
- ld r11,PACAKSAVE(r13)
- srdi r11,r11,27
- ori r11,r11,1
- cmpd r11,r9
- beq- 1b
-
- /* r13 = paca, r10 = entry */
-
- /* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */
- mfspr r9,DAR
- rldicl r11,r9,36,51
- sldi r11,r11,15
- srdi r9,r9,60
- or r11,r11,r9
-
- /* VSID_RANDOMIZER */
- li r9,9
- sldi r9,r9,32
- oris r9,r9,58231
- ori r9,r9,39831
-
- /* vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK */
- mulld r11,r11,r9
- clrldi r11,r11,28
-
- /* r13 = paca, r10 = entry, r11 = vsid */
-
- /* Put together slb word1 */
- sldi r11,r11,12
-
-BEGIN_FTR_SECTION
- /* set kp and c bits */
- ori r11,r11,0x480
-END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
-BEGIN_FTR_SECTION
- /* set kp, l and c bits */
- ori r11,r11,0x580
-END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
-
- /* r13 = paca, r10 = entry, r11 = slb word1 */
-
- /* Put together slb word0 */
- mfspr r9,DAR
- clrrdi r9,r9,28 /* get the new esid */
- oris r9,r9,0x800 /* set valid bit */
- rldimi r9,r10,0,52 /* insert entry */
-
- /* r13 = paca, r9 = slb word0, r11 = slb word1 */
-
- /*
- * No need for an isync before or after this slbmte. The exception
- * we enter with and the rfid we exit with are context synchronizing .
- */
- slbmte r11,r9
+ bl .slb_allocate /* handle it */
/* All done -- return from exception. */
+
+ ld r10,PACA_EXSLB+EX_LR(r13)
+ ld r3,PACASLBR3(r13)
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
+ mtlr r10
+
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
beq- unrecov_slb
- /*
- * Until everyone updates binutils hardwire the POWER4 optimised
- * single field mtcrf
- */
-#if 0
- .machine push
- .machine "power4"
+.machine push
+.machine "power4"
mtcrf 0x80,r9
- .machine pop
-#else
- .long 0x7d380120
-#endif
-
- mfmsr r10
- clrrdi r10,r10,2
- mtmsrd r10,1
+ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
+.machine pop
mtspr SRR0,r11
mtspr SRR1,r12
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
+ b . /* prevent speculative execution */
unrecov_slb:
EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
mtspr SRR1,r3
sync
rfid
+ b . /* prevent speculative execution */
_GLOBAL(__start_initialization_pSeries)
mr r31,r3 /* save parameters */
mr r30,r4
mtspr SRR0,r3
mtspr SRR1,r4
rfid
+ b . /* prevent speculative execution */
/*
* Running with relocation on at this point. All we want to do is
mtspr SRR0,r3
mtspr SRR1,r4
rfid
+ b . /* prevent speculative execution */
#endif /* CONFIG_PPC_PSERIES */
/* This is where all platforms converge execution */
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <asm/hvcall.h>
#include <asm/prom.h>
#include <asm/hvconsole.h>
return 0;
}
+EXPORT_SYMBOL(hvc_get_chars);
+
int hvc_put_chars(int index, const char *buf, int count)
{
unsigned long *lbuf = (unsigned long *) buf;
return -1;
}
+EXPORT_SYMBOL(hvc_put_chars);
+
/* return the number of client vterms present */
/* XXX this requires an interface change to handle multiple discontiguous
* vterms */
* we should _always_ be able to find one. */
vty = of_find_node_by_name(NULL, "vty");
if (vty && device_is_compatible(vty, "hvterm1")) {
- u32 *termno = (u32 *)get_property(vty, "reg", 0);
+ u32 *termno = (u32 *)get_property(vty, "reg", NULL);
if (termno && start_termno)
*start_termno = *termno;
#include <asm/mmu_context.h>
#include <asm/iSeries/HvCallHpt.h>
#include <asm/abs_addr.h>
-
-#if 0
#include <linux/spinlock.h>
-#include <linux/bitops.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
-#include <asm/tlbflush.h>
-#include <asm/tlb.h>
-#include <asm/cputable.h>
-#endif
+static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp = { [0 ... 63] = SPIN_LOCK_UNLOCKED};
+
+/*
+ * Very primitive algorithm for picking up a lock
+ */
+static inline void iSeries_hlock(unsigned long slot)
+{
+ if (slot & 0x8)
+ slot = ~slot;
+ spin_lock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
+}
+
+static inline void iSeries_hunlock(unsigned long slot)
+{
+ if (slot & 0x8)
+ slot = ~slot;
+ spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
+}
static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
unsigned long prpn, int secondary,
if (secondary)
return -1;
+ iSeries_hlock(hpte_group);
+
slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
- if (lhpte.dw0.dw0.v)
- panic("select_hpte_slot found entry already valid\n");
+ BUG_ON(lhpte.dw0.dw0.v);
- if (slot == -1) /* No available entry found in either group */
+ if (slot == -1) { /* No available entry found in either group */
+ iSeries_hunlock(hpte_group);
return -1;
+ }
if (slot < 0) { /* MSB set means secondary group */
secondary = 1;
/* Now fill in the actual HPTE */
HvCallHpt_addValidate(slot, secondary, &lhpte);
+ iSeries_hunlock(hpte_group);
+
return (secondary << 3) | (slot & 7);
}
/* Pick a random slot to start at */
slot_offset = mftb() & 0x7;
+ iSeries_hlock(hpte_group);
+
for (i = 0; i < HPTES_PER_GROUP; i++) {
lhpte.dw0.dword0 =
iSeries_hpte_getword0(hpte_group + slot_offset);
if (!lhpte.dw0.dw0.bolted) {
HvCallHpt_invalidateSetSwBitsGet(hpte_group +
slot_offset, 0, 0);
+ iSeries_hunlock(hpte_group);
return i;
}
slot_offset &= 0x7;
}
+ iSeries_hunlock(hpte_group);
+
return -1;
}
HPTE hpte;
unsigned long avpn = va >> 23;
+ iSeries_hlock(slot);
+
HvCallHpt_get(&hpte, slot);
if ((hpte.dw0.dw0.avpn == avpn) && (hpte.dw0.dw0.v)) {
HvCallHpt_setPp(slot, (newpp & 0x3) | ((newpp & 0x4) << 1));
+ iSeries_hunlock(slot);
return 0;
}
+ iSeries_hunlock(slot);
+
return -1;
}
{
HPTE lhpte;
unsigned long avpn = va >> 23;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ iSeries_hlock(slot);
lhpte.dw0.dword0 = iSeries_hpte_getword0(slot);
if ((lhpte.dw0.dw0.avpn == avpn) && lhpte.dw0.dw0.v)
HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
+
+ iSeries_hunlock(slot);
+
+ local_irq_restore(flags);
}
void hpte_init_iSeries(void)
#endif /* CONFIG_SMP */
-/* XXX Make this into free_irq() - Anton */
-
-/* This could be promoted to a real free_irq() ... */
-static int
-do_free_irq(int irq, void* dev_id)
-{
- irq_desc_t *desc = get_irq_desc(irq);
- struct irqaction **p;
- unsigned long flags;
-
- spin_lock_irqsave(&desc->lock,flags);
- p = &desc->action;
- for (;;) {
- struct irqaction * action = *p;
- if (action) {
- struct irqaction **pp = p;
- p = &action->next;
- if (action->dev_id != dev_id)
- continue;
-
- /* Found it - now remove it from the list of entries */
- *pp = action->next;
- if (!desc->action) {
- desc->status |= IRQ_DISABLED;
- mask_irq(irq);
- }
- spin_unlock_irqrestore(&desc->lock,flags);
-
- /* Wait to make sure it's not being used on another CPU */
- synchronize_irq(irq);
- kfree(action);
- return 0;
- }
- printk("Trying to free free IRQ%d\n",irq);
- spin_unlock_irqrestore(&desc->lock,flags);
- break;
- }
- return -ENOENT;
-}
-
-
int request_irq(unsigned int irq,
irqreturn_t (*handler)(int, void *, struct pt_regs *),
unsigned long irqflags, const char * devname, void *dev_id)
if (irq >= NR_IRQS)
return -EINVAL;
if (!handler)
- /* We could implement really free_irq() instead of that... */
- return do_free_irq(irq, dev_id);
+ return -EINVAL;
action = (struct irqaction *)
kmalloc(sizeof(struct irqaction), GFP_KERNEL);
void free_irq(unsigned int irq, void *dev_id)
{
- request_irq(irq, NULL, 0, NULL, dev_id);
+ irq_desc_t *desc = get_irq_desc(irq);
+ struct irqaction **p;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ for (;;) {
+ struct irqaction * action = *p;
+ if (action) {
+ struct irqaction **pp = p;
+ p = &action->next;
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found it - now remove it from the list of entries */
+ *pp = action->next;
+ if (!desc->action) {
+ desc->status |= IRQ_DISABLED;
+ mask_irq(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ /* Wait to make sure it's not being used on another CPU */
+ synchronize_irq(irq);
+ kfree(action);
+ return;
+ }
+ printk("Trying to free free IRQ%d\n",irq);
+ spin_unlock_irqrestore(&desc->lock,flags);
+ break;
+ }
+ return;
}
EXPORT_SYMBOL(free_irq);
}
#ifdef CONFIG_PPC_ISERIES
-int do_IRQ(struct pt_regs *regs)
+void do_IRQ(struct pt_regs *regs)
{
struct paca_struct *lpaca;
struct ItLpQueue *lpq;
/* Signal a fake decrementer interrupt */
timer_interrupt(regs);
}
-
- return 1; /* lets ret_from_int know we can do checks */
}
#else /* CONFIG_PPC_ISERIES */
-int do_IRQ(struct pt_regs *regs)
+void do_IRQ(struct pt_regs *regs)
{
- int irq, first = 1;
+ int irq;
irq_enter();
}
#endif
- /*
- * Every arch is required to implement ppc_md.get_irq.
- * This function will either return an irq number or -1 to
- * indicate there are no more pending. But the first time
- * through the loop this means there wasn't an IRQ pending.
- * The value -2 is for buggy hardware and means that this IRQ
- * has already been handled. -- Tom
- */
- while ((irq = ppc_md.get_irq(regs)) >= 0) {
+ irq = ppc_md.get_irq(regs);
+
+ if (irq >= 0)
ppc_irq_dispatch_handler(regs, irq);
- first = 0;
- }
- if (irq != -2 && first)
+ else
/* That's not SMP safe ... but who cares ? */
ppc_spurious_interrupts++;
irq_exit();
-
- return 1; /* lets ret_from_int know we can do checks */
}
#endif /* CONFIG_PPC_ISERIES */
.llong .compat_sys_sched_setaffinity
.llong .compat_sys_sched_getaffinity
.llong .sys_ni_syscall
- .llong .sys_ni_syscall /* 225 - reserved for tux */
+#ifdef CONFIG_TUX
+ .llong .__sys_tux
+#else
+# ifdef CONFIG_TUX_MODULE
+ .llong .sys_tux
+# else
+ .llong .sys_ni_syscall
+# endif
+#endif
.llong .sys32_sendfile64
.llong .compat_sys_io_setup
.llong .sys_io_destroy
.llong .sys_sched_setaffinity
.llong .sys_sched_getaffinity
.llong .sys_ni_syscall
- .llong .sys_ni_syscall /* 225 - reserved for tux */
+#ifdef CONFIG_TUX
+ .llong .__sys_tux
+#else
+# ifdef CONFIG_TUX_MODULE
+ .llong .sys_tux
+# else
+ .llong .sys_ni_syscall
+# endif
+#endif
.llong .sys_ni_syscall /* 32bit only sendfile64 */
.llong .sys_io_setup
.llong .sys_io_destroy
struct device_node *np;
int i;
unsigned int *addrp;
- unsigned char* chrp_int_ack_special = 0;
+ unsigned char* chrp_int_ack_special = NULL;
unsigned char init_senses[NR_IRQS - NUM_ISA_INTERRUPTS];
int nmi_irq = -1;
#if defined(CONFIG_VT) && defined(CONFIG_ADB_KEYBOARD) && defined(XMON)
/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
request_irq(openpic_vec_ipi, openpic_ipi_action, SA_INTERRUPT,
- "IPI0 (call function)", 0);
+ "IPI0 (call function)", NULL);
request_irq(openpic_vec_ipi+1, openpic_ipi_action, SA_INTERRUPT,
- "IPI1 (reschedule)", 0);
+ "IPI1 (reschedule)", NULL);
request_irq(openpic_vec_ipi+2, openpic_ipi_action, SA_INTERRUPT,
- "IPI2 (unused)", 0);
+ "IPI2 (unused)", NULL);
request_irq(openpic_vec_ipi+3, openpic_ipi_action, SA_INTERRUPT,
- "IPI3 (debugger break)", 0);
+ "IPI3 (debugger break)", NULL);
for ( i = 0; i < OPENPIC_NUM_IPI ; i++ )
openpic_enable_ipi(openpic_vec_ipi+i);
HPTE *hptep = htab_data.htab + slot;
Hpte_dword0 dw0;
unsigned long avpn = va >> 23;
- unsigned long flags;
int ret = 0;
if (large)
tlbiel(va);
} else {
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_lock_irqsave(&pSeries_tlbie_lock, flags);
+ spin_lock(&pSeries_tlbie_lock);
tlbie(va, large);
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
+ spin_unlock(&pSeries_tlbie_lock);
}
return ret;
if (large)
avpn &= ~0x1UL;
+ local_irq_save(flags);
pSeries_lock_hpte(hptep);
dw0 = hptep->dw0.dw0;
tlbiel(va);
} else {
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_lock_irqsave(&pSeries_tlbie_lock, flags);
+ spin_lock(&pSeries_tlbie_lock);
tlbie(va, large);
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
+ spin_unlock(&pSeries_tlbie_lock);
}
+ local_irq_restore(flags);
}
static void pSeries_flush_hash_range(unsigned long context,
/* XXX fix for large ptes */
unsigned long large = 0;
+ local_irq_save(flags);
+
j = 0;
for (i = 0; i < number; i++) {
if ((batch->addr[i] >= USER_START) &&
} else {
/* XXX double check that it is safe to take this late */
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_lock_irqsave(&pSeries_tlbie_lock, flags);
+ spin_lock(&pSeries_tlbie_lock);
asm volatile("ptesync":::"memory");
asm volatile("eieio; tlbsync; ptesync":::"memory");
if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
- spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
+ spin_unlock(&pSeries_tlbie_lock);
}
+
+ local_irq_restore(flags);
}
void hpte_init_pSeries(void)
bus = pci_bus_b(ln);
busdn = PCI_GET_DN(bus);
- dma_window = (unsigned int *)get_property(busdn, "ibm,dma-window", 0);
+ dma_window = (unsigned int *)get_property(busdn, "ibm,dma-window", NULL);
if (dma_window) {
/* Bussubno hasn't been copied yet.
* Do it now because iommu_table_setparms_lpar needs it.
{
unsigned int *dma_window;
- dma_window = (unsigned int *)get_property(dn, "ibm,dma-window", 0);
+ dma_window = (unsigned int *)get_property(dn, "ibm,dma-window", NULL);
if (!dma_window)
panic("iommu_table_setparms_lpar: device %s has no"
}
/* now we have the stdout node; figure out what type of device it is. */
- name = (char *)get_property(stdout_node, "name", 0);
+ name = (char *)get_property(stdout_node, "name", NULL);
if (!name) {
printk(KERN_WARNING "stdout node missing 'name' property!\n");
goto out;
if (strncmp(name, "vty", 3) == 0) {
if (device_is_compatible(stdout_node, "hvterm1")) {
- termno = (u32 *)get_property(stdout_node, "reg", 0);
+ termno = (u32 *)get_property(stdout_node, "reg", NULL);
if (termno) {
vtermno = termno[0];
ppc_md.udbg_putc = udbg_putcLP;
isa_dn = of_find_node_by_type(NULL, "isa");
if (isa_dn) {
isa_io_base = pci_io_base;
- of_node_put(isa_dn);
pci_process_ISA_OF_ranges(isa_dn,
hose->io_base_phys,
hose->io_base_virt);
+ of_node_put(isa_dn);
/* Allow all IO */
io_page_mask = -1;
}
BUG(); /* No I/O resource for this PHB? */
if (request_resource(&ioport_resource, res))
- printk(KERN_ERR "Failed to request IO"
- "on hose %d\n", 0 /* FIXME */);
+ printk(KERN_ERR "Failed to request IO on "
+ "PCI domain %d\n", pci_domain_nr(bus));
+
for (i = 0; i < 3; ++i) {
res = &hose->mem_resources[i];
BUG(); /* No memory resource for this PHB? */
bus->resource[i+1] = res;
if (res->flags && request_resource(&iomem_resource, res))
- printk(KERN_ERR "Failed to request MEM"
- "on hose %d\n", 0 /* FIXME */);
+ printk(KERN_ERR "Failed to request MEM on "
+ "PCI domain %d\n",
+ pci_domain_nr(bus));
}
} else if (pci_probe_only &&
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
/* Stack space used when we detect a bad kernel stack pointer, and
* early in SMP boots before relocation is enabled.
+ *
+ * ABI requires stack to be 128-byte aligned
*/
-char emergency_stack[PAGE_SIZE * NR_CPUS];
+char emergency_stack[PAGE_SIZE * NR_CPUS] __attribute__((aligned(128)));
/* The Paca is an array with one entry per processor. Each contains an
* ItLpPaca, which contains the information shared between the
.stab_addr = (asrv), /* Virt pointer to segment table */ \
.emergency_sp = &emergency_stack[((number)+1) * PAGE_SIZE], \
.cpu_start = (start), /* Processor start */ \
- .stab_next_rr = 1, \
.lppaca = { \
.xDesc = 0xd397d781, /* "LpPa" */ \
.xSize = sizeof(struct ItLpPaca), \
#ifdef CONFIG_PPC_ISERIES
PACAINITDATA( 0, 1, &xItLpQueue, 0, STAB0_VIRT_ADDR),
#else
- PACAINITDATA( 0, 1, 0, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR),
+ PACAINITDATA( 0, 1, NULL, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR),
#endif
- PACAINITDATA( 1, 0, 0, 0, 0),
- PACAINITDATA( 2, 0, 0, 0, 0),
- PACAINITDATA( 3, 0, 0, 0, 0),
- PACAINITDATA( 4, 0, 0, 0, 0),
- PACAINITDATA( 5, 0, 0, 0, 0),
- PACAINITDATA( 6, 0, 0, 0, 0),
- PACAINITDATA( 7, 0, 0, 0, 0),
- PACAINITDATA( 8, 0, 0, 0, 0),
- PACAINITDATA( 9, 0, 0, 0, 0),
- PACAINITDATA(10, 0, 0, 0, 0),
- PACAINITDATA(11, 0, 0, 0, 0),
- PACAINITDATA(12, 0, 0, 0, 0),
- PACAINITDATA(13, 0, 0, 0, 0),
- PACAINITDATA(14, 0, 0, 0, 0),
- PACAINITDATA(15, 0, 0, 0, 0),
- PACAINITDATA(16, 0, 0, 0, 0),
- PACAINITDATA(17, 0, 0, 0, 0),
- PACAINITDATA(18, 0, 0, 0, 0),
- PACAINITDATA(19, 0, 0, 0, 0),
- PACAINITDATA(20, 0, 0, 0, 0),
- PACAINITDATA(21, 0, 0, 0, 0),
- PACAINITDATA(22, 0, 0, 0, 0),
- PACAINITDATA(23, 0, 0, 0, 0),
- PACAINITDATA(24, 0, 0, 0, 0),
- PACAINITDATA(25, 0, 0, 0, 0),
- PACAINITDATA(26, 0, 0, 0, 0),
- PACAINITDATA(27, 0, 0, 0, 0),
- PACAINITDATA(28, 0, 0, 0, 0),
- PACAINITDATA(29, 0, 0, 0, 0),
- PACAINITDATA(30, 0, 0, 0, 0),
- PACAINITDATA(31, 0, 0, 0, 0),
+ PACAINITDATA( 1, 0, NULL, 0, 0),
+ PACAINITDATA( 2, 0, NULL, 0, 0),
+ PACAINITDATA( 3, 0, NULL, 0, 0),
+ PACAINITDATA( 4, 0, NULL, 0, 0),
+ PACAINITDATA( 5, 0, NULL, 0, 0),
+ PACAINITDATA( 6, 0, NULL, 0, 0),
+ PACAINITDATA( 7, 0, NULL, 0, 0),
+ PACAINITDATA( 8, 0, NULL, 0, 0),
+ PACAINITDATA( 9, 0, NULL, 0, 0),
+ PACAINITDATA(10, 0, NULL, 0, 0),
+ PACAINITDATA(11, 0, NULL, 0, 0),
+ PACAINITDATA(12, 0, NULL, 0, 0),
+ PACAINITDATA(13, 0, NULL, 0, 0),
+ PACAINITDATA(14, 0, NULL, 0, 0),
+ PACAINITDATA(15, 0, NULL, 0, 0),
+ PACAINITDATA(16, 0, NULL, 0, 0),
+ PACAINITDATA(17, 0, NULL, 0, 0),
+ PACAINITDATA(18, 0, NULL, 0, 0),
+ PACAINITDATA(19, 0, NULL, 0, 0),
+ PACAINITDATA(20, 0, NULL, 0, 0),
+ PACAINITDATA(21, 0, NULL, 0, 0),
+ PACAINITDATA(22, 0, NULL, 0, 0),
+ PACAINITDATA(23, 0, NULL, 0, 0),
+ PACAINITDATA(24, 0, NULL, 0, 0),
+ PACAINITDATA(25, 0, NULL, 0, 0),
+ PACAINITDATA(26, 0, NULL, 0, 0),
+ PACAINITDATA(27, 0, NULL, 0, 0),
+ PACAINITDATA(28, 0, NULL, 0, 0),
+ PACAINITDATA(29, 0, NULL, 0, 0),
+ PACAINITDATA(30, 0, NULL, 0, 0),
+ PACAINITDATA(31, 0, NULL, 0, 0),
#if NR_CPUS > 32
- PACAINITDATA(32, 0, 0, 0, 0),
- PACAINITDATA(33, 0, 0, 0, 0),
- PACAINITDATA(34, 0, 0, 0, 0),
- PACAINITDATA(35, 0, 0, 0, 0),
- PACAINITDATA(36, 0, 0, 0, 0),
- PACAINITDATA(37, 0, 0, 0, 0),
- PACAINITDATA(38, 0, 0, 0, 0),
- PACAINITDATA(39, 0, 0, 0, 0),
- PACAINITDATA(40, 0, 0, 0, 0),
- PACAINITDATA(41, 0, 0, 0, 0),
- PACAINITDATA(42, 0, 0, 0, 0),
- PACAINITDATA(43, 0, 0, 0, 0),
- PACAINITDATA(44, 0, 0, 0, 0),
- PACAINITDATA(45, 0, 0, 0, 0),
- PACAINITDATA(46, 0, 0, 0, 0),
- PACAINITDATA(47, 0, 0, 0, 0),
- PACAINITDATA(48, 0, 0, 0, 0),
- PACAINITDATA(49, 0, 0, 0, 0),
- PACAINITDATA(50, 0, 0, 0, 0),
- PACAINITDATA(51, 0, 0, 0, 0),
- PACAINITDATA(52, 0, 0, 0, 0),
- PACAINITDATA(53, 0, 0, 0, 0),
- PACAINITDATA(54, 0, 0, 0, 0),
- PACAINITDATA(55, 0, 0, 0, 0),
- PACAINITDATA(56, 0, 0, 0, 0),
- PACAINITDATA(57, 0, 0, 0, 0),
- PACAINITDATA(58, 0, 0, 0, 0),
- PACAINITDATA(59, 0, 0, 0, 0),
- PACAINITDATA(60, 0, 0, 0, 0),
- PACAINITDATA(61, 0, 0, 0, 0),
- PACAINITDATA(62, 0, 0, 0, 0),
- PACAINITDATA(63, 0, 0, 0, 0),
+ PACAINITDATA(32, 0, NULL, 0, 0),
+ PACAINITDATA(33, 0, NULL, 0, 0),
+ PACAINITDATA(34, 0, NULL, 0, 0),
+ PACAINITDATA(35, 0, NULL, 0, 0),
+ PACAINITDATA(36, 0, NULL, 0, 0),
+ PACAINITDATA(37, 0, NULL, 0, 0),
+ PACAINITDATA(38, 0, NULL, 0, 0),
+ PACAINITDATA(39, 0, NULL, 0, 0),
+ PACAINITDATA(40, 0, NULL, 0, 0),
+ PACAINITDATA(41, 0, NULL, 0, 0),
+ PACAINITDATA(42, 0, NULL, 0, 0),
+ PACAINITDATA(43, 0, NULL, 0, 0),
+ PACAINITDATA(44, 0, NULL, 0, 0),
+ PACAINITDATA(45, 0, NULL, 0, 0),
+ PACAINITDATA(46, 0, NULL, 0, 0),
+ PACAINITDATA(47, 0, NULL, 0, 0),
+ PACAINITDATA(48, 0, NULL, 0, 0),
+ PACAINITDATA(49, 0, NULL, 0, 0),
+ PACAINITDATA(50, 0, NULL, 0, 0),
+ PACAINITDATA(51, 0, NULL, 0, 0),
+ PACAINITDATA(52, 0, NULL, 0, 0),
+ PACAINITDATA(53, 0, NULL, 0, 0),
+ PACAINITDATA(54, 0, NULL, 0, 0),
+ PACAINITDATA(55, 0, NULL, 0, 0),
+ PACAINITDATA(56, 0, NULL, 0, 0),
+ PACAINITDATA(57, 0, NULL, 0, 0),
+ PACAINITDATA(58, 0, NULL, 0, 0),
+ PACAINITDATA(59, 0, NULL, 0, 0),
+ PACAINITDATA(60, 0, NULL, 0, 0),
+ PACAINITDATA(61, 0, NULL, 0, 0),
+ PACAINITDATA(62, 0, NULL, 0, 0),
+ PACAINITDATA(63, 0, NULL, 0, 0),
#if NR_CPUS > 64
- PACAINITDATA(64, 0, 0, 0, 0),
- PACAINITDATA(65, 0, 0, 0, 0),
- PACAINITDATA(66, 0, 0, 0, 0),
- PACAINITDATA(67, 0, 0, 0, 0),
- PACAINITDATA(68, 0, 0, 0, 0),
- PACAINITDATA(69, 0, 0, 0, 0),
- PACAINITDATA(70, 0, 0, 0, 0),
- PACAINITDATA(71, 0, 0, 0, 0),
- PACAINITDATA(72, 0, 0, 0, 0),
- PACAINITDATA(73, 0, 0, 0, 0),
- PACAINITDATA(74, 0, 0, 0, 0),
- PACAINITDATA(75, 0, 0, 0, 0),
- PACAINITDATA(76, 0, 0, 0, 0),
- PACAINITDATA(77, 0, 0, 0, 0),
- PACAINITDATA(78, 0, 0, 0, 0),
- PACAINITDATA(79, 0, 0, 0, 0),
- PACAINITDATA(80, 0, 0, 0, 0),
- PACAINITDATA(81, 0, 0, 0, 0),
- PACAINITDATA(82, 0, 0, 0, 0),
- PACAINITDATA(83, 0, 0, 0, 0),
- PACAINITDATA(84, 0, 0, 0, 0),
- PACAINITDATA(85, 0, 0, 0, 0),
- PACAINITDATA(86, 0, 0, 0, 0),
- PACAINITDATA(87, 0, 0, 0, 0),
- PACAINITDATA(88, 0, 0, 0, 0),
- PACAINITDATA(89, 0, 0, 0, 0),
- PACAINITDATA(90, 0, 0, 0, 0),
- PACAINITDATA(91, 0, 0, 0, 0),
- PACAINITDATA(92, 0, 0, 0, 0),
- PACAINITDATA(93, 0, 0, 0, 0),
- PACAINITDATA(94, 0, 0, 0, 0),
- PACAINITDATA(95, 0, 0, 0, 0),
- PACAINITDATA(96, 0, 0, 0, 0),
- PACAINITDATA(97, 0, 0, 0, 0),
- PACAINITDATA(98, 0, 0, 0, 0),
- PACAINITDATA(99, 0, 0, 0, 0),
- PACAINITDATA(100, 0, 0, 0, 0),
- PACAINITDATA(101, 0, 0, 0, 0),
- PACAINITDATA(102, 0, 0, 0, 0),
- PACAINITDATA(103, 0, 0, 0, 0),
- PACAINITDATA(104, 0, 0, 0, 0),
- PACAINITDATA(105, 0, 0, 0, 0),
- PACAINITDATA(106, 0, 0, 0, 0),
- PACAINITDATA(107, 0, 0, 0, 0),
- PACAINITDATA(108, 0, 0, 0, 0),
- PACAINITDATA(109, 0, 0, 0, 0),
- PACAINITDATA(110, 0, 0, 0, 0),
- PACAINITDATA(111, 0, 0, 0, 0),
- PACAINITDATA(112, 0, 0, 0, 0),
- PACAINITDATA(113, 0, 0, 0, 0),
- PACAINITDATA(114, 0, 0, 0, 0),
- PACAINITDATA(115, 0, 0, 0, 0),
- PACAINITDATA(116, 0, 0, 0, 0),
- PACAINITDATA(117, 0, 0, 0, 0),
- PACAINITDATA(118, 0, 0, 0, 0),
- PACAINITDATA(119, 0, 0, 0, 0),
- PACAINITDATA(120, 0, 0, 0, 0),
- PACAINITDATA(121, 0, 0, 0, 0),
- PACAINITDATA(122, 0, 0, 0, 0),
- PACAINITDATA(123, 0, 0, 0, 0),
- PACAINITDATA(124, 0, 0, 0, 0),
- PACAINITDATA(125, 0, 0, 0, 0),
- PACAINITDATA(126, 0, 0, 0, 0),
- PACAINITDATA(127, 0, 0, 0, 0),
+ PACAINITDATA(64, 0, NULL, 0, 0),
+ PACAINITDATA(65, 0, NULL, 0, 0),
+ PACAINITDATA(66, 0, NULL, 0, 0),
+ PACAINITDATA(67, 0, NULL, 0, 0),
+ PACAINITDATA(68, 0, NULL, 0, 0),
+ PACAINITDATA(69, 0, NULL, 0, 0),
+ PACAINITDATA(70, 0, NULL, 0, 0),
+ PACAINITDATA(71, 0, NULL, 0, 0),
+ PACAINITDATA(72, 0, NULL, 0, 0),
+ PACAINITDATA(73, 0, NULL, 0, 0),
+ PACAINITDATA(74, 0, NULL, 0, 0),
+ PACAINITDATA(75, 0, NULL, 0, 0),
+ PACAINITDATA(76, 0, NULL, 0, 0),
+ PACAINITDATA(77, 0, NULL, 0, 0),
+ PACAINITDATA(78, 0, NULL, 0, 0),
+ PACAINITDATA(79, 0, NULL, 0, 0),
+ PACAINITDATA(80, 0, NULL, 0, 0),
+ PACAINITDATA(81, 0, NULL, 0, 0),
+ PACAINITDATA(82, 0, NULL, 0, 0),
+ PACAINITDATA(83, 0, NULL, 0, 0),
+ PACAINITDATA(84, 0, NULL, 0, 0),
+ PACAINITDATA(85, 0, NULL, 0, 0),
+ PACAINITDATA(86, 0, NULL, 0, 0),
+ PACAINITDATA(87, 0, NULL, 0, 0),
+ PACAINITDATA(88, 0, NULL, 0, 0),
+ PACAINITDATA(89, 0, NULL, 0, 0),
+ PACAINITDATA(90, 0, NULL, 0, 0),
+ PACAINITDATA(91, 0, NULL, 0, 0),
+ PACAINITDATA(92, 0, NULL, 0, 0),
+ PACAINITDATA(93, 0, NULL, 0, 0),
+ PACAINITDATA(94, 0, NULL, 0, 0),
+ PACAINITDATA(95, 0, NULL, 0, 0),
+ PACAINITDATA(96, 0, NULL, 0, 0),
+ PACAINITDATA(97, 0, NULL, 0, 0),
+ PACAINITDATA(98, 0, NULL, 0, 0),
+ PACAINITDATA(99, 0, NULL, 0, 0),
+ PACAINITDATA(100, 0, NULL, 0, 0),
+ PACAINITDATA(101, 0, NULL, 0, 0),
+ PACAINITDATA(102, 0, NULL, 0, 0),
+ PACAINITDATA(103, 0, NULL, 0, 0),
+ PACAINITDATA(104, 0, NULL, 0, 0),
+ PACAINITDATA(105, 0, NULL, 0, 0),
+ PACAINITDATA(106, 0, NULL, 0, 0),
+ PACAINITDATA(107, 0, NULL, 0, 0),
+ PACAINITDATA(108, 0, NULL, 0, 0),
+ PACAINITDATA(109, 0, NULL, 0, 0),
+ PACAINITDATA(110, 0, NULL, 0, 0),
+ PACAINITDATA(111, 0, NULL, 0, 0),
+ PACAINITDATA(112, 0, NULL, 0, 0),
+ PACAINITDATA(113, 0, NULL, 0, 0),
+ PACAINITDATA(114, 0, NULL, 0, 0),
+ PACAINITDATA(115, 0, NULL, 0, 0),
+ PACAINITDATA(116, 0, NULL, 0, 0),
+ PACAINITDATA(117, 0, NULL, 0, 0),
+ PACAINITDATA(118, 0, NULL, 0, 0),
+ PACAINITDATA(119, 0, NULL, 0, 0),
+ PACAINITDATA(120, 0, NULL, 0, 0),
+ PACAINITDATA(121, 0, NULL, 0, 0),
+ PACAINITDATA(122, 0, NULL, 0, 0),
+ PACAINITDATA(123, 0, NULL, 0, 0),
+ PACAINITDATA(124, 0, NULL, 0, 0),
+ PACAINITDATA(125, 0, NULL, 0, 0),
+ PACAINITDATA(126, 0, NULL, 0, 0),
+ PACAINITDATA(127, 0, NULL, 0, 0),
#endif
#endif
};
*******************************************************************/
struct device_node;
typedef void *(*traverse_func)(struct device_node *me, void *data);
-void *traverse_pci_devices(struct device_node *start, traverse_func pre, traverse_func post, void *data);
-void *traverse_all_pci_devices(traverse_func pre);
+void *traverse_pci_devices(struct device_node *start, traverse_func pre,
+ void *data);
void pci_devs_phb_init(void);
void pci_fix_bus_sysdata(void);
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "pci.h"
-/* Traverse_func that inits the PCI fields of the device node.
+/*
+ * Traverse_func that inits the PCI fields of the device node.
* NOTE: this *must* be done before read/write config to the device.
*/
-static void * __init
-update_dn_pci_info(struct device_node *dn, void *data)
+static void * __init update_dn_pci_info(struct device_node *dn, void *data)
{
-#ifdef CONFIG_PPC_PSERIES
- struct pci_controller *phb = (struct pci_controller *)data;
+ struct pci_controller *phb = data;
u32 *regs;
- char *device_type = get_property(dn, "device_type", 0);
+ char *device_type = get_property(dn, "device_type", NULL);
char *model;
dn->phb = phb;
- if (device_type && strcmp(device_type, "pci") == 0 && get_property(dn, "class-code", 0) == 0) {
+ if (device_type && (strcmp(device_type, "pci") == 0) &&
+ (get_property(dn, "class-code", NULL) == 0)) {
/* special case for PHB's. Sigh. */
- regs = (u32 *)get_property(dn, "bus-range", 0);
+ regs = (u32 *)get_property(dn, "bus-range", NULL);
dn->busno = regs[0];
model = (char *)get_property(dn, "model", NULL);
else
dn->devfn = 0; /* assumption */
} else {
- regs = (u32 *)get_property(dn, "reg", 0);
+ regs = (u32 *)get_property(dn, "reg", NULL);
if (regs) {
/* First register entry is addr (00BBSS00) */
dn->busno = (regs[0] >> 16) & 0xff;
dn->devfn = (regs[0] >> 8) & 0xff;
}
}
-#endif
return NULL;
}
-/******************************************************************
+/*
* Traverse a device tree stopping each PCI device in the tree.
* This is done depth first. As each node is processed, a "pre"
- * function is called, the children are processed recursively, and
- * then a "post" function is called.
+ * function is called and the children are processed recursively.
*
- * The "pre" and "post" funcs return a value. If non-zero
- * is returned from the "pre" func, the traversal stops and this
- * value is returned. The return value from "post" is not used.
- * This return value is useful when using traverse as
- * a method of finding a device.
+ * The "pre" func returns a value. If non-zero is returned from
+ * the "pre" func, the traversal stops and this value is returned.
+ * This return value is useful when using traverse as a method of
+ * finding a device.
*
- * NOTE: we do not run the funcs for devices that do not appear to
+ * NOTE: we do not run the func for devices that do not appear to
* be PCI except for the start node which we assume (this is good
* because the start node is often a phb which may be missing PCI
* properties).
* We use the class-code as an indicator. If we run into
* one of these nodes we also assume its siblings are non-pci for
* performance.
- *
- ******************************************************************/
-void *traverse_pci_devices(struct device_node *start, traverse_func pre, traverse_func post, void *data)
+ */
+void *traverse_pci_devices(struct device_node *start, traverse_func pre,
+ void *data)
{
struct device_node *dn, *nextdn;
void *ret;
- if (pre && (ret = pre(start, data)) != NULL)
+ if (pre && ((ret = pre(start, data)) != NULL))
return ret;
for (dn = start->child; dn; dn = nextdn) {
nextdn = NULL;
-#ifdef CONFIG_PPC_PSERIES
- if (get_property(dn, "class-code", 0)) {
- if (pre && (ret = pre(dn, data)) != NULL)
+ if (get_property(dn, "class-code", NULL)) {
+ if (pre && ((ret = pre(dn, data)) != NULL))
return ret;
- if (dn->child) {
+ if (dn->child)
/* Depth first...do children */
nextdn = dn->child;
- } else if (dn->sibling) {
+ else if (dn->sibling)
/* ok, try next sibling instead. */
nextdn = dn->sibling;
- } else {
- /* no more children or siblings...call "post" */
- if (post)
- post(dn, data);
- }
}
-#endif
if (!nextdn) {
/* Walk up to next valid sibling. */
do {
return NULL;
}
-/* Same as traverse_pci_devices except this does it for all phbs.
+/*
+ * Same as traverse_pci_devices except this does it for all phbs.
*/
-void *traverse_all_pci_devices(traverse_func pre)
+static void *traverse_all_pci_devices(traverse_func pre)
{
- struct pci_controller* phb;
+ struct pci_controller *phb;
void *ret;
- for (phb=hose_head;phb;phb=phb->next)
- if ((ret = traverse_pci_devices((struct device_node *)phb->arch_data, pre, NULL, phb)) != NULL)
+
+ for (phb = hose_head; phb; phb = phb->next)
+ if ((ret = traverse_pci_devices(phb->arch_data, pre, phb))
+ != NULL)
return ret;
return NULL;
}
-/* Traversal func that looks for a <busno,devfcn> value.
+/*
+ * Traversal func that looks for a <busno,devfcn> value.
* If found, the device_node is returned (thus terminating the traversal).
*/
-static void *
-is_devfn_node(struct device_node *dn, void *data)
+static void *is_devfn_node(struct device_node *dn, void *data)
{
int busno = ((unsigned long)data >> 8) & 0xff;
int devfn = ((unsigned long)data) & 0xff;
- return (devfn == dn->devfn && busno == dn->busno) ? dn : NULL;
+ return ((devfn == dn->devfn) && (busno == dn->busno)) ? dn : NULL;
}
-/* This is the "slow" path for looking up a device_node from a
+/*
+ * This is the "slow" path for looking up a device_node from a
* pci_dev. It will hunt for the device under its parent's
* phb and then update sysdata for a future fastpath.
*
*/
struct device_node *fetch_dev_dn(struct pci_dev *dev)
{
- struct device_node *orig_dn = (struct device_node *)dev->sysdata;
+ struct device_node *orig_dn = dev->sysdata;
struct pci_controller *phb = orig_dn->phb; /* assume same phb as orig_dn */
struct device_node *phb_dn;
struct device_node *dn;
unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
- phb_dn = (struct device_node *)(phb->arch_data);
- dn = (struct device_node *)traverse_pci_devices(phb_dn, is_devfn_node, NULL, (void *)searchval);
+ phb_dn = phb->arch_data;
+ dn = traverse_pci_devices(phb_dn, is_devfn_node, (void *)searchval);
if (dn) {
dev->sysdata = dn;
/* ToDo: call some device init hook here */
EXPORT_SYMBOL(fetch_dev_dn);
-/******************************************************************
+/*
* Actually initialize the phbs.
* The buswalk on this phb has not happened yet.
- ******************************************************************/
-void __init
-pci_devs_phb_init(void)
+ */
+void __init pci_devs_phb_init(void)
{
/* This must be done first so the device nodes have valid pci info! */
traverse_all_pci_devices(update_dn_pci_info);
}
-static void __init
-pci_fixup_bus_sysdata_list(struct list_head *bus_list)
+static void __init pci_fixup_bus_sysdata_list(struct list_head *bus_list)
{
struct list_head *ln;
struct pci_bus *bus;
- for (ln=bus_list->next; ln != bus_list; ln=ln->next) {
+ for (ln = bus_list->next; ln != bus_list; ln = ln->next) {
bus = pci_bus_b(ln);
if (bus->self)
bus->sysdata = bus->self->sysdata;
}
}
-/******************************************************************
+/*
* Fixup the bus->sysdata ptrs to point to the bus' device_node.
* This is done late in pcibios_init(). We do this mostly for
* sanity, but pci_dma.c uses these at DMA time so they must be
* To do this we recurse down the bus hierarchy. Note that PHB's
* have bus->self == NULL, but fortunately bus->sysdata is already
* correct in this case.
- ******************************************************************/
-void __init
-pci_fix_bus_sysdata(void)
+ */
+void __init pci_fix_bus_sysdata(void)
{
pci_fixup_bus_sysdata_list(&pci_root_buses);
}
int len;
/* For PCI<->PCI bridges or CardBus bridges, we go down */
- class_code = (unsigned int *) get_property(node, "class-code", 0);
+ class_code = (unsigned int *) get_property(node, "class-code", NULL);
if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
continue;
{
struct proc_dir_entry *root;
- root = proc_mkdir("ppc64", 0);
+ root = proc_mkdir("ppc64", NULL);
if (!root)
return 1;
if (!proc_mkdir("rtas", root))
return 1;
- if (!proc_symlink("rtas", 0, "ppc64/rtas"))
+ if (!proc_symlink("rtas", NULL, "ppc64/rtas"))
return 1;
return 0;
return (file->f_pos = new);
}
-static ssize_t page_map_read( struct file *file, char *buf, size_t nbytes, loff_t *ppos)
+static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
- unsigned pos = *ppos;
struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
-
- if ( pos >= dp->size )
- return 0;
- if ( nbytes >= dp->size )
- nbytes = dp->size;
- if ( pos + nbytes > dp->size )
- nbytes = dp->size - pos;
-
- copy_to_user( buf, (char *)dp->data + pos, nbytes );
- *ppos = pos + nbytes;
- return nbytes;
+ return simple_read_from_buffer(buf, nbytes, ppos, dp->data, dp->size);
}
static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
/* prom structure */
struct prom_t prom;
-char *prom_display_paths[FB_MAX] __initdata = { 0, };
+char *prom_display_paths[FB_MAX] __initdata = { NULL, };
phandle prom_display_nodes[FB_MAX] __initdata;
unsigned int prom_num_displays = 0;
-char *of_stdout_device = 0;
+char *of_stdout_device = NULL;
static int iommu_force_on;
int ppc64_iommu_off;
#define MAX_PHB (32 * 6) /* 32 drawers * 6 PHBs/drawer */
struct of_tce_table of_tce_table[MAX_PHB + 1];
-char *bootpath = 0;
-char *bootdevice = 0;
+char *bootpath = NULL;
+char *bootdevice = NULL;
int boot_cpuid = 0;
#define MAX_CPU_THREADS 2
-struct device_node *allnodes = 0;
+struct device_node *allnodes = NULL;
/* use when traversing tree through the allnext, child, sibling,
* or parent members of struct device_node.
*/
unsigned long offset = reloc_offset();
struct prom_t *_prom = PTRRELOC(&prom);
va_list list;
-
+
_prom->args.service = ADDR(service);
_prom->args.nargs = nargs;
_prom->args.nret = nret;
- _prom->args.rets = (prom_arg_t *)&(_prom->args.args[nargs]);
+ _prom->args.rets = (prom_arg_t *)&(_prom->args.args[nargs]);
- va_start(list, nret);
+ va_start(list, nret);
for (i=0; i < nargs; i++)
_prom->args.args[i] = va_arg(list, prom_arg_t);
- va_end(list);
+ va_end(list);
for (i=0; i < nret ;i++)
_prom->args.rets[i] = 0;
static void __init prom_print_hex(unsigned long val)
{
unsigned long offset = reloc_offset();
- int i, nibbles = sizeof(val)*2;
- char buf[sizeof(val)*2+1];
+ int i, nibbles = sizeof(val)*2;
+ char buf[sizeof(val)*2+1];
struct prom_t *_prom = PTRRELOC(&prom);
- for (i = nibbles-1; i >= 0; i--) {
- buf[i] = (val & 0xf) + '0';
- if (buf[i] > '9')
- buf[i] += ('a'-'0'-10);
- val >>= 4;
- }
- buf[nibbles] = '\0';
+ for (i = nibbles-1; i >= 0; i--) {
+ buf[i] = (val & 0xf) + '0';
+ if (buf[i] > '9')
+ buf[i] += ('a'-'0'-10);
+ val >>= 4;
+ }
+ buf[nibbles] = '\0';
call_prom("write", 3, 1, _prom->stdout, buf, nibbles);
}
{
phandle node;
char type[64];
- unsigned long num_cpus = 0;
- unsigned long offset = reloc_offset();
+ unsigned long num_cpus = 0;
+ unsigned long offset = reloc_offset();
struct prom_t *_prom = PTRRELOC(&prom);
- struct naca_struct *_naca = RELOC(naca);
- struct systemcfg *_systemcfg = RELOC(systemcfg);
+ struct naca_struct *_naca = RELOC(naca);
+ struct systemcfg *_systemcfg = RELOC(systemcfg);
/* NOTE: _naca->debug_switch is already initialized. */
prom_debug("prom_initialize_naca: start...\n");
_naca->pftSize = 0; /* ilog2 of htab size. computed below. */
- for (node = 0; prom_next_node(&node); ) {
- type[0] = 0;
+ for (node = 0; prom_next_node(&node); ) {
+ type[0] = 0;
prom_getprop(node, "device_type", type, sizeof(type));
- if (!strcmp(type, RELOC("cpu"))) {
+ if (!strcmp(type, RELOC("cpu"))) {
num_cpus += 1;
/* We're assuming *all* of the CPUs have the same
_naca->pftSize = pft_size[1];
}
}
- } else if (!strcmp(type, RELOC("serial"))) {
+ } else if (!strcmp(type, RELOC("serial"))) {
phandle isa, pci;
struct isa_reg_property reg;
union pci_range ranges;
((((unsigned long)ranges.pci64.phys_hi) << 32) |
(ranges.pci64.phys_lo)) + reg.address;
}
- }
+ }
}
if (_systemcfg->platform == PLATFORM_POWERMAC)
}
/* We gotta have at least 1 cpu... */
- if ( (_systemcfg->processorCount = num_cpus) < 1 )
- PROM_BUG();
+ if ( (_systemcfg->processorCount = num_cpus) < 1 )
+ PROM_BUG();
_systemcfg->physicalMemorySize = lmb_phys_mem_size();
_systemcfg->version.minor = SYSTEMCFG_MINOR;
_systemcfg->processor = _get_PVR();
- prom_debug("systemcfg->processorCount = 0x%x\n",
+ prom_debug("systemcfg->processorCount = 0x%x\n",
_systemcfg->processorCount);
- prom_debug("systemcfg->physicalMemorySize = 0x%x\n",
+ prom_debug("systemcfg->physicalMemorySize = 0x%x\n",
_systemcfg->physicalMemorySize);
- prom_debug("naca->pftSize = 0x%x\n",
+ prom_debug("naca->pftSize = 0x%x\n",
_naca->pftSize);
- prom_debug("systemcfg->dCacheL1LineSize = 0x%x\n",
+ prom_debug("systemcfg->dCacheL1LineSize = 0x%x\n",
_systemcfg->dCacheL1LineSize);
- prom_debug("systemcfg->iCacheL1LineSize = 0x%x\n",
+ prom_debug("systemcfg->iCacheL1LineSize = 0x%x\n",
_systemcfg->iCacheL1LineSize);
- prom_debug("naca->serialPortAddr = 0x%x\n",
+ prom_debug("naca->serialPortAddr = 0x%x\n",
_naca->serialPortAddr);
- prom_debug("naca->interrupt_controller = 0x%x\n",
+ prom_debug("naca->interrupt_controller = 0x%x\n",
_naca->interrupt_controller);
- prom_debug("systemcfg->platform = 0x%x\n",
+ prom_debug("systemcfg->platform = 0x%x\n",
_systemcfg->platform);
prom_debug("prom_initialize_naca: end...\n");
}
#ifdef DEBUG_PROM
void prom_dump_lmb(void)
{
- unsigned long i;
- unsigned long offset = reloc_offset();
+ unsigned long i;
+ unsigned long offset = reloc_offset();
struct lmb *_lmb = PTRRELOC(&lmb);
- prom_printf("\nprom_dump_lmb:\n");
- prom_printf(" memory.cnt = 0x%x\n",
+ prom_printf("\nprom_dump_lmb:\n");
+ prom_printf(" memory.cnt = 0x%x\n",
_lmb->memory.cnt);
- prom_printf(" memory.size = 0x%x\n",
+ prom_printf(" memory.size = 0x%x\n",
_lmb->memory.size);
- for (i=0; i < _lmb->memory.cnt ;i++) {
- prom_printf(" memory.region[0x%x].base = 0x%x\n",
+ for (i=0; i < _lmb->memory.cnt ;i++) {
+ prom_printf(" memory.region[0x%x].base = 0x%x\n",
i, _lmb->memory.region[i].base);
- prom_printf(" .physbase = 0x%x\n",
+ prom_printf(" .physbase = 0x%x\n",
_lmb->memory.region[i].physbase);
- prom_printf(" .size = 0x%x\n",
+ prom_printf(" .size = 0x%x\n",
_lmb->memory.region[i].size);
- }
+ }
- prom_printf("\n reserved.cnt = 0x%x\n",
+ prom_printf("\n reserved.cnt = 0x%x\n",
_lmb->reserved.cnt);
- prom_printf(" reserved.size = 0x%x\n",
+ prom_printf(" reserved.size = 0x%x\n",
_lmb->reserved.size);
- for (i=0; i < _lmb->reserved.cnt ;i++) {
- prom_printf(" reserved.region[0x%x\n].base = 0x%x\n",
+ for (i=0; i < _lmb->reserved.cnt ;i++) {
+ prom_printf(" reserved.region[0x%x\n].base = 0x%x\n",
i, _lmb->reserved.region[i].base);
- prom_printf(" .physbase = 0x%x\n",
+ prom_printf(" .physbase = 0x%x\n",
_lmb->reserved.region[i].physbase);
- prom_printf(" .size = 0x%x\n",
+ prom_printf(" .size = 0x%x\n",
_lmb->reserved.region[i].size);
- }
+ }
}
#endif /* DEBUG_PROM */
{
phandle node;
char type[64];
- unsigned long i, offset = reloc_offset();
+ unsigned long i, offset = reloc_offset();
struct prom_t *_prom = PTRRELOC(&prom);
- struct systemcfg *_systemcfg = RELOC(systemcfg);
+ struct systemcfg *_systemcfg = RELOC(systemcfg);
union lmb_reg_property reg;
unsigned long lmb_base, lmb_size;
unsigned long num_regs, bytes_per_reg = (_prom->encode_phys_size*2)/8;
if (_systemcfg->platform == PLATFORM_POWERMAC)
bytes_per_reg = 12;
- for (node = 0; prom_next_node(&node); ) {
- type[0] = 0;
- prom_getprop(node, "device_type", type, sizeof(type));
+ for (node = 0; prom_next_node(&node); ) {
+ type[0] = 0;
+ prom_getprop(node, "device_type", type, sizeof(type));
- if (strcmp(type, RELOC("memory")))
+ if (strcmp(type, RELOC("memory")))
continue;
num_regs = prom_getprop(node, "reg", ®, sizeof(reg))
struct rtas_t *_rtas = PTRRELOC(&rtas);
struct systemcfg *_systemcfg = RELOC(systemcfg);
ihandle prom_rtas;
- u32 getprop_rval;
+ u32 getprop_rval;
char hypertas_funcs[4];
prom_debug("prom_instantiate_rtas: start...\n");
prom_getprop(prom_rtas, "rtas-size",
&getprop_rval, sizeof(getprop_rval));
- _rtas->size = getprop_rval;
+ _rtas->size = getprop_rval;
prom_printf("instantiating rtas");
if (_rtas->size != 0) {
unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
prom_printf(" done\n");
}
- prom_debug("rtas->base = 0x%x\n", _rtas->base);
- prom_debug("rtas->entry = 0x%x\n", _rtas->entry);
- prom_debug("rtas->size = 0x%x\n", _rtas->size);
+ prom_debug("rtas->base = 0x%x\n", _rtas->base);
+ prom_debug("rtas->entry = 0x%x\n", _rtas->entry);
+ prom_debug("rtas->size = 0x%x\n", _rtas->size);
}
prom_debug("prom_instantiate_rtas: end...\n");
}
{
phandle node;
ihandle phb_node;
- unsigned long offset = reloc_offset();
+ unsigned long offset = reloc_offset();
char compatible[64], path[64], type[64], model[64];
unsigned long i, table = 0;
unsigned long base, vbase, align;
/* Keep the old logic in tack to avoid regression. */
if (compatible[0] != 0) {
- if((strstr(compatible, RELOC("python")) == NULL) &&
- (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
- (strstr(compatible, RELOC("Winnipeg")) == NULL))
+ if ((strstr(compatible, RELOC("python")) == NULL) &&
+ (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
+ (strstr(compatible, RELOC("Winnipeg")) == NULL))
continue;
} else if (model[0] != 0) {
if ((strstr(model, RELOC("ython")) == NULL) &&
/* Call OF to setup the TCE hardware */
if (call_prom("package-to-path", 3, 1, node,
path, sizeof(path)-1) == PROM_ERROR) {
- prom_printf("package-to-path failed\n");
- } else {
- prom_printf("opening PHB %s", path);
- }
-
- phb_node = call_prom("open", 1, 1, path);
- if ( (long)phb_node <= 0) {
- prom_printf("... failed\n");
- } else {
- prom_printf("... done\n");
- }
- call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
+ prom_printf("package-to-path failed\n");
+ } else {
+ prom_printf("opening PHB %s", path);
+ }
+
+ phb_node = call_prom("open", 1, 1, path);
+ if ( (long)phb_node <= 0) {
+ prom_printf("... failed\n");
+ } else {
+ prom_printf("... done\n");
+ }
+ call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
phb_node, -1, minsize,
(u32) base, (u32) (base >> 32));
- call_prom("close", 1, 0, phb_node);
+ call_prom("close", 1, 0, phb_node);
table++;
}
unsigned int cpu_threads, hw_cpu_num;
int propsize;
extern void __secondary_hold(void);
- extern unsigned long __secondary_hold_spinloop;
- extern unsigned long __secondary_hold_acknowledge;
- unsigned long *spinloop
+ extern unsigned long __secondary_hold_spinloop;
+ extern unsigned long __secondary_hold_acknowledge;
+ unsigned long *spinloop
= (void *)virt_to_abs(&__secondary_hold_spinloop);
- unsigned long *acknowledge
+ unsigned long *acknowledge
= (void *)virt_to_abs(&__secondary_hold_acknowledge);
- unsigned long secondary_hold
+ unsigned long secondary_hold
= virt_to_abs(*PTRRELOC((unsigned long *)__secondary_hold));
- struct systemcfg *_systemcfg = RELOC(systemcfg);
+ struct systemcfg *_systemcfg = RELOC(systemcfg);
struct paca_struct *lpaca = PTRRELOC(&paca[0]);
struct prom_t *_prom = PTRRELOC(&prom);
#ifdef CONFIG_SMP
prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
- /* Set the common spinloop variable, so all of the secondary cpus
+ /* Set the common spinloop variable, so all of the secondary cpus
* will block when they are awakened from their OF spinloop.
* This must occur for both SMP and non SMP kernels, since OF will
* be trashed when we move the kernel.
- */
- *spinloop = 0;
+ */
+ *spinloop = 0;
#ifdef CONFIG_HMT
for (i=0; i < NR_CPUS; i++) {
if (strcmp(type, RELOC("okay")) != 0)
continue;
- reg = -1;
+ reg = -1;
prom_getprop(node, "reg", ®, sizeof(reg));
path = (char *) mem;
ihandle prom_options = 0;
char option[9];
unsigned long offset = reloc_offset();
- struct naca_struct *_naca = RELOC(naca);
+ struct naca_struct *_naca = RELOC(naca);
char found = 0;
if (strstr(RELOC(cmd_line), RELOC("smt-enabled="))) {
struct prom_t *_prom = PTRRELOC(&prom);
u32 val;
- if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
- prom_panic("cannot find stdout");
+ if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
+ prom_panic("cannot find stdout");
- _prom->stdout = val;
+ _prom->stdout = val;
}
static int __init prom_find_machine_type(void)
ihandle ih;
int i, j;
unsigned long offset = reloc_offset();
- struct prom_t *_prom = PTRRELOC(&prom);
+ struct prom_t *_prom = PTRRELOC(&prom);
char type[16], *path;
static unsigned char default_colors[] = {
0x00, 0x00, 0x00,
break;
#endif /* CONFIG_LOGO_LINUX_CLUT224 */
}
-
+
return DOUBLEWORD_ALIGN(mem);
}
unsigned long needed, unsigned long align)
{
void *ret;
- unsigned long offset = reloc_offset();
*mem_start = ALIGN(*mem_start, align);
if (*mem_start + needed > *mem_end) {
#ifdef CONFIG_BLK_DEV_INITRD
+ unsigned long offset = reloc_offset();
/* FIXME: Apple OF doesn't map unclaimed mem. If this
* ever happened on G5, we'd need to fix. */
unsigned long initrd_len;
prom_panic("couldn't get device tree root\n");
}
allnextp = &RELOC(allnodes);
- inspect_node(root, 0, &mem_start, &mem_end, &allnextp);
- *allnextp = 0;
+ inspect_node(root, NULL, &mem_start, &mem_end, &allnextp);
+ *allnextp = NULL;
return mem_start;
}
{
struct bi_record *first, *last;
- prom_debug("birec_verify: r6=0x%x\n", (unsigned long)bi_recs);
+ prom_debug("birec_verify: r6=0x%x\n", (unsigned long)bi_recs);
if (bi_recs != NULL)
prom_debug(" tag=0x%x\n", bi_recs->tag);
last = (struct bi_record *)(long)bi_recs->data[0];
- prom_debug(" last=0x%x\n", (unsigned long)last);
+ prom_debug(" last=0x%x\n", (unsigned long)last);
if (last != NULL)
prom_debug(" last_tag=0x%x\n", last->tag);
return NULL;
first = (struct bi_record *)(long)last->data[0];
- prom_debug(" first=0x%x\n", (unsigned long)first);
+ prom_debug(" first=0x%x\n", (unsigned long)first);
if ( first == NULL || first != bi_recs )
return NULL;
/* Init prom stdout device */
prom_init_stdout();
- prom_debug("klimit=0x%x\n", RELOC(klimit));
- prom_debug("offset=0x%x\n", offset);
- prom_debug("->mem=0x%x\n", RELOC(klimit) - offset);
+ prom_debug("klimit=0x%x\n", RELOC(klimit));
+ prom_debug("offset=0x%x\n", offset);
+ prom_debug("->mem=0x%x\n", RELOC(klimit) - offset);
/* check out if we have bi_recs */
_prom->bi_recs = prom_bi_rec_verify((struct bi_record *)r6);
copy_and_flush(0, KERNELBASE - offset, 0x100, 0);
/* Start storing things at klimit */
- mem = RELOC(klimit) - offset;
+ mem = RELOC(klimit) - offset;
/* Get the full OF pathname of the stdout device */
p = (char *) mem;
_prom->encode_phys_size = (getprop_rval == 1) ? 32 : 64;
/* Determine which cpu is actually running right _now_ */
- if (prom_getprop(_prom->chosen, "cpu",
+ if (prom_getprop(_prom->chosen, "cpu",
&prom_cpu, sizeof(prom_cpu)) <= 0)
- prom_panic("cannot find boot cpu");
+ prom_panic("cannot find boot cpu");
cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
RELOC(boot_cpuid) = 0;
- prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
+ prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
/* Get the boot device and translate it to a full OF pathname. */
p = (char *) mem;
if (_systemcfg->platform != PLATFORM_POWERMAC)
prom_instantiate_rtas();
- /* Initialize some system info into the Naca early... */
- prom_initialize_naca();
+ /* Initialize some system info into the Naca early... */
+ prom_initialize_naca();
smt_setup();
- /* If we are on an SMP machine, then we *MUST* do the
- * following, regardless of whether we have an SMP
- * kernel or not.
- */
+ /* If we are on an SMP machine, then we *MUST* do the
+ * following, regardless of whether we have an SMP
+ * kernel or not.
+ */
prom_hold_cpus(mem);
- prom_debug("after basic inits, mem=0x%x\n", mem);
+ prom_debug("after basic inits, mem=0x%x\n", mem);
#ifdef CONFIG_BLK_DEV_INITRD
prom_debug("initrd_start=0x%x\n", RELOC(initrd_start));
prom_debug("initrd_end=0x%x\n", RELOC(initrd_end));
RELOC(klimit) = mem + offset;
prom_debug("new klimit is\n");
- prom_debug("klimit=0x%x\n", RELOC(klimit));
+ prom_debug("klimit=0x%x\n", RELOC(klimit));
prom_debug(" ->mem=0x%x\n", mem);
lmb_reserve(0, __pa(RELOC(klimit)));
* Find out the size of each entry of the interrupts property
* for a node.
*/
-static int __devinit
-prom_n_intr_cells(struct device_node *np)
+int __devinit prom_n_intr_cells(struct device_node *np)
{
struct device_node *p;
unsigned int *icp;
|| get_property(p, "interrupt-map", NULL) != NULL) {
printk("oops, node %s doesn't have #interrupt-cells\n",
p->full_name);
- return 1;
+ return 1;
}
}
#ifdef DEBUG_IRQ
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= sizeof(struct pci_reg_property)) >= 0) {
- if (!measure_only) {
+ if (!measure_only) {
adr[i].space = pci_addrs[i].addr.a_hi;
adr[i].address = pci_addrs[i].addr.a_lo;
adr[i].size = pci_addrs[i].size_lo;
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= sizeof(struct reg_property32)) >= 0) {
- if (!measure_only) {
+ if (!measure_only) {
adr[i].space = 2;
adr[i].address = rp[i].address + base_address;
adr[i].size = rp[i].size;
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= sizeof(struct reg_property32)) >= 0) {
- if (!measure_only) {
+ if (!measure_only) {
adr[i].space = 2;
adr[i].address = rp[i].address + base_address;
adr[i].size = rp[i].size;
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= sizeof(struct reg_property)) >= 0) {
- if (!measure_only) {
+ if (!measure_only) {
adr[i].space = rp[i].space;
adr[i].address = rp[i].address;
adr[i].size = rp[i].size;
i = 0;
adr = (struct address_range *) mem_start;
while ((l -= rpsize) >= 0) {
- if (!measure_only) {
+ if (!measure_only) {
adr[i].space = 0;
adr[i].address = rp[naddrc - 1];
adr[i].size = rp[naddrc + nsizec - 1];
struct device_node *child;
int *ip;
- np->name = get_property(np, "name", 0);
- np->type = get_property(np, "device_type", 0);
+ np->name = get_property(np, "name", NULL);
+ np->type = get_property(np, "device_type", NULL);
if (!np->name)
np->name = "<NULL>";
mem_start = finish_node_interrupts(np, mem_start, measure_only);
/* Look for #address-cells and #size-cells properties. */
- ip = (int *) get_property(np, "#address-cells", 0);
+ ip = (int *) get_property(np, "#address-cells", NULL);
if (ip != NULL)
naddrc = *ip;
- ip = (int *) get_property(np, "#size-cells", 0);
+ ip = (int *) get_property(np, "#size-cells", NULL);
if (ip != NULL)
nsizec = *ip;
* expect for the name -- Cort
*/
if (!strcmp(np->name, "display"))
- np->name = get_property(np, "compatible", 0);
+ np->name = get_property(np, "compatible", NULL);
if (!strcmp(np->name, "device-tree") || np->parent == NULL)
ifunc = interpret_root_props;
return mem_start;
}
-/*
+/**
* finish_device_tree is called once things are running normally
* (i.e. with text and data mapped to the address they were linked at).
* It traverses the device tree and fills in the name, type,
do {
if (np->parent)
np = np->parent;
- ip = (int *) get_property(np, "#address-cells", 0);
+ ip = (int *) get_property(np, "#address-cells", NULL);
if (ip != NULL)
return *ip;
} while (np->parent);
do {
if (np->parent)
np = np->parent;
- ip = (int *) get_property(np, "#size-cells", 0);
+ ip = (int *) get_property(np, "#size-cells", NULL);
if (ip != NULL)
return *ip;
} while (np->parent);
return 1;
}
-/*
+/**
* Work out the sense (active-low level / active-high edge)
* of each interrupt from the device tree.
*/
}
}
-/*
+/**
* Construct and return a list of the device_nodes with a given name.
*/
struct device_node *
prevp = &np->next;
}
}
- *prevp = 0;
+ *prevp = NULL;
return head;
}
-/*
+/**
* Construct and return a list of the device_nodes with a given type.
*/
struct device_node *
prevp = &np->next;
}
}
- *prevp = 0;
+ *prevp = NULL;
return head;
}
-/*
+/**
* Returns all nodes linked together
*/
struct device_node *
*prevp = np;
prevp = &np->next;
}
- *prevp = 0;
+ *prevp = NULL;
return head;
}
-/* Checks if the given "compat" string matches one of the strings in
+/** Checks if the given "compat" string matches one of the strings in
* the device's "compatible" property
*/
int
}
-/*
+/**
* Indicates whether the root node has a given value in its
* compatible property.
*/
{
struct device_node *root;
int rc = 0;
-
+
root = of_find_node_by_path("/");
if (root) {
rc = device_is_compatible(root, compat);
return rc;
}
-/*
+/**
* Construct and return a list of the device_nodes with a given type
* and compatible property.
*/
prevp = &np->next;
}
}
- *prevp = 0;
+ *prevp = NULL;
return head;
}
-/*
+/**
* Find the device_node with a given full_name.
*/
struct device_node *
u32 *regs;
int err = 0;
phandle *ibm_phandle;
-
- node->name = get_property(node, "name", 0);
- node->type = get_property(node, "device_type", 0);
+
+ node->name = get_property(node, "name", NULL);
+ node->type = get_property(node, "device_type", NULL);
if (!parent) {
err = -ENODEV;
}
/* now do the work of finish_node_interrupts */
- if (get_property(node, "interrupts", 0)) {
+ if (get_property(node, "interrupts", NULL)) {
err = of_finish_dynamic_node_interrupts(node);
if (err) goto out;
}
- /* now do the rough equivalent of update_dn_pci_info, this
- * probably is not correct for phb's, but should work for
- * IOAs and slots.
- */
+ /* now do the rough equivalent of update_dn_pci_info, this
+ * probably is not correct for phb's, but should work for
+ * IOAs and slots.
+ */
- node->phb = parent->phb;
+ node->phb = parent->phb;
- regs = (u32 *)get_property(node, "reg", 0);
- if (regs) {
- node->busno = (regs[0] >> 16) & 0xff;
- node->devfn = (regs[0] >> 8) & 0xff;
- }
+ regs = (u32 *)get_property(node, "reg", NULL);
+ if (regs) {
+ node->busno = (regs[0] >> 16) & 0xff;
+ node->devfn = (regs[0] >> 8) & 0xff;
+ }
/* fixing up iommu_table */
- if(strcmp(node->name, "pci") == 0 &&
- get_property(node, "ibm,dma-window", NULL)) {
- node->bussubno = node->busno;
- iommu_devnode_init(node);
- }
- else
+ if (strcmp(node->name, "pci") == 0 &&
+ get_property(node, "ibm,dma-window", NULL)) {
+ node->bussubno = node->busno;
+ iommu_devnode_init(node);
+ } else
node->iommu_table = parent->iommu_table;
out:
*lenp = pp->length;
return pp->value;
}
- return 0;
+ return NULL;
}
/*
break;
}
-
+ case PTRACE_GETEVENTMSG:
+ ret = put_user(child->ptrace_message, (unsigned int __user *) data);
+ break;
default:
ret = ptrace_request(child, request, addr, data);
#include <asm/rtas.h>
#include <asm/ppcdebug.h>
+static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
+static spinlock_t ras_log_buf_lock = SPIN_LOCK_UNLOCKED;
+
+static int ras_get_sensor_state_token;
+static int ras_check_exception_token;
+
+#define EPOW_SENSOR_TOKEN 9
+#define EPOW_SENSOR_INDEX 0
+#define RAS_VECTOR_OFFSET 0x500
+
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id,
struct pt_regs * regs);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id,
/* #define DEBUG */
+static void request_ras_irqs(struct device_node *np, char *propname,
+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ const char *name)
+{
+ unsigned int *ireg, len, i;
+ int virq, n_intr;
+
+ ireg = (unsigned int *)get_property(np, propname, &len);
+ if (ireg == NULL)
+ return;
+ n_intr = prom_n_intr_cells(np);
+ len /= n_intr * sizeof(*ireg);
+
+ for (i = 0; i < len; i++) {
+ virq = virt_irq_create_mapping(*ireg);
+ if (virq == NO_IRQ) {
+ printk(KERN_ERR "Unable to allocate interrupt "
+ "number for %s\n", np->full_name);
+ return;
+ }
+ if (request_irq(irq_offset_up(virq), handler, 0, name, NULL)) {
+ printk(KERN_ERR "Unable to request interrupt %d for "
+ "%s\n", irq_offset_up(virq), np->full_name);
+ return;
+ }
+ ireg += n_intr;
+ }
+}
+
/*
* Initialize handlers for the set of interrupts caused by hardware errors
* and power system events.
static int __init init_ras_IRQ(void)
{
struct device_node *np;
- unsigned int *ireg, len, i;
- int virq;
-
- if ((np = of_find_node_by_path("/event-sources/internal-errors")) &&
- (ireg = (unsigned int *)get_property(np, "open-pic-interrupt",
- &len))) {
- for (i=0; i<(len / sizeof(*ireg)); i++) {
- virq = virt_irq_create_mapping(*(ireg));
- if (virq == NO_IRQ) {
- printk(KERN_ERR "Unable to allocate interrupt "
- "number for %s\n", np->full_name);
- break;
- }
- request_irq(irq_offset_up(virq),
- ras_error_interrupt, 0,
- "RAS_ERROR", NULL);
- ireg++;
- }
+
+ ras_get_sensor_state_token = rtas_token("get-sensor-state");
+ ras_check_exception_token = rtas_token("check-exception");
+
+ /* Internal Errors */
+ np = of_find_node_by_path("/event-sources/internal-errors");
+ if (np != NULL) {
+ request_ras_irqs(np, "open-pic-interrupt", ras_error_interrupt,
+ "RAS_ERROR");
+ request_ras_irqs(np, "interrupts", ras_error_interrupt,
+ "RAS_ERROR");
+ of_node_put(np);
}
- of_node_put(np);
-
- if ((np = of_find_node_by_path("/event-sources/epow-events")) &&
- (ireg = (unsigned int *)get_property(np, "open-pic-interrupt",
- &len))) {
- for (i=0; i<(len / sizeof(*ireg)); i++) {
- virq = virt_irq_create_mapping(*(ireg));
- if (virq == NO_IRQ) {
- printk(KERN_ERR "Unable to allocate interrupt "
- " number for %s\n", np->full_name);
- break;
- }
- request_irq(irq_offset_up(virq),
- ras_epow_interrupt, 0,
- "RAS_EPOW", NULL);
- ireg++;
- }
+
+ /* EPOW Events */
+ np = of_find_node_by_path("/event-sources/epow-events");
+ if (np != NULL) {
+ request_ras_irqs(np, "open-pic-interrupt", ras_epow_interrupt,
+ "RAS_EPOW");
+ request_ras_irqs(np, "interrupts", ras_epow_interrupt,
+ "RAS_EPOW");
+ of_node_put(np);
}
- of_node_put(np);
return 1;
}
__initcall(init_ras_IRQ);
-static struct rtas_error_log log_buf;
-static spinlock_t log_lock = SPIN_LOCK_UNLOCKED;
-
/*
* Handle power subsystem events (EPOW).
*
static irqreturn_t
ras_epow_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
- struct rtas_error_log log_entry;
- unsigned int size = sizeof(log_entry);
int status = 0xdeadbeef;
+ int state = 0;
+ int critical;
- spin_lock(&log_lock);
+ status = rtas_call(ras_get_sensor_state_token, 2, 2, &state,
+ EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX);
- status = rtas_call(rtas_token("check-exception"), 6, 1, NULL,
- 0x500, irq,
- RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
- 1, /* Time Critical */
- __pa(&log_buf), size);
+ if (state > 3)
+ critical = 1; /* Time Critical */
+ else
+ critical = 0;
- log_entry = log_buf;
+ spin_lock(&ras_log_buf_lock);
- spin_unlock(&log_lock);
+ status = rtas_call(ras_check_exception_token, 6, 1, NULL,
+ RAS_VECTOR_OFFSET,
+ virt_irq_to_real(irq_offset_down(irq)),
+ RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
+ critical, __pa(&ras_log_buf), RTAS_ERROR_LOG_MAX);
- udbg_printf("EPOW <0x%lx 0x%x>\n",
- *((unsigned long *)&log_entry), status);
- printk(KERN_WARNING
- "EPOW <0x%lx 0x%x>\n",*((unsigned long *)&log_entry), status);
+ udbg_printf("EPOW <0x%lx 0x%x 0x%x>\n",
+ *((unsigned long *)&ras_log_buf), status, state);
+ printk(KERN_WARNING "EPOW <0x%lx 0x%x 0x%x>\n",
+ *((unsigned long *)&ras_log_buf), status, state);
/* format and print the extended information */
- log_error((char *)&log_entry, ERR_TYPE_RTAS_LOG, 0);
-
+ log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);
+
+ spin_unlock(&ras_log_buf_lock);
return IRQ_HANDLED;
}
static irqreturn_t
ras_error_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
- struct rtas_error_log log_entry;
- unsigned int size = sizeof(log_entry);
+ struct rtas_error_log *rtas_elog;
int status = 0xdeadbeef;
int fatal;
- spin_lock(&log_lock);
+ spin_lock(&ras_log_buf_lock);
- status = rtas_call(rtas_token("check-exception"), 6, 1, NULL,
- 0x500, irq,
- RTAS_INTERNAL_ERROR,
- 1, /* Time Critical */
- __pa(&log_buf), size);
+ status = rtas_call(ras_check_exception_token, 6, 1, NULL,
+ RAS_VECTOR_OFFSET,
+ virt_irq_to_real(irq_offset_down(irq)),
+ RTAS_INTERNAL_ERROR, 1 /*Time Critical */,
+ __pa(&ras_log_buf), RTAS_ERROR_LOG_MAX);
- log_entry = log_buf;
+ rtas_elog = (struct rtas_error_log *)ras_log_buf;
- spin_unlock(&log_lock);
-
- if ((status == 0) && (log_entry.severity >= SEVERITY_ERROR_SYNC))
+ if ((status == 0) && (rtas_elog->severity >= SEVERITY_ERROR_SYNC))
fatal = 1;
else
fatal = 0;
/* format and print the extended information */
- log_error((char *)&log_entry, ERR_TYPE_RTAS_LOG, fatal);
+ log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);
if (fatal) {
- udbg_printf("HW Error <0x%lx 0x%x>\n",
- *((unsigned long *)&log_entry), status);
- printk(KERN_EMERG
- "Error: Fatal hardware error <0x%lx 0x%x>\n",
- *((unsigned long *)&log_entry), status);
+ udbg_printf("Fatal HW Error <0x%lx 0x%x>\n",
+ *((unsigned long *)&ras_log_buf), status);
+ printk(KERN_EMERG "Error: Fatal hardware error <0x%lx 0x%x>\n",
+ *((unsigned long *)&ras_log_buf), status);
#ifndef DEBUG
/* Don't actually power off when debugging so we can test
#endif
} else {
udbg_printf("Recoverable HW Error <0x%lx 0x%x>\n",
- *((unsigned long *)&log_entry), status);
- printk(KERN_WARNING
+ *((unsigned long *)&ras_log_buf), status);
+ printk(KERN_WARNING
"Warning: Recoverable hardware error <0x%lx 0x%x>\n",
- *((unsigned long *)&log_entry), status);
+ *((unsigned long *)&ras_log_buf), status);
}
+
+ spin_unlock(&ras_log_buf_lock);
return IRQ_HANDLED;
}
#include <linux/time.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
/* ****************************************************************** */
/* Declarations */
-static int ppc_rtas_sensor_read(char * buf, char ** start, off_t off,
- int count, int *eof, void *data);
-static ssize_t ppc_rtas_clock_read(struct file * file, char * buf,
- size_t count, loff_t *ppos);
-static ssize_t ppc_rtas_clock_write(struct file * file, const char * buf,
- size_t count, loff_t *ppos);
-static ssize_t ppc_rtas_progress_read(struct file * file, char * buf,
- size_t count, loff_t *ppos);
-static ssize_t ppc_rtas_progress_write(struct file * file, const char * buf,
- size_t count, loff_t *ppos);
-static ssize_t ppc_rtas_poweron_read(struct file * file, char * buf,
- size_t count, loff_t *ppos);
-static ssize_t ppc_rtas_poweron_write(struct file * file, const char * buf,
- size_t count, loff_t *ppos);
-
-static ssize_t ppc_rtas_tone_freq_write(struct file * file, const char * buf,
- size_t count, loff_t *ppos);
-static ssize_t ppc_rtas_tone_freq_read(struct file * file, char * buf,
- size_t count, loff_t *ppos);
-static ssize_t ppc_rtas_tone_volume_write(struct file * file, const char * buf,
- size_t count, loff_t *ppos);
-static ssize_t ppc_rtas_tone_volume_read(struct file * file, char * buf,
- size_t count, loff_t *ppos);
-static ssize_t ppc_rtas_rmo_buf_read(struct file *file, char *buf,
- size_t count, loff_t *ppos);
+static int ppc_rtas_sensors_show(struct seq_file *m, void *v);
+static int ppc_rtas_clock_show(struct seq_file *m, void *v);
+static ssize_t ppc_rtas_clock_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos);
+static int ppc_rtas_progress_show(struct seq_file *m, void *v);
+static ssize_t ppc_rtas_progress_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos);
+static int ppc_rtas_poweron_show(struct seq_file *m, void *v);
+static ssize_t ppc_rtas_poweron_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos);
+
+static ssize_t ppc_rtas_tone_freq_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos);
+static int ppc_rtas_tone_freq_show(struct seq_file *m, void *v);
+static ssize_t ppc_rtas_tone_volume_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos);
+static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v);
+static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v);
+
+static int sensors_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ppc_rtas_sensors_show, NULL);
+}
+
+struct file_operations ppc_rtas_sensors_operations = {
+ .open = sensors_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int poweron_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ppc_rtas_poweron_show, NULL);
+}
struct file_operations ppc_rtas_poweron_operations = {
- .read = ppc_rtas_poweron_read,
- .write = ppc_rtas_poweron_write
+ .open = poweron_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = ppc_rtas_poweron_write,
+ .release = single_release,
};
+
+static int progress_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ppc_rtas_progress_show, NULL);
+}
+
struct file_operations ppc_rtas_progress_operations = {
- .read = ppc_rtas_progress_read,
- .write = ppc_rtas_progress_write
+ .open = progress_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = ppc_rtas_progress_write,
+ .release = single_release,
};
+static int clock_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ppc_rtas_clock_show, NULL);
+}
+
struct file_operations ppc_rtas_clock_operations = {
- .read = ppc_rtas_clock_read,
- .write = ppc_rtas_clock_write
+ .open = clock_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = ppc_rtas_clock_write,
+ .release = single_release,
};
+static int tone_freq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ppc_rtas_tone_freq_show, NULL);
+}
+
struct file_operations ppc_rtas_tone_freq_operations = {
- .read = ppc_rtas_tone_freq_read,
- .write = ppc_rtas_tone_freq_write
+ .open = tone_freq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = ppc_rtas_tone_freq_write,
+ .release = single_release,
};
+
+static int tone_volume_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ppc_rtas_tone_volume_show, NULL);
+}
+
struct file_operations ppc_rtas_tone_volume_operations = {
- .read = ppc_rtas_tone_volume_read,
- .write = ppc_rtas_tone_volume_write
+ .open = tone_volume_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = ppc_rtas_tone_volume_write,
+ .release = single_release,
};
-static struct file_operations ppc_rtas_rmo_buf_ops = {
- .read = ppc_rtas_rmo_buf_read,
+static int rmo_buf_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ppc_rtas_rmo_buf_show, NULL);
+}
+
+struct file_operations ppc_rtas_rmo_buf_ops = {
+ .open = rmo_buf_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
-int ppc_rtas_find_all_sensors (void);
-int ppc_rtas_process_sensor(struct individual_sensor s, int state,
- int error, char * buf);
-char * ppc_rtas_process_error(int error);
-int get_location_code(struct individual_sensor s, char * buf);
-int check_location_string (char *c, char * buf);
-int check_location (char *c, int idx, char * buf);
+static int ppc_rtas_find_all_sensors(void);
+static void ppc_rtas_process_sensor(struct seq_file *m,
+ struct individual_sensor *s, int state, int error, char *loc);
+static char *ppc_rtas_process_error(int error);
+static void get_location_code(struct seq_file *m,
+ struct individual_sensor *s, char *loc);
+static void check_location_string(struct seq_file *m, char *c);
+static void check_location(struct seq_file *m, char *c);
static int __init proc_rtas_init(void)
{
if (entry)
entry->proc_fops = &ppc_rtas_poweron_operations;
- create_proc_read_entry("ppc64/rtas/sensors", S_IRUGO, NULL,
- ppc_rtas_sensor_read, NULL);
+ entry = create_proc_entry("ppc64/rtas/sensors", S_IRUGO, NULL);
+ if (entry)
+ entry->proc_fops = &ppc_rtas_sensors_operations;
entry = create_proc_entry("ppc64/rtas/frequency", S_IWUSR|S_IRUGO,
NULL);
__initcall(proc_rtas_init);
+static int parse_number(const char __user *p, size_t count, unsigned long *val)
+{
+ char buf[40];
+ char *end;
+
+ if (count > 39)
+ return -EINVAL;
+
+ if (copy_from_user(buf, p, count))
+ return -EFAULT;
+
+ buf[count] = 0;
+
+ *val = simple_strtoul(buf, &end, 10);
+ if (*end && *end != '\n')
+ return -EINVAL;
+
+ return 0;
+}
+
/* ****************************************************************** */
/* POWER-ON-TIME */
/* ****************************************************************** */
-static ssize_t ppc_rtas_poweron_write(struct file * file, const char * buf,
- size_t count, loff_t *ppos)
+static ssize_t ppc_rtas_poweron_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
{
- char stkbuf[40]; /* its small, its on stack */
struct rtc_time tm;
unsigned long nowtime;
- char *dest;
- int error;
+ int error = parse_number(buf, count, &nowtime);
+ if (error)
+ return error;
- if (39 < count) count = 39;
- if (copy_from_user (stkbuf, buf, count)) {
- return -EFAULT;
- }
- stkbuf[count] = 0;
- nowtime = simple_strtoul(stkbuf, &dest, 10);
- if (*dest != '\0' && *dest != '\n') {
- printk("ppc_rtas_poweron_write: Invalid time\n");
- return count;
- }
power_on_time = nowtime; /* save the time */
to_tm(nowtime, &tm);
error = rtas_call(rtas_token("set-time-for-power-on"), 7, 1, NULL,
tm.tm_year, tm.tm_mon, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec, 0 /* nano */);
- if (error != 0)
+ if (error)
printk(KERN_WARNING "error: setting poweron time returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
-static ssize_t ppc_rtas_poweron_read(struct file * file, char * buf,
- size_t count, loff_t *ppos)
+static int ppc_rtas_poweron_show(struct seq_file *m, void *v)
{
- char stkbuf[40]; /* its small, its on stack */
- int n, sn;
if (power_on_time == 0)
- n = scnprintf(stkbuf,sizeof(stkbuf),"Power on time not set\n");
+ seq_printf(m, "Power on time not set\n");
else
- n = scnprintf(stkbuf,sizeof(stkbuf),"%lu\n",power_on_time);
-
- sn = strlen (stkbuf) +1;
- if (*ppos >= sn)
- return 0;
- if (n > sn - *ppos)
- n = sn - *ppos;
- if (n > count)
- n = count;
- if (copy_to_user (buf, stkbuf + (*ppos), n)) {
- return -EFAULT;
- }
- *ppos += n;
- return n;
+ seq_printf(m, "%lu\n",power_on_time);
+ return 0;
}
/* ****************************************************************** */
/* PROGRESS */
/* ****************************************************************** */
-static ssize_t ppc_rtas_progress_write(struct file * file, const char * buf,
- size_t count, loff_t *ppos)
+static ssize_t ppc_rtas_progress_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
{
unsigned long hex;
- if (count >= MAX_LINELENGTH) count = MAX_LINELENGTH -1;
- if (copy_from_user (progress_led, buf, count)) { /* save the string */
+ if (count >= MAX_LINELENGTH)
+ count = MAX_LINELENGTH -1;
+ if (copy_from_user(progress_led, buf, count)) { /* save the string */
return -EFAULT;
}
progress_led[count] = 0;
ppc_md.progress ((char *)progress_led, hex);
return count;
- /* clear the line */ /* ppc_md.progress(" ", 0xffff);*/
+ /* clear the line */
+ /* ppc_md.progress(" ", 0xffff);*/
}
/* ****************************************************************** */
-static ssize_t ppc_rtas_progress_read(struct file * file, char * buf,
- size_t count, loff_t *ppos)
+static int ppc_rtas_progress_show(struct seq_file *m, void *v)
{
- int sn, n = 0;
- char *tmpbuf;
-
- if (progress_led == NULL) return 0;
-
- tmpbuf = kmalloc (MAX_LINELENGTH, GFP_KERNEL);
- if (!tmpbuf) {
- printk(KERN_ERR "error: kmalloc failed\n");
- return -ENOMEM;
- }
- n = sprintf (tmpbuf, "%s\n", progress_led);
-
- sn = strlen (tmpbuf) +1;
- if (*ppos >= sn) {
- kfree (tmpbuf);
- return 0;
- }
- if (n > sn - *ppos)
- n = sn - *ppos;
- if (n > count)
- n = count;
- if (copy_to_user (buf, tmpbuf + (*ppos), n)) {
- kfree (tmpbuf);
- return -EFAULT;
- }
- kfree (tmpbuf);
- *ppos += n;
- return n;
+ if (progress_led)
+ seq_printf(m, "%s\n", progress_led);
+ return 0;
}
/* ****************************************************************** */
/* CLOCK */
/* ****************************************************************** */
-static ssize_t ppc_rtas_clock_write(struct file * file, const char * buf,
- size_t count, loff_t *ppos)
+static ssize_t ppc_rtas_clock_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
{
- char stkbuf[40]; /* its small, its on stack */
struct rtc_time tm;
unsigned long nowtime;
- char *dest;
- int error;
-
- if (39 < count) count = 39;
- if (copy_from_user (stkbuf, buf, count)) {
- return -EFAULT;
- }
- stkbuf[count] = 0;
- nowtime = simple_strtoul(stkbuf, &dest, 10);
- if (*dest != '\0' && *dest != '\n') {
- printk("ppc_rtas_clock_write: Invalid time\n");
- return count;
- }
+ int error = parse_number(buf, count, &nowtime);
+ if (error)
+ return error;
to_tm(nowtime, &tm);
error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL,
tm.tm_year, tm.tm_mon, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec, 0);
- if (error != 0)
+ if (error)
printk(KERN_WARNING "error: setting the clock returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
-static ssize_t ppc_rtas_clock_read(struct file * file, char * buf,
- size_t count, loff_t *ppos)
+static int ppc_rtas_clock_show(struct seq_file *m, void *v)
{
- unsigned int year, mon, day, hour, min, sec;
int ret[8];
- int n, sn, error;
- char stkbuf[40]; /* its small, its on stack */
+ int error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
- error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
-
- year = ret[0]; mon = ret[1]; day = ret[2];
- hour = ret[3]; min = ret[4]; sec = ret[5];
-
- if (error != 0){
+ if (error) {
printk(KERN_WARNING "error: reading the clock returned: %s\n",
ppc_rtas_process_error(error));
- n = scnprintf (stkbuf, sizeof(stkbuf), "0");
+ seq_printf(m, "0");
} else {
- n = scnprintf (stkbuf, sizeof(stkbuf), "%lu\n",
+ unsigned int year, mon, day, hour, min, sec;
+ year = ret[0]; mon = ret[1]; day = ret[2];
+ hour = ret[3]; min = ret[4]; sec = ret[5];
+ seq_printf(m, "%lu\n",
mktime(year, mon, day, hour, min, sec));
}
-
- sn = strlen (stkbuf) +1;
- if (*ppos >= sn)
- return 0;
- if (n > sn - *ppos)
- n = sn - *ppos;
- if (n > count)
- n = count;
- if (copy_to_user (buf, stkbuf + (*ppos), n)) {
- return -EFAULT;
- }
- *ppos += n;
- return n;
+ return 0;
}
/* ****************************************************************** */
/* SENSOR STUFF */
/* ****************************************************************** */
-static int ppc_rtas_sensor_read(char * buf, char ** start, off_t off,
- int count, int *eof, void *data)
+static int ppc_rtas_sensors_show(struct seq_file *m, void *v)
{
- int i,j,n;
+ int i,j;
int state, error;
- char *buffer;
int get_sensor_state = rtas_token("get-sensor-state");
- if (count < 0)
- return -EINVAL;
-
- /* May not be enough */
- buffer = kmalloc(MAX_LINELENGTH*MAX_SENSORS, GFP_KERNEL);
-
- if (!buffer)
- return -ENOMEM;
-
- memset(buffer, 0, MAX_LINELENGTH*MAX_SENSORS);
-
- n = sprintf ( buffer , "RTAS (RunTime Abstraction Services) Sensor Information\n");
- n += sprintf ( buffer+n, "Sensor\t\tValue\t\tCondition\tLocation\n");
- n += sprintf ( buffer+n, "********************************************************\n");
+ seq_printf(m, "RTAS (RunTime Abstraction Services) Sensor Information\n");
+ seq_printf(m, "Sensor\t\tValue\t\tCondition\tLocation\n");
+ seq_printf(m, "********************************************************\n");
if (ppc_rtas_find_all_sensors() != 0) {
- n += sprintf ( buffer+n, "\nNo sensors are available\n");
- goto return_string;
+ seq_printf(m, "\nNo sensors are available\n");
+ return 0;
}
for (i=0; i<sensors.quant; i++) {
- j = sensors.sensor[i].quant;
- /* A sensor may have multiple instances */
- while (j >= 0) {
+ struct individual_sensor *p = &sensors.sensor[i];
+ char rstr[64];
+ char *loc;
+ int llen, offs;
+
+ sprintf (rstr, SENSOR_PREFIX"%04d", p->token);
+ loc = (char *) get_property(rtas_node, rstr, &llen);
+ /* A sensor may have multiple instances */
+ for (j = 0, offs = 0; j <= p->quant; j++) {
error = rtas_call(get_sensor_state, 2, 2, &state,
- sensors.sensor[i].token,
- sensors.sensor[i].quant - j);
-
- n += ppc_rtas_process_sensor(sensors.sensor[i], state,
- error, buffer+n );
- n += sprintf (buffer+n, "\n");
- j--;
- } /* while */
- } /* for */
-
-return_string:
- if (off >= strlen(buffer)) {
- *eof = 1;
- kfree(buffer);
- return 0;
+ p->token, j);
+
+ ppc_rtas_process_sensor(m, p, state, error, loc);
+ seq_putc(m, '\n');
+ if (loc) {
+ offs += strlen(loc) + 1;
+ loc += strlen(loc) + 1;
+ if (offs >= llen)
+ loc = NULL;
+ }
+ }
}
- if (n > strlen(buffer) - off)
- n = strlen(buffer) - off;
- if (n > count)
- n = count;
- else
- *eof = 1;
-
- memcpy(buf, buffer + off, n);
- *start = buf;
- kfree(buffer);
- return n;
+ return 0;
}
/* ****************************************************************** */
-int ppc_rtas_find_all_sensors (void)
+static int ppc_rtas_find_all_sensors(void)
{
unsigned int *utmp;
int len, i;
/*
* Builds a string of what rtas returned
*/
-char * ppc_rtas_process_error(int error)
+static char *ppc_rtas_process_error(int error)
{
switch (error) {
case SENSOR_CRITICAL_HIGH:
* Builds a string out of what the sensor said
*/
-int ppc_rtas_process_sensor(struct individual_sensor s, int state,
- int error, char * buf)
+static void ppc_rtas_process_sensor(struct seq_file *m,
+ struct individual_sensor *s, int state, int error, char *loc)
{
/* Defined return vales */
const char * key_switch[] = { "Off\t", "Normal\t", "Secure\t",
int num_states = 0;
int temperature = 0;
int unknown = 0;
- int n = 0;
/* What kind of sensor do we have here? */
- switch (s.token) {
+ switch (s->token) {
case KEY_SWITCH:
- n += sprintf(buf+n, "Key switch:\t");
+ seq_printf(m, "Key switch:\t");
num_states = sizeof(key_switch) / sizeof(char *);
if (state < num_states) {
- n += sprintf(buf+n, "%s\t", key_switch[state]);
+ seq_printf(m, "%s\t", key_switch[state]);
have_strings = 1;
}
break;
case ENCLOSURE_SWITCH:
- n += sprintf(buf+n, "Enclosure switch:\t");
+ seq_printf(m, "Enclosure switch:\t");
num_states = sizeof(enclosure_switch) / sizeof(char *);
if (state < num_states) {
- n += sprintf(buf+n, "%s\t",
+ seq_printf(m, "%s\t",
enclosure_switch[state]);
have_strings = 1;
}
break;
case THERMAL_SENSOR:
- n += sprintf(buf+n, "Temp. (°C/°F):\t");
+ seq_printf(m, "Temp. (°C/°F):\t");
temperature = 1;
break;
case LID_STATUS:
- n += sprintf(buf+n, "Lid status:\t");
+ seq_printf(m, "Lid status:\t");
num_states = sizeof(lid_status) / sizeof(char *);
if (state < num_states) {
- n += sprintf(buf+n, "%s\t", lid_status[state]);
+ seq_printf(m, "%s\t", lid_status[state]);
have_strings = 1;
}
break;
case POWER_SOURCE:
- n += sprintf(buf+n, "Power source:\t");
+ seq_printf(m, "Power source:\t");
num_states = sizeof(power_source) / sizeof(char *);
if (state < num_states) {
- n += sprintf(buf+n, "%s\t",
+ seq_printf(m, "%s\t",
power_source[state]);
have_strings = 1;
}
break;
case BATTERY_VOLTAGE:
- n += sprintf(buf+n, "Battery voltage:\t");
+ seq_printf(m, "Battery voltage:\t");
break;
case BATTERY_REMAINING:
- n += sprintf(buf+n, "Battery remaining:\t");
+ seq_printf(m, "Battery remaining:\t");
num_states = sizeof(battery_remaining) / sizeof(char *);
if (state < num_states)
{
- n += sprintf(buf+n, "%s\t",
+ seq_printf(m, "%s\t",
battery_remaining[state]);
have_strings = 1;
}
break;
case BATTERY_PERCENTAGE:
- n += sprintf(buf+n, "Battery percentage:\t");
+ seq_printf(m, "Battery percentage:\t");
break;
case EPOW_SENSOR:
- n += sprintf(buf+n, "EPOW Sensor:\t");
+ seq_printf(m, "EPOW Sensor:\t");
num_states = sizeof(epow_sensor) / sizeof(char *);
if (state < num_states) {
- n += sprintf(buf+n, "%s\t", epow_sensor[state]);
+ seq_printf(m, "%s\t", epow_sensor[state]);
have_strings = 1;
}
break;
case BATTERY_CYCLESTATE:
- n += sprintf(buf+n, "Battery cyclestate:\t");
+ seq_printf(m, "Battery cyclestate:\t");
num_states = sizeof(battery_cyclestate) /
sizeof(char *);
if (state < num_states) {
- n += sprintf(buf+n, "%s\t",
+ seq_printf(m, "%s\t",
battery_cyclestate[state]);
have_strings = 1;
}
break;
case BATTERY_CHARGING:
- n += sprintf(buf+n, "Battery Charging:\t");
+ seq_printf(m, "Battery Charging:\t");
num_states = sizeof(battery_charging) / sizeof(char *);
if (state < num_states) {
- n += sprintf(buf+n, "%s\t",
+ seq_printf(m, "%s\t",
battery_charging[state]);
have_strings = 1;
}
break;
case IBM_SURVEILLANCE:
- n += sprintf(buf+n, "Surveillance:\t");
+ seq_printf(m, "Surveillance:\t");
break;
case IBM_FANRPM:
- n += sprintf(buf+n, "Fan (rpm):\t");
+ seq_printf(m, "Fan (rpm):\t");
break;
case IBM_VOLTAGE:
- n += sprintf(buf+n, "Voltage (mv):\t");
+ seq_printf(m, "Voltage (mv):\t");
break;
case IBM_DRCONNECTOR:
- n += sprintf(buf+n, "DR connector:\t");
+ seq_printf(m, "DR connector:\t");
num_states = sizeof(ibm_drconnector) / sizeof(char *);
if (state < num_states) {
- n += sprintf(buf+n, "%s\t",
+ seq_printf(m, "%s\t",
ibm_drconnector[state]);
have_strings = 1;
}
break;
case IBM_POWERSUPPLY:
- n += sprintf(buf+n, "Powersupply:\t");
+ seq_printf(m, "Powersupply:\t");
break;
case IBM_INTQUEUE:
- n += sprintf(buf+n, "Interrupt queue:\t");
+ seq_printf(m, "Interrupt queue:\t");
num_states = sizeof(ibm_intqueue) / sizeof(char *);
if (state < num_states) {
- n += sprintf(buf+n, "%s\t",
+ seq_printf(m, "%s\t",
ibm_intqueue[state]);
have_strings = 1;
}
break;
default:
- n += sprintf(buf+n, "Unknown sensor (type %d), ignoring it\n",
- s.token);
+ seq_printf(m, "Unknown sensor (type %d), ignoring it\n",
+ s->token);
unknown = 1;
have_strings = 1;
break;
}
if (have_strings == 0) {
if (temperature) {
- n += sprintf(buf+n, "%4d /%4d\t", state, cel_to_fahr(state));
+ seq_printf(m, "%4d /%4d\t", state, cel_to_fahr(state));
} else
- n += sprintf(buf+n, "%10d\t", state);
+ seq_printf(m, "%10d\t", state);
}
if (unknown == 0) {
- n += sprintf ( buf+n, "%s\t", ppc_rtas_process_error(error));
- n += get_location_code(s, buf+n);
+ seq_printf(m, "%s\t", ppc_rtas_process_error(error));
+ get_location_code(m, s, loc);
}
- return n;
}
/* ****************************************************************** */
-int check_location (char *c, int idx, char * buf)
+static void check_location(struct seq_file *m, char *c)
{
- int n = 0;
-
- switch (*(c+idx)) {
+ switch (c[0]) {
case LOC_PLANAR:
- n += sprintf ( buf, "Planar #%c", *(c+idx+1));
+ seq_printf(m, "Planar #%c", c[1]);
break;
case LOC_CPU:
- n += sprintf ( buf, "CPU #%c", *(c+idx+1));
+ seq_printf(m, "CPU #%c", c[1]);
break;
case LOC_FAN:
- n += sprintf ( buf, "Fan #%c", *(c+idx+1));
+ seq_printf(m, "Fan #%c", c[1]);
break;
case LOC_RACKMOUNTED:
- n += sprintf ( buf, "Rack #%c", *(c+idx+1));
+ seq_printf(m, "Rack #%c", c[1]);
break;
case LOC_VOLTAGE:
- n += sprintf ( buf, "Voltage #%c", *(c+idx+1));
+ seq_printf(m, "Voltage #%c", c[1]);
break;
case LOC_LCD:
- n += sprintf ( buf, "LCD #%c", *(c+idx+1));
+ seq_printf(m, "LCD #%c", c[1]);
break;
case '.':
- n += sprintf ( buf, "- %c", *(c+idx+1));
+ seq_printf(m, "- %c", c[1]);
+ break;
default:
- n += sprintf ( buf, "Unknown location");
+ seq_printf(m, "Unknown location");
break;
}
- return n;
}
* ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ]
* the '.' may be an abbrevation
*/
-int check_location_string (char *c, char *buf)
+static void check_location_string(struct seq_file *m, char *c)
{
- int n=0,i=0;
-
- while (c[i]) {
- if (isalpha(c[i]) || c[i] == '.') {
- n += check_location(c, i, buf+n);
- }
- else if (c[i] == '/' || c[i] == '-')
- n += sprintf(buf+n, " at ");
- i++;
+ while (*c) {
+ if (isalpha(*c) || *c == '.')
+ check_location(m, c);
+ else if (*c == '/' || *c == '-')
+ seq_printf(m, " at ");
+ c++;
}
- return n;
}
/* ****************************************************************** */
-int get_location_code(struct individual_sensor s, char * buffer)
+static void get_location_code(struct seq_file *m, struct individual_sensor *s, char *loc)
{
- char rstr[512], tmp[10], tmp2[10];
- int n=0, i=0, llen, len;
- /* char *buf = kmalloc(MAX_LINELENGTH, GFP_KERNEL); */
- char *ret;
-
- static int pos = 0; /* remember position where buffer was */
-
- /* construct the sensor number like 0003 */
- /* fill with zeros */
- n = sprintf(tmp, "%d", s.token);
- len = strlen(tmp);
- while (strlen(tmp) < 4)
- n += sprintf (tmp+n, "0");
-
- /* invert the string */
- while (tmp[i]) {
- if (i<len)
- tmp2[4-len+i] = tmp[i];
- else
- tmp2[3-i] = tmp[i];
- i++;
- }
- tmp2[4] = '\0';
-
- sprintf (rstr, SENSOR_PREFIX"%s", tmp2);
-
- ret = (char *) get_property(rtas_node, rstr, &llen);
-
- n=0;
- if (ret == NULL || ret[0] == '\0') {
- n += sprintf ( buffer+n, "--- ");/* does not have a location */
+ if (!loc || !*loc) {
+ seq_printf(m, "---");/* does not have a location */
} else {
- char t[50];
- ret += pos;
-
- n += check_location_string(ret, buffer + n);
- n += sprintf ( buffer+n, " ");
- /* see how many characters we have printed */
- scnprintf(t, sizeof(t), "%s ", ret);
-
- pos += strlen(t);
- if (pos >= llen) pos=0;
+ check_location_string(m, loc);
}
- return n;
+ seq_putc(m, ' ');
}
/* ****************************************************************** */
/* INDICATORS - Tone Frequency */
/* ****************************************************************** */
-static ssize_t ppc_rtas_tone_freq_write(struct file * file, const char * buf,
- size_t count, loff_t *ppos)
+static ssize_t ppc_rtas_tone_freq_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
{
- char stkbuf[40]; /* its small, its on stack */
unsigned long freq;
- char *dest;
- int error;
+ int error = parse_number(buf, count, &freq);
+ if (error)
+ return error;
- if (39 < count) count = 39;
- if (copy_from_user (stkbuf, buf, count)) {
- return -EFAULT;
- }
- stkbuf[count] = 0;
- freq = simple_strtoul(stkbuf, &dest, 10);
- if (*dest != '\0' && *dest != '\n') {
- printk("ppc_rtas_tone_freq_write: Invalid tone freqency\n");
- return count;
- }
- if (freq < 0) freq = 0;
rtas_tone_frequency = freq; /* save it for later */
error = rtas_call(rtas_token("set-indicator"), 3, 1, NULL,
TONE_FREQUENCY, 0, freq);
- if (error != 0)
+ if (error)
printk(KERN_WARNING "error: setting tone frequency returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
-static ssize_t ppc_rtas_tone_freq_read(struct file * file, char * buf,
- size_t count, loff_t *ppos)
+static int ppc_rtas_tone_freq_show(struct seq_file *m, void *v)
{
- int n, sn;
- char stkbuf[40]; /* its small, its on stack */
-
- n = scnprintf(stkbuf, 40, "%lu\n", rtas_tone_frequency);
-
- sn = strlen (stkbuf) +1;
- if (*ppos >= sn)
- return 0;
- if (n > sn - *ppos)
- n = sn - *ppos;
- if (n > count)
- n = count;
- if (copy_to_user (buf, stkbuf + (*ppos), n)) {
- return -EFAULT;
- }
- *ppos += n;
- return n;
+ seq_printf(m, "%lu\n", rtas_tone_frequency);
+ return 0;
}
/* ****************************************************************** */
/* INDICATORS - Tone Volume */
/* ****************************************************************** */
-static ssize_t ppc_rtas_tone_volume_write(struct file * file, const char * buf,
- size_t count, loff_t *ppos)
+static ssize_t ppc_rtas_tone_volume_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
{
- char stkbuf[40]; /* its small, its on stack */
unsigned long volume;
- char *dest;
- int error;
+ int error = parse_number(buf, count, &volume);
+ if (error)
+ return error;
- if (39 < count) count = 39;
- if (copy_from_user (stkbuf, buf, count)) {
- return -EFAULT;
- }
- stkbuf[count] = 0;
- volume = simple_strtoul(stkbuf, &dest, 10);
- if (*dest != '\0' && *dest != '\n') {
- printk("ppc_rtas_tone_volume_write: Invalid tone volume\n");
- return count;
- }
- if (volume < 0) volume = 0;
- if (volume > 100) volume = 100;
+ if (volume > 100)
+ volume = 100;
rtas_tone_volume = volume; /* save it for later */
error = rtas_call(rtas_token("set-indicator"), 3, 1, NULL,
TONE_VOLUME, 0, volume);
- if (error != 0)
+ if (error)
printk(KERN_WARNING "error: setting tone volume returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
-static ssize_t ppc_rtas_tone_volume_read(struct file * file, char * buf,
- size_t count, loff_t *ppos)
+static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v)
{
- int n, sn;
- char stkbuf[40]; /* its small, its on stack */
-
- n = scnprintf(stkbuf, 40, "%lu\n", rtas_tone_volume);
-
- sn = strlen (stkbuf) +1;
- if (*ppos >= sn)
- return 0;
- if (n > sn - *ppos)
- n = sn - *ppos;
- if (n > count)
- n = count;
- if (copy_to_user (buf, stkbuf + (*ppos), n)) {
- return -EFAULT;
- }
- *ppos += n;
- return n;
+ seq_printf(m, "%lu\n", rtas_tone_volume);
+ return 0;
}
#define RMO_READ_BUF_MAX 30
/* RTAS Userspace access */
-static ssize_t ppc_rtas_rmo_buf_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v)
{
- char kbuf[RMO_READ_BUF_MAX];
- int n;
-
- n = sprintf(kbuf, "%016lx %x\n", rtas_rmo_buf, RTAS_RMOBUF_MAX);
- if (n > count)
- n = count;
-
- if (ppos && *ppos != 0)
- return 0;
-
- if (copy_to_user(buf, kbuf, n))
- return -EFAULT;
-
- if (ppos)
- *ppos = n;
-
- return n;
+ seq_printf(m, "%016lx %x\n", rtas_rmo_buf, RTAS_RMOBUF_MAX);
+ return 0;
}
#include <asm/delay.h>
#include <asm/uaccess.h>
-struct flash_block_list_header rtas_firmware_flash_list = {0, 0};
+struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
struct rtas_t rtas = {
.lock = SPIN_LOCK_UNLOCKED
if (f->next)
f->next = (struct flash_block_list *)virt_to_abs(f->next);
else
- f->next = 0LL;
+ f->next = NULL;
/* make num_blocks into the version/length field */
f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
}
BUG_ON(rtas_args->token == RTAS_UNKNOWN_SERVICE);
- printk("%u %u Ready to die...\n",
+ printk("cpu %u (hwid %u) Ready to die...\n",
smp_processor_id(), hard_smp_processor_id());
enter_rtas(__pa(rtas_args));
#define DEBUG(A...)
#endif
-static spinlock_t log_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t rtasd_log_lock = SPIN_LOCK_UNLOCKED;
DECLARE_WAIT_QUEUE_HEAD(rtas_log_wait);
if (buf == NULL)
return;
- spin_lock_irqsave(&log_lock, s);
+ spin_lock_irqsave(&rtasd_log_lock, s);
/* get length and increase count */
switch (err_type & ERR_TYPE_MASK) {
break;
case ERR_TYPE_KERNEL_PANIC:
default:
- spin_unlock_irqrestore(&log_lock, s);
+ spin_unlock_irqrestore(&rtasd_log_lock, s);
return;
}
/* Check to see if we need to or have stopped logging */
if (fatal || no_more_logging) {
no_more_logging = 1;
- spin_unlock_irqrestore(&log_lock, s);
+ spin_unlock_irqrestore(&rtasd_log_lock, s);
return;
}
else
rtas_log_start += 1;
- spin_unlock_irqrestore(&log_lock, s);
+ spin_unlock_irqrestore(&rtasd_log_lock, s);
wake_up_interruptible(&rtas_log_wait);
break;
case ERR_TYPE_KERNEL_PANIC:
default:
- spin_unlock_irqrestore(&log_lock, s);
+ spin_unlock_irqrestore(&rtasd_log_lock, s);
return;
}
return -ENOMEM;
- spin_lock_irqsave(&log_lock, s);
+ spin_lock_irqsave(&rtasd_log_lock, s);
/* if it's 0, then we know we got the last one (the one in NVRAM) */
if (rtas_log_size == 0 && !no_more_logging)
nvram_clear_error_log();
- spin_unlock_irqrestore(&log_lock, s);
+ spin_unlock_irqrestore(&rtasd_log_lock, s);
error = wait_event_interruptible(rtas_log_wait, rtas_log_size);
if (error)
goto out;
- spin_lock_irqsave(&log_lock, s);
+ spin_lock_irqsave(&rtasd_log_lock, s);
offset = rtas_error_log_buffer_max * (rtas_log_start & LOG_NUMBER_MASK);
memcpy(tmp, &rtas_log_buf[offset], count);
rtas_log_start += 1;
rtas_log_size -= 1;
- spin_unlock_irqrestore(&log_lock, s);
+ spin_unlock_irqrestore(&rtasd_log_lock, s);
error = copy_to_user(buf, tmp, count) ? -EFAULT : count;
out:
else
printk(KERN_ERR "Failed to create error_log proc entry\n");
- if (kernel_thread(rtasd, 0, CLONE_FS) < 0)
+ if (kernel_thread(rtasd, NULL, CLONE_FS) < 0)
printk(KERN_ERR "Failed to start RTAS daemon\n");
return 0;
* ioctls.
*/
-static loff_t rtc_llseek(struct file *file, loff_t offset, int origin);
-
-static ssize_t rtc_read(struct file *file, char *buf,
+static ssize_t rtc_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
static int rtc_ioctl(struct inode *inode, struct file *file,
* Now all the various file operations that we export.
*/
-static loff_t rtc_llseek(struct file *file, loff_t offset, int origin)
-{
- return -ESPIPE;
-}
-
-static ssize_t rtc_read(struct file *file, char *buf,
+static ssize_t rtc_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
return -EIO;
if (!capable(CAP_SYS_TIME))
return -EACCES;
- if (copy_from_user(&rtc_tm, (struct rtc_time*)arg,
+ if (copy_from_user(&rtc_tm, (struct rtc_time __user *)arg,
sizeof(struct rtc_time)))
return -EFAULT;
}
case RTC_EPOCH_READ: /* Read the epoch. */
{
- return put_user (epoch, (unsigned long *)arg);
+ return put_user (epoch, (unsigned long __user *)arg);
}
case RTC_EPOCH_SET: /* Set the epoch. */
{
default:
return -EINVAL;
}
- return copy_to_user((void *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
+ return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
}
static int rtc_open(struct inode *inode, struct file *file)
{
+ nonseekable_open(inode, file);
return 0;
}
*/
static struct file_operations rtc_fops = {
.owner = THIS_MODULE,
- .llseek = rtc_llseek,
+ .llseek = no_llseek,
.read = rtc_read,
.ioctl = rtc_ioctl,
.open = rtc_open,
return retval;
#ifdef CONFIG_PROC_FS
- if(create_proc_read_entry ("driver/rtc", 0, 0, rtc_read_proc, NULL) == NULL)
+ if (create_proc_read_entry ("driver/rtc", 0, NULL, rtc_read_proc, NULL) == NULL)
misc_deregister(&rtc_dev);
return -ENOMEM;
#endif
void cpu_die(void)
{
local_irq_disable();
+ /* Some hardware requires clearing the CPPR, while other hardware does not;
+ * it is safe either way.
+ */
+ pSeriesLP_cppr_info(0, 0);
rtas_stop_self();
/* Should never get here... */
BUG();
/* Fixup atomic count: it exited inside IRQ handler. */
paca[lcpu].__current->thread_info->preempt_count = 0;
- /* Fixup SLB round-robin so next segment (kernel) goes in segment 0 */
- paca[lcpu].stab_next_rr = 0;
/* At boot this is done in prom.c. */
paca[lcpu].hw_cpu_id = pcpu;
}
maxcpus = ireg[num_addr_cell + num_size_cell];
- /* DRENG need to account for threads here too */
+
+ /* Double maxcpus for processors which have SMT capability */
+ if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
+ maxcpus *= 2;
+
if (maxcpus > NR_CPUS) {
printk(KERN_WARNING
printk("smp_call_function on cpu %d: other cpus not "
"responding (%d)\n", smp_processor_id(),
atomic_read(&data.started));
- debugger(0);
+ debugger(NULL);
goto out;
}
}
smp_processor_id(),
atomic_read(&data.finished),
atomic_read(&data.started));
- debugger(0);
+ debugger(NULL);
goto out;
}
}
if (smp_ops->give_timebase)
smp_ops->give_timebase();
- cpu_set(cpu, cpu_online_map);
+
+ /* Wait until cpu puts itself in the online map */
+ while (!cpu_online(cpu))
+ cpu_relax();
+
return 0;
}
#endif
#endif
+ spin_lock(&call_lock);
+ cpu_set(cpu, cpu_online_map);
+ spin_unlock(&call_lock);
+
local_irq_enable();
return cpu_idle(NULL);
#include <asm/naca.h>
#include <asm/cputable.h>
-static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
-static void make_slbe(unsigned long esid, unsigned long vsid, int large,
- int kernel_segment);
+static int make_ste(unsigned long stab, unsigned long esid,
+ unsigned long vsid);
-static inline void slb_add_bolted(void)
-{
-#ifndef CONFIG_PPC_ISERIES
- unsigned long esid = GET_ESID(VMALLOCBASE);
- unsigned long vsid = get_kernel_vsid(VMALLOCBASE);
-
- WARN_ON(!irqs_disabled());
-
- /*
- * Bolt in the first vmalloc segment. Since modules end
- * up there it gets hit very heavily.
- */
- get_paca()->stab_next_rr = 1;
- make_slbe(esid, vsid, 0, 1);
-#endif
-}
+void slb_initialize(void);
/*
* Build an entry for the base kernel segment and put it into
*/
void stab_initialize(unsigned long stab)
{
- unsigned long esid, vsid;
- int seg0_largepages = 0;
-
- esid = GET_ESID(KERNELBASE);
- vsid = get_kernel_vsid(esid << SID_SHIFT);
-
- if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
- seg0_largepages = 1;
+ unsigned long vsid = get_kernel_vsid(KERNELBASE);
if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
- /* Invalidate the entire SLB & all the ERATS */
-#ifdef CONFIG_PPC_ISERIES
- asm volatile("isync; slbia; isync":::"memory");
-#else
- asm volatile("isync":::"memory");
- asm volatile("slbmte %0,%0"::"r" (0) : "memory");
- asm volatile("isync; slbia; isync":::"memory");
- get_paca()->stab_next_rr = 0;
- make_slbe(esid, vsid, seg0_largepages, 1);
- asm volatile("isync":::"memory");
-#endif
-
- slb_add_bolted();
+ slb_initialize();
} else {
asm volatile("isync; slbia; isync":::"memory");
- make_ste(stab, esid, vsid);
+ make_ste(stab, GET_ESID(KERNELBASE), vsid);
/* Order update */
asm volatile("sync":::"memory");
* Could not find empty entry, pick one with a round robin selection.
* Search all entries in the two groups.
*/
- castout_entry = get_paca()->stab_next_rr;
+ castout_entry = get_paca()->stab_rr;
for (i = 0; i < 16; i++) {
if (castout_entry < 8) {
global_entry = (esid & 0x1f) << 3;
castout_entry = (castout_entry + 1) & 0xf;
}
- get_paca()->stab_next_rr = (castout_entry + 1) & 0xf;
+ get_paca()->stab_rr = (castout_entry + 1) & 0xf;
/* Modify the old entry to the new value. */
preload_stab(tsk, mm);
}
-
-/*
- * SLB stuff
- */
-
-/*
- * Create a segment buffer entry for the given esid/vsid pair.
- *
- * NOTE: A context syncronising instruction is required before and after
- * this, in the common case we use exception entry and rfid.
- */
-static void make_slbe(unsigned long esid, unsigned long vsid, int large,
- int kernel_segment)
-{
- unsigned long entry, castout_entry;
- union {
- unsigned long word0;
- slb_dword0 data;
- } esid_data;
- union {
- unsigned long word0;
- slb_dword1 data;
- } vsid_data;
- struct paca_struct *lpaca = get_paca();
-
- /*
- * We take the next entry, round robin. Previously we tried
- * to find a free slot first but that took too long. Unfortunately
- * we dont have any LRU information to help us choose a slot.
- */
-
- /*
- * Never cast out the segment for our kernel stack. Since we
- * dont invalidate the ERAT we could have a valid translation
- * for the kernel stack during the first part of exception exit
- * which gets invalidated due to a tlbie from another cpu at a
- * non recoverable point (after setting srr0/1) - Anton
- *
- * paca Ksave is always valid (even when on the interrupt stack)
- * so we use that.
- */
- castout_entry = lpaca->stab_next_rr;
- do {
- entry = castout_entry;
- castout_entry++;
- /*
- * We bolt in the first kernel segment and the first
- * vmalloc segment.
- */
- if (castout_entry >= SLB_NUM_ENTRIES)
- castout_entry = 2;
- asm volatile("slbmfee %0,%1" : "=r" (esid_data) : "r" (entry));
- } while (esid_data.data.v &&
- esid_data.data.esid == GET_ESID(lpaca->kstack));
-
- lpaca->stab_next_rr = castout_entry;
-
- /* slbie not needed as the previous mapping is still valid. */
-
- /*
- * Write the new SLB entry.
- */
- vsid_data.word0 = 0;
- vsid_data.data.vsid = vsid;
- vsid_data.data.kp = 1;
- if (large)
- vsid_data.data.l = 1;
- if (kernel_segment)
- vsid_data.data.c = 1;
- else
- vsid_data.data.ks = 1;
-
- esid_data.word0 = 0;
- esid_data.data.esid = esid;
- esid_data.data.v = 1;
- esid_data.data.index = entry;
-
- /*
- * No need for an isync before or after this slbmte. The exception
- * we enter with and the rfid we exit with are context synchronizing.
- */
- asm volatile("slbmte %0,%1" : : "r" (vsid_data), "r" (esid_data));
-}
-
-static inline void __slb_allocate(unsigned long esid, unsigned long vsid,
- mm_context_t context)
-{
- int large = 0;
- int region_id = REGION_ID(esid << SID_SHIFT);
- unsigned long offset;
-
- if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) {
- if (region_id == KERNEL_REGION_ID)
- large = 1;
- else if (region_id == USER_REGION_ID)
- large = in_hugepage_area(context, esid << SID_SHIFT);
- }
-
- make_slbe(esid, vsid, large, region_id != USER_REGION_ID);
-
- if (region_id != USER_REGION_ID)
- return;
-
- offset = __get_cpu_var(stab_cache_ptr);
- if (offset < NR_STAB_CACHE_ENTRIES)
- __get_cpu_var(stab_cache[offset++]) = esid;
- else
- offset = NR_STAB_CACHE_ENTRIES+1;
- __get_cpu_var(stab_cache_ptr) = offset;
-}
-
-/*
- * Allocate a segment table entry for the given ea.
- */
-int slb_allocate(unsigned long ea)
-{
- unsigned long vsid, esid;
- mm_context_t context;
-
- /* Check for invalid effective addresses. */
- if (unlikely(!IS_VALID_EA(ea)))
- return 1;
-
- /* Kernel or user address? */
- if (REGION_ID(ea) >= KERNEL_REGION_ID) {
- context = KERNEL_CONTEXT(ea);
- vsid = get_kernel_vsid(ea);
- } else {
- if (unlikely(!current->mm))
- return 1;
-
- context = current->mm->context;
- vsid = get_vsid(context.id, ea);
- }
-
- esid = GET_ESID(ea);
-#ifndef CONFIG_PPC_ISERIES
- BUG_ON((esid << SID_SHIFT) == VMALLOCBASE);
-#endif
- __slb_allocate(esid, vsid, context);
-
- return 0;
-}
-
-/*
- * preload some userspace segments into the SLB.
- */
-static void preload_slb(struct task_struct *tsk, struct mm_struct *mm)
-{
- unsigned long pc = KSTK_EIP(tsk);
- unsigned long stack = KSTK_ESP(tsk);
- unsigned long unmapped_base;
- unsigned long pc_esid = GET_ESID(pc);
- unsigned long stack_esid = GET_ESID(stack);
- unsigned long unmapped_base_esid;
- unsigned long vsid;
-
- if (test_tsk_thread_flag(tsk, TIF_32BIT))
- unmapped_base = TASK_UNMAPPED_BASE_USER32;
- else
- unmapped_base = TASK_UNMAPPED_BASE_USER64;
-
- unmapped_base_esid = GET_ESID(unmapped_base);
-
- if (!IS_VALID_EA(pc) || (REGION_ID(pc) >= KERNEL_REGION_ID))
- return;
- vsid = get_vsid(mm->context.id, pc);
- __slb_allocate(pc_esid, vsid, mm->context);
-
- if (pc_esid == stack_esid)
- return;
-
- if (!IS_VALID_EA(stack) || (REGION_ID(stack) >= KERNEL_REGION_ID))
- return;
- vsid = get_vsid(mm->context.id, stack);
- __slb_allocate(stack_esid, vsid, mm->context);
-
- if (pc_esid == unmapped_base_esid || stack_esid == unmapped_base_esid)
- return;
-
- if (!IS_VALID_EA(unmapped_base) ||
- (REGION_ID(unmapped_base) >= KERNEL_REGION_ID))
- return;
- vsid = get_vsid(mm->context.id, unmapped_base);
- __slb_allocate(unmapped_base_esid, vsid, mm->context);
-}
-
-/* Flush all user entries from the segment table of the current processor. */
-void flush_slb(struct task_struct *tsk, struct mm_struct *mm)
-{
- unsigned long offset = __get_cpu_var(stab_cache_ptr);
- union {
- unsigned long word0;
- slb_dword0 data;
- } esid_data;
-
- if (offset <= NR_STAB_CACHE_ENTRIES) {
- int i;
- asm volatile("isync" : : : "memory");
- for (i = 0; i < offset; i++) {
- esid_data.word0 = 0;
- esid_data.data.esid = __get_cpu_var(stab_cache[i]);
- BUG_ON(esid_data.data.esid == GET_ESID(VMALLOCBASE));
- asm volatile("slbie %0" : : "r" (esid_data));
- }
- asm volatile("isync" : : : "memory");
- } else {
- asm volatile("isync; slbia; isync" : : : "memory");
- slb_add_bolted();
- }
-
- /* Workaround POWER5 < DD2.1 issue */
- if (offset == 1 || offset > NR_STAB_CACHE_ENTRIES) {
- /*
- * flush segment in EEH region, we dont normally access
- * addresses in this region.
- */
- esid_data.word0 = 0;
- esid_data.data.esid = EEH_REGION_ID;
- asm volatile("slbie %0" : : "r" (esid_data));
- }
-
- __get_cpu_var(stab_cache_ptr) = 0;
-
- preload_slb(tsk, mm);
-}
unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \
return sprintf(buf, "%lx\n", val); \
} \
-static ssize_t store_##NAME(struct sys_device *dev, const char *buf, \
- size_t count) \
+static ssize_t __attribute_used__ \
+ store_##NAME(struct sys_device *dev, const char *buf, size_t count) \
{ \
struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
unsigned long val; \
viodev->dev.platform_data = of_node_get(of_node);
viodev->irq = NO_IRQ;
- irq_p = (unsigned int *)get_property(of_node, "interrupts", 0);
+ irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL);
if (irq_p) {
int virq = virt_irq_create_mapping(*irq_p);
if (virq == NO_IRQ) {
#include <asm/naca.h>
#include <asm/rtas.h>
#include <asm/xics.h>
-#include <asm/ppcdebug.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
val64);
}
-static void pSeriesLP_cppr_info(int n_cpu, u8 value)
+void pSeriesLP_cppr_info(int n_cpu, u8 value)
{
unsigned long lpar_rc;
#ifdef CONFIG_SMP
static int get_irq_server(unsigned int irq)
{
- cpumask_t cpumask = irq_affinity[irq];
- cpumask_t tmp = CPU_MASK_NONE;
unsigned int server;
#ifdef CONFIG_IRQ_ALL_CPUS
/* For the moment only implement delivery to all cpus or one cpu */
if (smp_threads_ready) {
+ cpumask_t cpumask = irq_affinity[irq];
+ cpumask_t tmp = CPU_MASK_NONE;
if (cpus_equal(cpumask, CPU_MASK_ALL)) {
server = default_distrib_server;
} else {
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
DEFAULT_PRIORITY);
if (call_status != 0) {
- printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_set_xive "
+ printk(KERN_ERR "xics_enable_irq: irq=%d: ibm_set_xive "
"returned %x\n", irq, call_status);
return;
}
/* Now unmask the interrupt (often a no-op) */
call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
if (call_status != 0) {
- printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_int_on "
+ printk(KERN_ERR "xics_enable_irq: irq=%d: ibm_int_on "
"returned %x\n", irq, call_status);
return;
}
call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
if (call_status != 0) {
- printk(KERN_ERR "xics_disable_real_irq: irq=%x: "
+ printk(KERN_ERR "xics_disable_real_irq: irq=%d: "
"ibm_int_off returned %x\n", irq, call_status);
return;
}
/* Have to set XIVE to 0xff to be able to remove a slot */
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
if (call_status != 0) {
- printk(KERN_ERR "xics_disable_irq: irq=%x: ibm_set_xive(0xff)"
+ printk(KERN_ERR "xics_disable_irq: irq=%d: ibm_set_xive(0xff)"
" returned %x\n", irq, call_status);
return;
}
}
}
-extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
-
int xics_get_irq(struct pt_regs *regs)
{
unsigned int cpu = smp_processor_id();
if (irq == NO_IRQ)
irq = real_irq_to_virt_slowpath(vec);
if (irq == NO_IRQ) {
- printk(KERN_ERR "Interrupt 0x%x (real) is invalid,"
+ printk(KERN_ERR "Interrupt %d (real) is invalid,"
" disabling it.\n", vec);
xics_disable_real_irq(vec);
} else
#ifdef CONFIG_SMP
-extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
-
irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
int cpu = smp_processor_id();
ibm_int_off = rtas_token("ibm,int-off");
np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
- if (!np) {
- printk(KERN_WARNING "Can't find Interrupt Presentation\n");
- udbg_printf("Can't find Interrupt Presentation\n");
- while (1);
- }
+ if (!np)
+ panic("xics_init_IRQ: can't find interrupt presentation");
+
nextnode:
- ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", 0);
+ ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL);
if (ireg) {
/*
* set node starting index for this node
}
ireg = (uint *)get_property(np, "reg", &ilen);
- if (!ireg) {
- printk(KERN_WARNING "Can't find Interrupt Reg Property\n");
- udbg_printf("Can't find Interrupt Reg Property\n");
- while (1);
- }
+ if (!ireg)
+ panic("xics_init_IRQ: can't find interrupt reg property");
while (ilen) {
inodes[indx].addr = (unsigned long long)*ireg++ << 32;
np = of_find_node_by_type(NULL, "interrupt-controller");
if (!np) {
- printk(KERN_WARNING "xics: no ISA Interrupt Controller\n");
+ printk(KERN_WARNING "xics: no ISA interrupt controller\n");
xics_irq_8259_cascade_real = -1;
xics_irq_8259_cascade = -1;
} else {
- ireg = (uint *) get_property(np, "interrupts", 0);
- if (!ireg) {
- printk(KERN_WARNING "Can't find ISA Interrupts Property\n");
- udbg_printf("Can't find ISA Interrupts Property\n");
- while (1);
- }
+ ireg = (uint *) get_property(np, "interrupts", NULL);
+ if (!ireg)
+ panic("xics_init_IRQ: can't find ISA interrupts property");
+
xics_irq_8259_cascade_real = *ireg;
xics_irq_8259_cascade
= virt_irq_create_mapping(xics_irq_8259_cascade_real);
xics_per_cpu[0] = __ioremap((ulong)intr_base, intr_size,
_PAGE_NO_CACHE);
#endif /* CONFIG_SMP */
-#ifdef CONFIG_PPC_PSERIES
- /* actually iSeries does not use any of xics...but it has link dependencies
- * for now, except this new one...
- */
} else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
ops = &pSeriesLP_ops;
-#endif
}
xics_8259_pic.enable = i8259_pic.enable;
if (naca->interrupt_controller == IC_PPC_XIC &&
xics_irq_8259_cascade != -1) {
if (request_irq(irq_offset_up(xics_irq_8259_cascade),
- no_action, 0, "8259 cascade", 0))
- printk(KERN_ERR "xics_init_IRQ: couldn't get 8259 cascade\n");
+ no_action, 0, "8259 cascade", NULL))
+ printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
+ "cascade\n");
i8259_init();
}
return 0;
/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT,
- "IPI", 0);
+ "IPI", NULL);
get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
}
#endif
irq, newmask, xics_status[1]);
if (status) {
- printk(KERN_ERR "xics_set_affinity irq=%d ibm,set-xive "
+ printk(KERN_ERR "xics_set_affinity: irq=%d ibm,set-xive "
"returns %d\n", irq, status);
return;
}
int set_indicator = rtas_token("set-indicator");
const unsigned int giqs = 9005UL; /* Global Interrupt Queue Server */
int status = 0;
- unsigned int irq, cpu = smp_processor_id();
- int xics_status[2];
- unsigned long flags;
+ unsigned int irq, virq, cpu = smp_processor_id();
BUG_ON(set_indicator == RTAS_UNKNOWN_SERVICE);
ops->cppr_info(cpu, DEFAULT_PRIORITY);
iosync();
- printk(KERN_WARNING "HOTPLUG: Migrating IRQs away\n");
- for_each_irq(irq) {
- irq_desc_t *desc = get_irq_desc(irq);
+ for_each_irq(virq) {
+ irq_desc_t *desc;
+ int xics_status[2];
+ unsigned long flags;
+
+ /* We can't set affinity on ISA interrupts */
+ if (virq < irq_offset_value())
+ continue;
+
+ desc = get_irq_desc(virq);
+ irq = virt_irq_to_real(irq_offset_down(virq));
/* We need to get IPIs still. */
- if (irq_offset_down(irq) == XICS_IPI)
+ if (irq == XICS_IPI || irq == NO_IRQ)
continue;
/* We only need to migrate enabled IRQS */
if (status) {
printk(KERN_ERR "migrate_irqs_away: irq=%d "
"ibm,get-xive returns %d\n",
- irq, status);
+ virq, status);
goto unlock;
}
goto unlock;
printk(KERN_WARNING "IRQ %d affinity broken off cpu %u\n",
- irq, cpu);
+ virq, cpu);
/* Reset affinity to all cpus */
xics_status[0] = default_distrib_server;
- status = rtas_call(ibm_set_xive, 3, 1, NULL,
- irq, xics_status[0], xics_status[1]);
+ status = rtas_call(ibm_set_xive, 3, 1, NULL, irq,
+ xics_status[0], xics_status[1]);
if (status)
- printk(KERN_ERR "migrate_irqs_away irq=%d "
+ printk(KERN_ERR "migrate_irqs_away: irq=%d "
"ibm,set-xive returns %d\n",
- irq, status);
+ virq, status);
unlock:
spin_unlock_irqrestore(&desc->lock, flags);
}
-
}
#endif
EXTRA_CFLAGS += -mno-minimal-toc
-obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o
+obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o slb_low.o slb.o
obj-$(CONFIG_DISCONTIGMEM) += numa.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
unsigned long is_write = error_code & 0x02000000;
unsigned long trap = TRAP(regs);
- if (trap == 0x300 || trap == 0x380) {
+ BUG_ON((trap == 0x380) || (trap == 0x480));
+
+ if (trap == 0x300) {
if (debugger_fault_handler(regs))
return 0;
}
/* On a kernel SLB miss we can only check for a valid exception entry */
- if (!user_mode(regs) && (trap == 0x380 || address >= TASK_SIZE))
+ if (!user_mode(regs) && (address >= TASK_SIZE))
return SIGSEGV;
if (error_code & 0x00400000) {
struct mm_struct *mm;
pte_t *ptep;
int ret;
- int cpu;
int user_region = 0;
int local = 0;
cpumask_t tmp;
if (pgdir == NULL)
return 1;
- cpu = get_cpu();
- tmp = cpumask_of_cpu(cpu);
+ tmp = cpumask_of_cpu(smp_processor_id());
if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
local = 1;
ret = hash_huge_page(mm, access, ea, vsid, local);
else {
ptep = find_linux_pte(pgdir, ea);
- if (ptep == NULL) {
- put_cpu();
+ if (ptep == NULL)
return 1;
- }
ret = __hash_page(ea, access, vsid, ptep, trap, local);
}
- put_cpu();
return ret;
}
boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
+ max_pfn = max_low_pfn;
+
/* add all physical memory to the bootmem map. Also find the first */
for (i=0; i < lmb.memory.cnt; i++) {
unsigned long physbase, size;
num_physpages = max_low_pfn; /* RAM is assumed contiguous */
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
- max_pfn = max_low_pfn;
#ifdef CONFIG_DISCONTIGMEM
{
totalram_pages += free_all_bootmem();
- for (addr = KERNELBASE; addr <= (unsigned long)__va(lmb_end_of_DRAM());
+ for (addr = KERNELBASE; addr < (unsigned long)__va(lmb_end_of_DRAM());
addr += PAGE_SIZE) {
if (!PageReserved(virt_to_page(addr)))
continue;
void *pgdir;
pte_t *ptep;
int local = 0;
- int cpu;
cpumask_t tmp;
+ unsigned long flags;
/* handle i-cache coherency */
if (!(cur_cpu_spec->cpu_features & CPU_FTR_COHERENT_ICACHE) &&
vsid = get_vsid(vma->vm_mm->context.id, ea);
- cpu = get_cpu();
- tmp = cpumask_of_cpu(cpu);
+ local_irq_save(flags);
+ tmp = cpumask_of_cpu(smp_processor_id());
if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
local = 1;
__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
0x300, local);
- put_cpu();
+ local_irq_restore(flags);
}
void * reserve_phb_iospace(unsigned long size)
min_low_pfn = 0;
max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+ max_pfn = max_low_pfn;
if (parse_numa_properties())
setup_nonnuma();
If unsure, say N.
+config QDIO_DEBUG
+ bool "Extended debugging information"
+ depends on QDIO
+ help
+ Say Y here to get extended debugging output in /proc/s390dbf/qdio...
+ Warning: this option reduces the performance of the QDIO module.
+
+ If unsure, say N.
+
comment "Misc"
config PREEMPT
#include <linux/errno.h>
#include <asm/uaccess.h>
#include <asm/io.h>
+#include <asm/smp.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/page-flags.h>
*/
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
static int appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp);
+ void __user *buffer, size_t *lenp, loff_t *ppos);
static int appldata_interval_handler(ctl_table *ctl, int write,
struct file *filp,
void __user *buffer,
- size_t *lenp);
+ size_t *lenp, loff_t *ppos);
static struct ctl_table_header *appldata_sysctl_header;
static struct ctl_table appldata_table[] = {
*/
static int
appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int len;
char buf[2];
- if (!*lenp || filp->f_pos) {
+ if (!*lenp || *ppos) {
*lenp = 0;
return 0;
}
spin_unlock(&appldata_timer_lock);
out:
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
return 0;
}
*/
static int
appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int len, interval;
char buf[16];
- if (!*lenp || filp->f_pos) {
+ if (!*lenp || *ppos) {
*lenp = 0;
return 0;
}
interval);
out:
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
return 0;
}
*/
static int
appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct appldata_ops *ops = NULL, *tmp_ops;
int rc, len, found;
}
spin_unlock_bh(&appldata_ops_lock);
- if (!*lenp || filp->f_pos) {
+ if (!*lenp || *ppos) {
*lenp = 0;
module_put(ops->owner);
return 0;
spin_unlock_bh(&appldata_ops_lock);
out:
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
module_put(ops->owner);
return 0;
}
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
-CONFIG_STANDALONE=y
#
# General setup
CONFIG_MACHCHK_WARNING=y
CONFIG_QDIO=y
# CONFIG_QDIO_PERF_STATS is not set
+# CONFIG_QDIO_DEBUG is not set
#
# Misc
#
# Generic Driver Options
#
+CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
# CONFIG_DEBUG_DRIVER is not set
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CLK_JIFFIES=y
+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
+# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
# CONFIG_NET_SCH_HTB is not set
# CONFIG_NET_SCH_HFSC is not set
-CONFIG_NET_SCH_CSZ=m
CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
-# CONFIG_NET_SCH_DELAY is not set
+# CONFIG_NET_SCH_NETEM is not set
# CONFIG_NET_SCH_INGRESS is not set
CONFIG_NET_QOS=y
CONFIG_NET_ESTIMATOR=y
#
# DOS/FAT/NT Filesystems
#
-# CONFIG_FAT_FS is not set
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
# CONFIG_NTFS_FS is not set
#
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_CRYPTO_BLOWFISH is not set
# CONFIG_CRYPTO_TWOFISH is not set
# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_AES_GENERIC is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
# CONFIG_CRYPTO_TEA is not set
#
# Library routines
#
-# CONFIG_CRC16 is not set
+# CONFIG_CRC_CCITT is not set
# CONFIG_CRC32 is not set
# CONFIG_LIBCRC32C is not set
} _sigev_un;
};
+extern int copy_siginfo_to_user32(siginfo_t32 __user *to, siginfo_t *from);
+extern int copy_siginfo_from_user32(siginfo_t *to, siginfo_t32 __user *from);
+
#endif /* _ASM_S390X_S390_H */
return err;
}
+int copy_siginfo_from_user32(siginfo_t *to, siginfo_t32 __user *from)
+{
+ int err;
+ u32 tmp;
+
+ if (!access_ok (VERIFY_READ, from, sizeof(siginfo_t32)))
+ return -EFAULT;
+
+ err = __get_user(to->si_signo, &from->si_signo);
+ err |= __get_user(to->si_errno, &from->si_errno);
+ err |= __get_user(to->si_code, &from->si_code);
+
+ if (from->si_code < 0)
+ err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
+ else {
+ switch (from->si_code >> 16) {
+ case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
+ case __SI_MESGQ >> 16:
+ err |= __get_user(to->si_int, &from->si_int);
+ /* fallthrough */
+ case __SI_KILL >> 16:
+ err |= __get_user(to->si_pid, &from->si_pid);
+ err |= __get_user(to->si_uid, &from->si_uid);
+ break;
+ case __SI_CHLD >> 16:
+ err |= __get_user(to->si_pid, &from->si_pid);
+ err |= __get_user(to->si_uid, &from->si_uid);
+ err |= __get_user(to->si_utime, &from->si_utime);
+ err |= __get_user(to->si_stime, &from->si_stime);
+ err |= __get_user(to->si_status, &from->si_status);
+ break;
+ case __SI_FAULT >> 16:
+ err |= __get_user(tmp, &from->si_addr);
+ to->si_addr = (void *)(u64) (tmp & PSW32_ADDR_INSN);
+ break;
+ case __SI_POLL >> 16:
+ case __SI_TIMER >> 16:
+ err |= __get_user(to->si_band, &from->si_band);
+ err |= __get_user(to->si_fd, &from->si_fd);
+ break;
+ default:
+ break;
+ }
+ }
+ return err;
+}
+
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
* R15 - kernel stack pointer
*/
- .macro SAVE_ALL_BASE psworg,savearea,sync
- stm %r12,%r15,\savearea
- l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
+ .macro SAVE_ALL_BASE savearea
+ stm %r12,%r15,\savearea
+ l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
.endm
- .macro CLEANUP_SAVE_ALL_BASE psworg,savearea,sync
- l %r1,SP_PSW+4(%r15)
- cli 1(%r1),0xcf
- bne BASED(0f)
- mvc \savearea(16),SP_R12(%r15)
-0: st %r13,SP_R13(%r15)
- .endm
-
- .macro SAVE_ALL psworg,savearea,sync
+ .macro SAVE_ALL psworg,savearea,sync
+ la %r12,\psworg
.if \sync
- tm \psworg+1,0x01 # test problem state bit
- bz BASED(1f) # skip stack setup save
- l %r15,__LC_KERNEL_STACK # problem state -> load ksp
+ tm \psworg+1,0x01 # test problem state bit
+ bz BASED(2f) # skip stack setup save
+ l %r15,__LC_KERNEL_STACK # problem state -> load ksp
.else
- tm \psworg+1,0x01 # test problem state bit
- bnz BASED(0f) # from user -> load async stack
- l %r14,__LC_ASYNC_STACK # are we already on the async stack ?
- slr %r14,%r15
+ tm \psworg+1,0x01 # test problem state bit
+ bnz BASED(1f) # from user -> load async stack
+ clc \psworg+4(4),BASED(.Lcritical_end)
+ bhe BASED(0f)
+ clc \psworg+4(4),BASED(.Lcritical_start)
+ bl BASED(0f)
+ l %r14,BASED(.Lcleanup_critical)
+ basr %r14,%r14
+ tm 0(%r12),0x01 # retest problem state after cleanup
+ bnz BASED(1f)
+0: l %r14,__LC_ASYNC_STACK # are we already on the async stack ?
+ slr %r14,%r15
sra %r14,13
- be BASED(1f)
-0: l %r15,__LC_ASYNC_STACK
+ be BASED(2f)
+1: l %r15,__LC_ASYNC_STACK
.endif
-1: s %r15,BASED(.Lc_spsize) # make room for registers & psw
- l %r14,BASED(.L\psworg)
- slr %r12,%r12
- icm %r14,12,__LC_SVC_ILC
- stm %r0,%r11,SP_R0(%r15) # store gprs 0-12 to kernel stack
- st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
- mvc SP_R12(16,%r15),\savearea # move R13-R15 to stack
- mvc SP_PSW(8,%r15),\psworg # move user PSW to stack
- st %r14,SP_ILC(%r15)
- st %r12,0(%r15) # clear back chain
- .endm
-
- .macro CLEANUP_SAVE_ALL psworg,savearea,sync
- l %r1,\savearea+12
- .if \sync
- tm \psworg+1,0x01
- bz BASED(1f)
- l %r1,__LC_KERNEL_STACK
- .else
- tm \psworg+1,0x01
- bnz BASED(0f)
- l %r0,__LC_ASYNC_STACK
- slr %r0,%r1
- sra %r0,13
- bz BASED(1f)
-0: l %r1,__LC_ASYNC_STACK
- .endif
-1: s %r1,BASED(.Lc_spsize)
- st %r1,SP_R15(%r15)
- l %r0,BASED(.L\psworg)
- xc SP_R12(4,%r15),SP_R12(%r15)
- icm %r0,12,__LC_SVC_ILC
- st %r0,SP_R14(%r15)
- mvc SP_R0(48,%r1),SP_R0(%r15)
- mvc SP_ORIG_R2(4,%r1),SP_R2(%r15)
- mvc SP_R12(16,%r1),\savearea
- mvc SP_PSW(8,%r1),\psworg
- st %r0,SP_ILC(%r1)
- xc 0(4,%r1),0(%r1)
- .endm
-
- .macro RESTORE_ALL # system exit macro
- mvc __LC_RETURN_PSW(8),SP_PSW(%r15) # move user PSW to lowcore
- ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
- lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
- lpsw __LC_RETURN_PSW # back to caller
- .endm
-
- .macro CLEANUP_RESTORE_ALL
- l %r1,SP_PSW+4(%r15)
- cli 0(%r1),0x82
- bne BASED(0f)
- mvc SP_PSW(8,%r15),__LC_RETURN_PSW
- b BASED(1f)
-0: l %r1,SP_R15(%r15)
- mvc SP_PSW(8,%r15),SP_PSW(%r1)
- mvc SP_R0(64,%r15),SP_R0(%r1)
-1:
+2: s %r15,BASED(.Lc_spsize) # make room for registers & psw
+ mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
+ la %r12,\psworg
+ st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
+ icm %r12,12,__LC_SVC_ILC
+ stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
+ st %r12,SP_ILC(%r15)
+ mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
+ la %r12,0
+ st %r12,0(%r15) # clear back chain
.endm
- .macro GET_THREAD_INFO
- l %r9,__LC_THREAD_INFO
- .endm
-
- .macro CHECK_CRITICAL
- tm SP_PSW+1(%r15),0x01 # test problem state bit
- bnz BASED(0f) # from user -> not critical
- clc SP_PSW+4(4,%r15),BASED(.Lcritical_end)
- bnl BASED(0f)
- clc SP_PSW+4(4,%r15),BASED(.Lcritical_start)
- bl BASED(0f)
- l %r1,BASED(.Lcleanup_critical)
- basr %r14,%r1
-0:
+ .macro RESTORE_ALL sync
+ mvc __LC_RETURN_PSW(8),SP_PSW(%r15) # move user PSW to lowcore
+ .if !\sync
+ ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
+ .endif
+ lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
+ lpsw __LC_RETURN_PSW # back to caller
.endm
/*
.globl system_call
system_call:
- SAVE_ALL_BASE __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+ SAVE_ALL_BASE __LC_SAVE_AREA
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
lh %r7,0x8a # get svc number from lowcore
-sysc_enter:
- GET_THREAD_INFO # load pointer to task_struct to R9
sysc_do_svc:
+ l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
sla %r7,2 # *4 and test for svc 0
bnz BASED(sysc_nr_ok) # svc number > 0
# svc 0: system call number in %r1
tm __TI_flags+3(%r9),_TIF_WORK_SVC
bnz BASED(sysc_work) # there is work to do (signals etc.)
sysc_leave:
- RESTORE_ALL
+ RESTORE_ALL 1
#
# recheck if there is more work to do
#
sysc_work_loop:
- GET_THREAD_INFO # load pointer to task_struct to R9
tm __TI_flags+3(%r9),_TIF_WORK_SVC
bz BASED(sysc_leave) # there is no work to do
#
.globl ret_from_fork
ret_from_fork:
l %r13,__LC_SVC_NEW_PSW+4
- GET_THREAD_INFO # load pointer to task_struct to R9
+ l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r1,BASED(.Lschedtail)
basr %r14,%r1
stosm 24(%r15),0x03 # reenable interrupts
* we just ignore the PER event (FIXME: is there anything we have to do
* for LPSW?).
*/
- SAVE_ALL_BASE __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+ SAVE_ALL_BASE __LC_SAVE_AREA
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
bnz BASED(pgm_per) # got per exception -> special case
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+ l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r3,__LC_PGM_ILC # load program interruption code
la %r8,0x7f
nr %r8,%r3
pgm_do_call:
l %r7,BASED(.Ljump_table)
sll %r8,2
- GET_THREAD_INFO
l %r7,0(%r8,%r7) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area
la %r14,BASED(sysc_return)
clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
be BASED(pgm_svcper)
# no interesting special case, ignore PER event
- lm %r13,%r15,__LC_SAVE_AREA
+ lm %r12,%r15,__LC_SAVE_AREA
lpsw 0x28
#
#
pgm_per_std:
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
- GET_THREAD_INFO
+ l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r1,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
pgm_svcper:
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
lh %r7,0x8a # get svc number from lowcore
- GET_THREAD_INFO # load pointer to task_struct to R9
+ l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r1,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
.globl io_int_handler
io_int_handler:
- SAVE_ALL_BASE __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
- SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
stck __LC_INT_CLOCK
- CHECK_CRITICAL
- GET_THREAD_INFO # load pointer to task_struct to R9
+ SAVE_ALL_BASE __LC_SAVE_AREA+16
+ SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
+ l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
la %r2,SP_PTREGS(%r15) # address of register-save area
basr %r14,%r1 # branch to standard irq handler
tm __TI_flags+3(%r9),_TIF_WORK_INT
bnz BASED(io_work) # there is work to do (signals etc.)
io_leave:
- RESTORE_ALL
+ RESTORE_ALL 0
#ifdef CONFIG_PREEMPT
io_preempt:
l %r1,BASED(.Lschedule)
basr %r14,%r1 # call schedule
stnsm 24(%r15),0xfc # disable I/O and ext. interrupts
- GET_THREAD_INFO # load pointer to task_struct to R9
xc __TI_precount(4,%r9),__TI_precount(%r9)
b BASED(io_resume_loop)
#endif
stosm 24(%r15),0x03 # reenable interrupts
basr %r14,%r1 # call scheduler
stnsm 24(%r15),0xfc # disable I/O and ext. interrupts
- GET_THREAD_INFO # load pointer to task_struct to R9
tm __TI_flags+3(%r9),_TIF_WORK_INT
bz BASED(io_leave) # there is no work to do
b BASED(io_work_loop)
.globl ext_int_handler
ext_int_handler:
- SAVE_ALL_BASE __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
- SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
stck __LC_INT_CLOCK
- CHECK_CRITICAL
- GET_THREAD_INFO # load pointer to task_struct to R9
+ SAVE_ALL_BASE __LC_SAVE_AREA+16
+ SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
+ l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
la %r2,SP_PTREGS(%r15) # address of register-save area
lh %r3,__LC_EXT_INT_CODE # get interruption code
l %r1,BASED(.Ldo_extint)
.globl mcck_int_handler
mcck_int_handler:
- SAVE_ALL_BASE __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32,0
+ SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32,0
l %r1,BASED(.Ls390_mcck)
basr %r14,%r1 # call machine check handler
mcck_return:
- RESTORE_ALL
+ RESTORE_ALL 0
#ifdef CONFIG_SMP
/*
restart_go:
#endif
-cleanup_table:
- .long system_call, sysc_enter, cleanup_sysc_enter
- .long sysc_return, sysc_leave, cleanup_sysc_return
- .long sysc_leave, sysc_work_loop, cleanup_sysc_leave
- .long sysc_work_loop, sysc_reschedule, cleanup_sysc_return
-cleanup_table_entries=(.-cleanup_table) / 12
+cleanup_table_system_call:
+ .long system_call + 0x80000000, sysc_do_svc + 0x80000000
+cleanup_table_sysc_return:
+ .long sysc_return + 0x80000000, sysc_leave + 0x80000000
+cleanup_table_sysc_leave:
+ .long sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
+cleanup_table_sysc_work_loop:
+ .long sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000
cleanup_critical:
- lhi %r0,cleanup_table_entries
- la %r1,BASED(cleanup_table)
- l %r2,SP_PSW+4(%r15)
- la %r2,0(%r2)
-cleanup_loop:
- cl %r2,0(%r1)
- bl BASED(cleanup_cont)
- cl %r2,4(%r1)
- bl BASED(cleanup_found)
-cleanup_cont:
- la %r1,12(%r1)
- bct %r0,BASED(cleanup_loop)
+ clc 4(4,%r12),BASED(cleanup_table_system_call)
+ bl BASED(0f)
+ clc 4(4,%r12),BASED(cleanup_table_system_call+4)
+ bl BASED(cleanup_system_call)
+0:
+ clc 4(4,%r12),BASED(cleanup_table_sysc_return)
+ bl BASED(0f)
+ clc 4(4,%r12),BASED(cleanup_table_sysc_return+4)
+ bl BASED(cleanup_sysc_return)
+0:
+ clc 4(4,%r12),BASED(cleanup_table_sysc_leave)
+ bl BASED(0f)
+ clc 4(4,%r12),BASED(cleanup_table_sysc_leave+4)
+ bl BASED(cleanup_sysc_leave)
+0:
+ clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop)
+ bl BASED(0f)
+ clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
+ bl BASED(cleanup_sysc_leave)
+0:
br %r14
-cleanup_found:
- l %r1,8(%r1)
- br %r1
-cleanup_sysc_enter:
- CLEANUP_SAVE_ALL_BASE __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
- CLEANUP_SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
- lh %r0,0x8a
- st %r0,SP_R7(%r15)
- la %r1,BASED(sysc_enter)
- o %r1,BASED(.Lamode)
- st %r1,SP_PSW+4(%r15)
+cleanup_system_call:
+ mvc __LC_RETURN_PSW(4),0(%r12)
+ clc 4(4,%r12),BASED(cleanup_table_system_call)
+ bne BASED(0f)
+ mvc __LC_SAVE_AREA(16),__LC_SAVE_AREA+16
+0: st %r13,__LC_SAVE_AREA+20
+ SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+ st %r15,__LC_SAVE_AREA+28
+ lh %r7,0x8a
+ mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
+ la %r12,__LC_RETURN_PSW
br %r14
cleanup_sysc_return:
- la %r1,BASED(sysc_return)
- o %r1,BASED(.Lamode)
- st %r1,SP_PSW+4(%r15)
+ mvc __LC_RETURN_PSW(4),0(%r12)
+ mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return)
+ la %r12,__LC_RETURN_PSW
br %r14
cleanup_sysc_leave:
- CLEANUP_RESTORE_ALL
+ clc 4(4,%r12),BASED(cleanup_sysc_leave_lpsw)
+ be BASED(0f)
+ mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
+ mvc __LC_SAVE_AREA+16(16),SP_R12(%r15)
+ lm %r0,%r11,SP_R0(%r15)
+ l %r15,SP_R15(%r15)
+0: la %r12,__LC_RETURN_PSW
br %r14
+cleanup_sysc_leave_lpsw:
+ .long sysc_leave + 10 + 0x80000000
/*
* Integer constants
.Lc_overhead: .long STACK_FRAME_OVERHEAD
.Lc_pactive: .long PREEMPT_ACTIVE
.Lnr_syscalls: .long NR_syscalls
-.L0x018: .long 0x018
-.L0x020: .long 0x020
-.L0x028: .long 0x028
-.L0x030: .long 0x030
-.L0x038: .long 0x038
-.Lamode: .long 0x80000000
+.L0x018: .short 0x018
+.L0x020: .short 0x020
+.L0x028: .short 0x028
+.L0x030: .short 0x030
+.L0x038: .short 0x038
/*
* Symbol constants
_TIF_RESTART_SVC | _TIF_SINGLE_STEP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
+#define BASED(name) name-system_call(%r13)
+
/*
* Register usage in interrupt handlers:
* R9 - pointer to current task structure
* R15 - kernel stack pointer
*/
+ .macro SAVE_ALL_BASE savearea
+ stmg %r12,%r15,\savearea
+ larl %r13,system_call
+ .endm
+
.macro SAVE_ALL psworg,savearea,sync
- stmg %r13,%r15,\savearea
+ la %r12,\psworg
.if \sync
- tm \psworg+1,0x01 # test problem state bit
- jz 1f # skip stack setup save
- lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
+ tm \psworg+1,0x01 # test problem state bit
+ jz 2f # skip stack setup save
+ lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
.else
- tm \psworg+1,0x01 # test problem state bit
- jnz 0f # from user -> load kernel stack
- lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ?
+ tm \psworg+1,0x01 # test problem state bit
+ jnz 1f # from user -> load kernel stack
+ clc \psworg+8(8),BASED(.Lcritical_end)
+ jhe 0f
+ clc \psworg+8(8),BASED(.Lcritical_start)
+ jl 0f
+ brasl %r14,cleanup_critical
+ tm 0(%r12),0x01 # retest problem state after cleanup
+ jnz 1f
+0: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ?
slgr %r14,%r15
srag %r14,%r14,14
- jz 1f
-0: lg %r15,__LC_ASYNC_STACK # load async stack
+ jz 2f
+1: lg %r15,__LC_ASYNC_STACK # load async stack
.endif
-1: aghi %r15,-SP_SIZE # make room for registers & psw
- lghi %r14,\psworg
- slgr %r13,%r13
- icm %r14,12,__LC_SVC_ILC
- stmg %r0,%r12,SP_R0(%r15) # store gprs 0-13 to kernel stack
- stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
- mvc SP_R13(24,%r15),\savearea # move r13, r14 and r15 to stack
- mvc SP_PSW(16,%r15),\psworg # move user PSW to stack
- st %r14,SP_ILC(%r15)
- stg %r13,0(%r15)
+2: aghi %r15,-SP_SIZE # make room for registers & psw
+ mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
+ la %r12,\psworg
+ stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
+ icm %r12,12,__LC_SVC_ILC
+ stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
+ st %r12,SP_ILC(%r15)
+ mvc SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
+ la %r12,0
+ stg %r12,0(%r15)
.endm
- .macro CLEANUP_SAVE_ALL psworg,savearea,sync
- lg %r1,SP_PSW+8(%r15)
- cli 1(%r1),0xdf
- jne 2f
- mvc \savearea(24),SP_R13(%r15)
-2: lg %r1,\savearea+16
- .if \sync
- tm \psworg+1,0x01
- jz 1f
- lg %r1,__LC_KERNEL_STACK
- .else
- tm \psworg+1,0x01
- jnz 0f
- lg %r0,__LC_ASYNC_STACK
- slgr %r0,%r1
- srag %r0,%r0,14
- jz 1f
-0: lg %r1,__LC_ASYNC_STACK
+ .macro RESTORE_ALL sync
+ mvc __LC_RETURN_PSW(16),SP_PSW(%r15) # move user PSW to lowcore
+ .if !\sync
+ ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
.endif
-1: aghi %r1,-SP_SIZE
- stg %r1,SP_R15(%r15)
- lghi %r0,\psworg
- xc SP_R13(8,%r15),SP_R13(%r15)
- icm %r0,12,__LC_SVC_ILC
- stg %r0,SP_R14(%r15)
- mvc SP_R0(104,%r1),SP_R0(%r15)
- mvc SP_ORIG_R2(8,%r1),SP_R2(%r15)
- mvc SP_R13(24,%r1),\savearea
- mvc SP_PSW(16,%r1),\psworg
- st %r0,SP_ILC(%r1)
- xc 0(8,%r1),0(%r1)
- .endm
-
- .macro RESTORE_ALL # system exit macro
- mvc __LC_RETURN_PSW(16),SP_PSW(%r15) # move user PSW to lowcore
- ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
- lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
- lpswe __LC_RETURN_PSW # back to caller
- .endm
-
- .macro CLEANUP_RESTORE_ALL
- lg %r1,SP_PSW+8(%r15)
- cli 0(%r1),0xb2
- jne 0f
- mvc SP_PSW(16,%r15),__LC_RETURN_PSW
- j 1f
-0: lg %r1,SP_R15(%r15)
- mvc SP_PSW(16,%r15),SP_PSW(%r1)
- mvc SP_R0(128,%r15),SP_R0(%r1)
-1:
- .endm
-
- .macro GET_THREAD_INFO
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
- .endm
-
- .macro CHECK_CRITICAL
- tm SP_PSW+1(%r15),0x01 # test problem state bit
- jnz 0f # from user -> not critical
- larl %r1,.Lcritical_start
- clc SP_PSW+8(8,%r15),8(%r1) # compare ip with __critical_end
- jnl 0f
- clc SP_PSW+8(8,%r15),0(%r1) # compare ip with __critical_start
- jl 0f
- brasl %r14,cleanup_critical
-0:
+ lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
+ lpswe __LC_RETURN_PSW # back to caller
.endm
/*
.globl system_call
system_call:
+ SAVE_ALL_BASE __LC_SAVE_AREA
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
-sysc_enter:
- GET_THREAD_INFO # load pointer to task_struct to R9
sysc_do_svc:
+ lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
slag %r7,%r7,2 # *4 and test for svc 0
jnz sysc_nr_ok
# svc 0: system call number in %r1
- lghi %r0,NR_syscalls
- clr %r1,%r0
+ cl %r1,BASED(.Lnr_syscalls)
jnl sysc_nr_ok
lgfr %r7,%r1 # clear high word in r1
slag %r7,%r7,2 # svc 0: system call number in %r1
tm __TI_flags+7(%r9),_TIF_WORK_SVC
jnz sysc_work # there is work to do (signals etc.)
sysc_leave:
- RESTORE_ALL
+ RESTORE_ALL 1
#
# recheck if there is more work to do
#
sysc_work_loop:
- GET_THREAD_INFO # load pointer to task_struct to R9
tm __TI_flags+7(%r9),_TIF_WORK_SVC
jz sysc_leave # there is no work to do
#
# a new process exits the kernel with ret_from_fork
#
.globl ret_from_fork
-ret_from_fork:
- GET_THREAD_INFO # load pointer to task_struct to R9
+ret_from_fork:
+ lg %r13,__LC_SVC_NEW_PSW+8
+ lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
brasl %r14,schedule_tail
stosm 24(%r15),0x03 # reenable interrupts
j sysc_return
* we just ignore the PER event (FIXME: is there anything we have to do
* for LPSW?).
*/
+ SAVE_ALL_BASE __LC_SAVE_AREA
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
jnz pgm_per # got per exception -> special case
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+ lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
lgf %r3,__LC_PGM_ILC # load program interruption code
lghi %r8,0x7f
ngr %r8,%r3
pgm_do_call:
sll %r8,3
- GET_THREAD_INFO
larl %r1,pgm_check_table
lg %r1,0(%r8,%r1) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area
clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
je pgm_svcper
# no interesting special case, ignore PER event
+ lmg %r12,%r15,__LC_SAVE_AREA
lpswe __LC_PGM_OLD_PSW
#
#
pgm_per_std:
SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
- GET_THREAD_INFO
+ lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
lg %r1,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
pgm_svcper:
SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
- GET_THREAD_INFO # load pointer to task_struct to R9
+ lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
lg %r1,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
*/
.globl io_int_handler
io_int_handler:
- SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0
stck __LC_INT_CLOCK
- CHECK_CRITICAL
- GET_THREAD_INFO # load pointer to task_struct to R9
+ SAVE_ALL_BASE __LC_SAVE_AREA+32
+ SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0
+ lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
la %r2,SP_PTREGS(%r15) # address of register-save area
brasl %r14,do_IRQ # call standard irq handler
tm __TI_flags+7(%r9),_TIF_WORK_INT
jnz io_work # there is work to do (signals etc.)
io_leave:
- RESTORE_ALL
+ RESTORE_ALL 0
#ifdef CONFIG_PREEMPT
io_preempt:
stosm 48(%r15),0x03 # reenable interrupts
brasl %r14,schedule # call schedule
stnsm 48(%r15),0xfc # disable I/O and ext. interrupts
- GET_THREAD_INFO # load pointer to task_struct to R9
xc __TI_precount(4,%r9),__TI_precount(%r9)
j io_resume_loop
#endif
stosm 48(%r15),0x03 # reenable interrupts
brasl %r14,schedule # call scheduler
stnsm 48(%r15),0xfc # disable I/O and ext. interrupts
- GET_THREAD_INFO # load pointer to task_struct to R9
tm __TI_flags+7(%r9),_TIF_WORK_INT
jz io_leave # there is no work to do
j io_work_loop
*/
.globl ext_int_handler
ext_int_handler:
- SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0
- CHECK_CRITICAL
- GET_THREAD_INFO # load pointer to task_struct to R9
stck __LC_INT_CLOCK
+ SAVE_ALL_BASE __LC_SAVE_AREA+32
+ SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0
+ lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
la %r2,SP_PTREGS(%r15) # address of register-save area
llgh %r3,__LC_EXT_INT_CODE # get interruption code
brasl %r14,do_extint
*/
.globl mcck_int_handler
mcck_int_handler:
+ SAVE_ALL_BASE __LC_SAVE_AREA+64
SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64,0
brasl %r14,s390_do_machine_check
mcck_return:
- RESTORE_ALL
+ RESTORE_ALL 0
#ifdef CONFIG_SMP
/*
restart_go:
#endif
-cleanup_table:
- .quad system_call, sysc_enter, cleanup_sysc_enter
- .quad sysc_return, sysc_leave, cleanup_sysc_return
- .quad sysc_leave, sysc_work_loop, cleanup_sysc_leave
- .quad sysc_work_loop, sysc_reschedule, cleanup_sysc_return
-cleanup_table_entries=(.-cleanup_table) / 24
+cleanup_table_system_call:
+ .quad system_call, sysc_do_svc
+cleanup_table_sysc_return:
+ .quad sysc_return, sysc_leave
+cleanup_table_sysc_leave:
+ .quad sysc_leave, sysc_work_loop
+cleanup_table_sysc_work_loop:
+ .quad sysc_work_loop, sysc_reschedule
cleanup_critical:
- lghi %r0,cleanup_table_entries
- larl %r1,cleanup_table
- lg %r2,SP_PSW+8(%r15)
-cleanup_loop:
- clg %r2,0(%r1)
- jl cleanup_cont
- clg %r2,8(%r1)
- jl cleanup_found
-cleanup_cont:
- la %r1,24(%r1)
- brct %r0,cleanup_loop
+ clc 8(8,%r12),BASED(cleanup_table_system_call)
+ jl 0f
+ clc 8(8,%r12),BASED(cleanup_table_system_call+8)
+ jl cleanup_system_call
+0:
+ clc 8(8,%r12),BASED(cleanup_table_sysc_return)
+ jl 0f
+ clc 8(8,%r12),BASED(cleanup_table_sysc_return+8)
+ jl cleanup_sysc_return
+0:
+ clc 8(8,%r12),BASED(cleanup_table_sysc_leave)
+ jl 0f
+ clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8)
+ jl cleanup_sysc_leave
+0:
+ clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop)
+ jl 0f
+ clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
+ jl cleanup_sysc_leave
+0:
br %r14
-cleanup_found:
- lg %r1,16(%r1)
- br %r1
-
-cleanup_sysc_enter:
- CLEANUP_SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
- llgh %r0,0x8a
- stg %r0,SP_R7(%r15)
- larl %r1,sysc_enter
- stg %r1,SP_PSW+8(%r15)
+
+cleanup_system_call:
+ mvc __LC_RETURN_PSW(8),0(%r12)
+ clc 8(8,%r12),BASED(cleanup_table_system_call)
+ jne 0f
+ mvc __LC_SAVE_AREA(32),__LC_SAVE_AREA+32
+0: stg %r13,__LC_SAVE_AREA+40
+ SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+ stg %r15,__LC_SAVE_AREA+56
+ llgh %r7,__LC_SVC_INT_CODE
+ mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
+ la %r12,__LC_RETURN_PSW
br %r14
cleanup_sysc_return:
- larl %r1,sysc_return
- stg %r1,SP_PSW+8(%r15)
+ mvc __LC_RETURN_PSW(8),0(%r12)
+ mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return)
+ la %r12,__LC_RETURN_PSW
br %r14
cleanup_sysc_leave:
- CLEANUP_RESTORE_ALL
+ clc 8(8,%r12),BASED(cleanup_sysc_leave_lpsw)
+ je 0f
+ mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
+ mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
+ lmg %r0,%r11,SP_R0(%r15)
+ lg %r15,SP_R15(%r15)
+0: la %r12,__LC_RETURN_PSW
br %r14
+cleanup_sysc_leave_lpsw:
+ .quad sysc_leave + 12
/*
* Integer constants
.align 4
.Lconst:
.Lc_pactive: .long PREEMPT_ACTIVE
+.Lnr_syscalls: .long NR_syscalls
+.L0x0130: .short 0x130
+.L0x0140: .short 0x140
+.L0x0150: .short 0x150
+.L0x0160: .short 0x160
+.L0x0170: .short 0x170
.Lcritical_start:
.quad __critical_start
.Lcritical_end:
copied += sizeof(unsigned int);
}
return 0;
+ case PTRACE_GETEVENTMSG:
+ return put_user((__u32) child->ptrace_message,
+ (unsigned int __user *) data);
+ case PTRACE_GETSIGINFO:
+ if (child->last_siginfo == NULL)
+ return -EINVAL;
+ return copy_siginfo_to_user32((siginfo_t32 __user *) data,
+ child->last_siginfo);
+ case PTRACE_SETSIGINFO:
+ if (child->last_siginfo == NULL)
+ return -EINVAL;
+ return copy_siginfo_from_user32(child->last_siginfo,
+ (siginfo_t32 __user *) data);
}
return ptrace_request(child, request, addr, data);
}
return s;
}
EXPORT_SYMBOL_NOVERS(memset);
-
-/*
- * missing exports for string functions defined in lib/string.c
- */
-EXPORT_SYMBOL_NOVERS(memmove);
-EXPORT_SYMBOL_NOVERS(strchr);
-EXPORT_SYMBOL_NOVERS(strnchr);
-EXPORT_SYMBOL_NOVERS(strncmp);
-EXPORT_SYMBOL_NOVERS(strpbrk);
# Makefile for the linux s390-specific parts of the memory manager.
#
-obj-y := init.o fault.o ioremap.o extmem.o
+obj-y := init.o fault.o ioremap.o extmem.o mmap.o
obj-$(CONFIG_CMM) += cmm.o
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
+#include <asm/smp.h>
#include "../../../drivers/s390/net/smsgiucv.h"
static int
cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
- void *buffer, size_t *lenp)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
char buf[16], *p;
long pages;
int len;
- if (!*lenp || (filp->f_pos && !write)) {
+ if (!*lenp || (*ppos && !write)) {
*lenp = 0;
return 0;
}
return -EFAULT;
}
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
return 0;
}
static int
cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp,
- void *buffer, size_t *lenp)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
char buf[64], *p;
long pages, seconds;
int len;
- if (!*lenp || (filp->f_pos && !write)) {
+ if (!*lenp || (*ppos && !write)) {
*lenp = 0;
return 0;
}
return -EFAULT;
}
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
return 0;
}
config SMP
bool "Symmetric multi-processing support (does not work on sun4/sun4c)"
+ depends on BROKEN
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
config SUN4
bool "Support for SUN4 machines (disables SUN4[CDM] support)"
+ depends on !SMP
help
Say Y here if, and only if, your machine is a sun4. Note that
a kernel compiled with this option will run only on sun4.
volatile int __cpu_number_map[NR_CPUS];
volatile int __cpu_logical_map[NR_CPUS];
cycles_t cacheflush_time = 0; /* XXX */
+unsigned long cache_decay_ticks = 100;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
}
}
+void smp_reschedule_irq(void)
+{
+ set_need_resched();
+}
+
void smp_flush_page_to_ram(unsigned long page)
{
/* Current theory is that those who call this are the one's
extern void calibrate_delay(void);
extern volatile int smp_processors_ready;
-extern unsigned long cpu_present_map;
extern int smp_num_cpus;
static int smp_highest_cpu;
extern int smp_threads_ready;
* the SMP initialization the master will be just allowed
* to call the scheduler code.
*/
- init_idle();
-
/* Get our local ticker going. */
smp_setup_percpu_timer();
extern int cpu_idle(void *unused);
extern void init_IRQ(void);
extern void cpu_panic(void);
-extern int start_secondary(void *unused);
/*
* Cycle through the processors asking the PROM to start each one.
current_set[0] = NULL;
local_irq_enable();
- cpu_present_map = 0;
+ cpus_clear(cpu_present_map);
/* XXX This whole thing has to go. See sparc64. */
for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
- cpu_present_map |= (1<<mid);
- SMP_PRINTK(("cpu_present_map %08lx\n", cpu_present_map));
+ cpu_set(mid, cpu_present_map);
+ SMP_PRINTK(("cpu_present_map %08lx\n", cpus_addr(cpu_present_map)[0]));
for(i=0; i < NR_CPUS; i++)
__cpu_number_map[i] = -1;
for(i=0; i < NR_CPUS; i++)
if(i == boot_cpu_id)
continue;
- if(cpu_present_map & (1 << i)) {
+ if (cpu_isset(i, cpu_present_map)) {
extern unsigned long sun4d_cpu_startup;
unsigned long *entry = &sun4d_cpu_startup;
struct task_struct *p;
}
}
if(!(cpu_callin_map[i])) {
- cpu_present_map &= ~(1 << i);
+ cpu_clear(i, cpu_present_map);
__cpu_number_map[i] = -1;
}
}
local_flush_cache_all();
if(cpucount == 0) {
printk("Error: only one Processor found.\n");
- cpu_present_map = (1 << hard_smp4d_processor_id());
+ cpu_present_map = cpumask_of_cpu(hard_smp4d_processor_id());
} else {
unsigned long bogosum = 0;
for(i = 0; i < NR_CPUS; i++) {
- if(cpu_present_map & (1 << i)) {
+ if (cpu_isset(i, cpu_present_map)) {
bogosum += cpu_data(i).udelay_val;
smp_highest_cpu = i;
}
/* Init receive/complete mapping, plus fire the IPI's off. */
{
- register unsigned long mask;
+ cpumask_t mask;
register int i;
- mask = (cpu_present_map & ~(1 << hard_smp4d_processor_id()));
+ mask = cpumask_of_cpu(hard_smp4d_processor_id());
+ cpus_andnot(mask, cpu_present_map, mask);
for(i = 0; i <= high; i++) {
- if(mask & (1 << i)) {
+ if (cpu_isset(i, mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
sun4d_send_ipi(i, IRQ_CROSS_CALL);
t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);
/* And set btfixup... */
- BTFIXUPSET_BLACKBOX(smp_processor_id, smp4d_blackbox_id);
+ BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id);
BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_message_pass, smp4d_message_pass, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(__smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
for (i = 0; i < NR_CPUS; i++) {
ccall_info.processors_in[i] = 1;
* the SMP initialization the master will be just allowed
* to call the scheduler code.
*/
- init_idle();
-
/* Allow master to continue. */
swap((unsigned long *)&cpu_callin_map[cpuid], 1);
extern int cpu_idle(void *unused);
extern void init_IRQ(void);
extern void cpu_panic(void);
-extern int start_secondary(void *unused);
/*
* Cycle through the processors asking the PROM to start each one.
void __init sun4m_init_smp(void)
{
- BTFIXUPSET_BLACKBOX(smp_processor_id, smp4m_blackbox_id);
+ BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(smp_message_pass, smp4m_message_pass, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(__smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
}
sun4_esp_physaddr=SUN4_400_ESP_PHYSADDR;
break;
default:
+ ;
}
}
new_addr = get_unmapped_area(file, addr, new_len,
vma ? vma->vm_pgoff : 0,
- map_flags, vma->vm_flags & VM_EXEC);
+ map_flags);
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out_sem;
.align 4
smp_do_cpu_idle:
- call init_idle
- nop
call cpu_idle
mov 0, %o0
/* Both these macros have to start with exactly the same insn */
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + offset + 0x00], %t0; \
- ldd [%src + offset + 0x08], %t2; \
- ldd [%src + offset + 0x10], %t4; \
- ldd [%src + offset + 0x18], %t6; \
- st %t0, [%dst + offset + 0x00]; \
- st %t1, [%dst + offset + 0x04]; \
- st %t2, [%dst + offset + 0x08]; \
- st %t3, [%dst + offset + 0x0c]; \
- st %t4, [%dst + offset + 0x10]; \
- st %t5, [%dst + offset + 0x14]; \
- st %t6, [%dst + offset + 0x18]; \
- st %t7, [%dst + offset + 0x1c];
+ ldd [%src + (offset) + 0x00], %t0; \
+ ldd [%src + (offset) + 0x08], %t2; \
+ ldd [%src + (offset) + 0x10], %t4; \
+ ldd [%src + (offset) + 0x18], %t6; \
+ st %t0, [%dst + (offset) + 0x00]; \
+ st %t1, [%dst + (offset) + 0x04]; \
+ st %t2, [%dst + (offset) + 0x08]; \
+ st %t3, [%dst + (offset) + 0x0c]; \
+ st %t4, [%dst + (offset) + 0x10]; \
+ st %t5, [%dst + (offset) + 0x14]; \
+ st %t6, [%dst + (offset) + 0x18]; \
+ st %t7, [%dst + (offset) + 0x1c];
#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + offset + 0x00], %t0; \
- ldd [%src + offset + 0x08], %t2; \
- ldd [%src + offset + 0x10], %t4; \
- ldd [%src + offset + 0x18], %t6; \
- std %t0, [%dst + offset + 0x00]; \
- std %t2, [%dst + offset + 0x08]; \
- std %t4, [%dst + offset + 0x10]; \
- std %t6, [%dst + offset + 0x18];
+ ldd [%src + (offset) + 0x00], %t0; \
+ ldd [%src + (offset) + 0x08], %t2; \
+ ldd [%src + (offset) + 0x10], %t4; \
+ ldd [%src + (offset) + 0x18], %t6; \
+ std %t0, [%dst + (offset) + 0x00]; \
+ std %t2, [%dst + (offset) + 0x08]; \
+ std %t4, [%dst + (offset) + 0x10]; \
+ std %t6, [%dst + (offset) + 0x18];
#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src - offset - 0x10], %t0; \
- ldd [%src - offset - 0x08], %t2; \
- st %t0, [%dst - offset - 0x10]; \
- st %t1, [%dst - offset - 0x0c]; \
- st %t2, [%dst - offset - 0x08]; \
- st %t3, [%dst - offset - 0x04];
+ ldd [%src - (offset) - 0x10], %t0; \
+ ldd [%src - (offset) - 0x08], %t2; \
+ st %t0, [%dst - (offset) - 0x10]; \
+ st %t1, [%dst - (offset) - 0x0c]; \
+ st %t2, [%dst - (offset) - 0x08]; \
+ st %t3, [%dst - (offset) - 0x04];
#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
- lduh [%src + offset + 0x00], %t0; \
- lduh [%src + offset + 0x02], %t1; \
- lduh [%src + offset + 0x04], %t2; \
- lduh [%src + offset + 0x06], %t3; \
- sth %t0, [%dst + offset + 0x00]; \
- sth %t1, [%dst + offset + 0x02]; \
- sth %t2, [%dst + offset + 0x04]; \
- sth %t3, [%dst + offset + 0x06];
+ lduh [%src + (offset) + 0x00], %t0; \
+ lduh [%src + (offset) + 0x02], %t1; \
+ lduh [%src + (offset) + 0x04], %t2; \
+ lduh [%src + (offset) + 0x06], %t3; \
+ sth %t0, [%dst + (offset) + 0x00]; \
+ sth %t1, [%dst + (offset) + 0x02]; \
+ sth %t2, [%dst + (offset) + 0x04]; \
+ sth %t3, [%dst + (offset) + 0x06];
#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
- ldub [%src - offset - 0x02], %t0; \
- ldub [%src - offset - 0x01], %t1; \
- stb %t0, [%dst - offset - 0x02]; \
- stb %t1, [%dst - offset - 0x01];
+ ldub [%src - (offset) - 0x02], %t0; \
+ ldub [%src - (offset) - 0x01], %t1; \
+ stb %t0, [%dst - (offset) - 0x02]; \
+ stb %t1, [%dst - (offset) - 0x01];
.text
.align 4
#endif
/* Both these macros have to start with exactly the same insn */
-#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + offset + 0x00], %t0; \
- ldd [%src + offset + 0x08], %t2; \
- ldd [%src + offset + 0x10], %t4; \
- ldd [%src + offset + 0x18], %t6; \
- st %t0, [%dst + offset + 0x00]; \
- st %t1, [%dst + offset + 0x04]; \
- st %t2, [%dst + offset + 0x08]; \
- st %t3, [%dst + offset + 0x0c]; \
- st %t4, [%dst + offset + 0x10]; \
- st %t5, [%dst + offset + 0x14]; \
- st %t6, [%dst + offset + 0x18]; \
- st %t7, [%dst + offset + 0x1c];
-
-#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + offset + 0x00], %t0; \
- ldd [%src + offset + 0x08], %t2; \
- ldd [%src + offset + 0x10], %t4; \
- ldd [%src + offset + 0x18], %t6; \
- std %t0, [%dst + offset + 0x00]; \
- std %t2, [%dst + offset + 0x08]; \
- std %t4, [%dst + offset + 0x10]; \
- std %t6, [%dst + offset + 0x18];
-
-#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src - offset - 0x10], %t0; \
- ldd [%src - offset - 0x08], %t2; \
- st %t0, [%dst - offset - 0x10]; \
- st %t1, [%dst - offset - 0x0c]; \
- st %t2, [%dst - offset - 0x08]; \
- st %t3, [%dst - offset - 0x04];
-
-#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src - offset - 0x10], %t0; \
- ldd [%src - offset - 0x08], %t2; \
- std %t0, [%dst - offset - 0x10]; \
- std %t2, [%dst - offset - 0x08];
-
-#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
- ldub [%src - offset - 0x02], %t0; \
- ldub [%src - offset - 0x01], %t1; \
- stb %t0, [%dst - offset - 0x02]; \
- stb %t1, [%dst - offset - 0x01];
+#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src + (offset) + 0x00], %t0; \
+ ldd [%src + (offset) + 0x08], %t2; \
+ ldd [%src + (offset) + 0x10], %t4; \
+ ldd [%src + (offset) + 0x18], %t6; \
+ st %t0, [%dst + (offset) + 0x00]; \
+ st %t1, [%dst + (offset) + 0x04]; \
+ st %t2, [%dst + (offset) + 0x08]; \
+ st %t3, [%dst + (offset) + 0x0c]; \
+ st %t4, [%dst + (offset) + 0x10]; \
+ st %t5, [%dst + (offset) + 0x14]; \
+ st %t6, [%dst + (offset) + 0x18]; \
+ st %t7, [%dst + (offset) + 0x1c];
+
+#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src + (offset) + 0x00], %t0; \
+ ldd [%src + (offset) + 0x08], %t2; \
+ ldd [%src + (offset) + 0x10], %t4; \
+ ldd [%src + (offset) + 0x18], %t6; \
+ std %t0, [%dst + (offset) + 0x00]; \
+ std %t2, [%dst + (offset) + 0x08]; \
+ std %t4, [%dst + (offset) + 0x10]; \
+ std %t6, [%dst + (offset) + 0x18];
+
+#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ldd [%src - (offset) - 0x10], %t0; \
+ ldd [%src - (offset) - 0x08], %t2; \
+ st %t0, [%dst - (offset) - 0x10]; \
+ st %t1, [%dst - (offset) - 0x0c]; \
+ st %t2, [%dst - (offset) - 0x08]; \
+ st %t3, [%dst - (offset) - 0x04];
+
+#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ldd [%src - (offset) - 0x10], %t0; \
+ ldd [%src - (offset) - 0x08], %t2; \
+ std %t0, [%dst - (offset) - 0x10]; \
+ std %t2, [%dst - (offset) - 0x08];
+
+#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
+ ldub [%src - (offset) - 0x02], %t0; \
+ ldub [%src - (offset) - 0x01], %t1; \
+ stb %t0, [%dst - (offset) - 0x02]; \
+ stb %t1, [%dst - (offset) - 0x01];
/* Both these macros have to start with exactly the same insn */
-#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src - offset - 0x20], %t0; \
- ldd [%src - offset - 0x18], %t2; \
- ldd [%src - offset - 0x10], %t4; \
- ldd [%src - offset - 0x08], %t6; \
- st %t0, [%dst - offset - 0x20]; \
- st %t1, [%dst - offset - 0x1c]; \
- st %t2, [%dst - offset - 0x18]; \
- st %t3, [%dst - offset - 0x14]; \
- st %t4, [%dst - offset - 0x10]; \
- st %t5, [%dst - offset - 0x0c]; \
- st %t6, [%dst - offset - 0x08]; \
- st %t7, [%dst - offset - 0x04];
-
-#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src - offset - 0x20], %t0; \
- ldd [%src - offset - 0x18], %t2; \
- ldd [%src - offset - 0x10], %t4; \
- ldd [%src - offset - 0x08], %t6; \
- std %t0, [%dst - offset - 0x20]; \
- std %t2, [%dst - offset - 0x18]; \
- std %t4, [%dst - offset - 0x10]; \
- std %t6, [%dst - offset - 0x08];
-
-#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src + offset + 0x00], %t0; \
- ldd [%src + offset + 0x08], %t2; \
- st %t0, [%dst + offset + 0x00]; \
- st %t1, [%dst + offset + 0x04]; \
- st %t2, [%dst + offset + 0x08]; \
- st %t3, [%dst + offset + 0x0c];
-
-#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
- ldub [%src + offset + 0x00], %t0; \
- ldub [%src + offset + 0x01], %t1; \
- stb %t0, [%dst + offset + 0x00]; \
- stb %t1, [%dst + offset + 0x01];
-
-#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
- ldd [%src + offset + 0x00], %t0; \
- ldd [%src + offset + 0x08], %t2; \
- srl %t0, shir, %t5; \
- srl %t1, shir, %t6; \
- sll %t0, shil, %t0; \
- or %t5, %prev, %t5; \
- sll %t1, shil, %prev; \
- or %t6, %t0, %t0; \
- srl %t2, shir, %t1; \
- srl %t3, shir, %t6; \
- sll %t2, shil, %t2; \
- or %t1, %prev, %t1; \
- std %t4, [%dst + offset + offset2 - 0x04]; \
- std %t0, [%dst + offset + offset2 + 0x04]; \
- sll %t3, shil, %prev; \
+#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src - (offset) - 0x20], %t0; \
+ ldd [%src - (offset) - 0x18], %t2; \
+ ldd [%src - (offset) - 0x10], %t4; \
+ ldd [%src - (offset) - 0x08], %t6; \
+ st %t0, [%dst - (offset) - 0x20]; \
+ st %t1, [%dst - (offset) - 0x1c]; \
+ st %t2, [%dst - (offset) - 0x18]; \
+ st %t3, [%dst - (offset) - 0x14]; \
+ st %t4, [%dst - (offset) - 0x10]; \
+ st %t5, [%dst - (offset) - 0x0c]; \
+ st %t6, [%dst - (offset) - 0x08]; \
+ st %t7, [%dst - (offset) - 0x04];
+
+#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src - (offset) - 0x20], %t0; \
+ ldd [%src - (offset) - 0x18], %t2; \
+ ldd [%src - (offset) - 0x10], %t4; \
+ ldd [%src - (offset) - 0x08], %t6; \
+ std %t0, [%dst - (offset) - 0x20]; \
+ std %t2, [%dst - (offset) - 0x18]; \
+ std %t4, [%dst - (offset) - 0x10]; \
+ std %t6, [%dst - (offset) - 0x08];
+
+#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ldd [%src + (offset) + 0x00], %t0; \
+ ldd [%src + (offset) + 0x08], %t2; \
+ st %t0, [%dst + (offset) + 0x00]; \
+ st %t1, [%dst + (offset) + 0x04]; \
+ st %t2, [%dst + (offset) + 0x08]; \
+ st %t3, [%dst + (offset) + 0x0c];
+
+#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
+ ldub [%src + (offset) + 0x00], %t0; \
+ ldub [%src + (offset) + 0x01], %t1; \
+ stb %t0, [%dst + (offset) + 0x00]; \
+ stb %t1, [%dst + (offset) + 0x01];
+
+#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
+ ldd [%src + (offset) + 0x00], %t0; \
+ ldd [%src + (offset) + 0x08], %t2; \
+ srl %t0, shir, %t5; \
+ srl %t1, shir, %t6; \
+ sll %t0, shil, %t0; \
+ or %t5, %prev, %t5; \
+ sll %t1, shil, %prev; \
+ or %t6, %t0, %t0; \
+ srl %t2, shir, %t1; \
+ srl %t3, shir, %t6; \
+ sll %t2, shil, %t2; \
+ or %t1, %prev, %t1; \
+ std %t4, [%dst + (offset) + (offset2) - 0x04]; \
+ std %t0, [%dst + (offset) + (offset2) + 0x04]; \
+ sll %t3, shil, %prev; \
or %t6, %t2, %t4;
-#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
- ldd [%src + offset + 0x00], %t0; \
- ldd [%src + offset + 0x08], %t2; \
- srl %t0, shir, %t4; \
- srl %t1, shir, %t5; \
- sll %t0, shil, %t6; \
- or %t4, %prev, %t0; \
- sll %t1, shil, %prev; \
- or %t5, %t6, %t1; \
- srl %t2, shir, %t4; \
- srl %t3, shir, %t5; \
- sll %t2, shil, %t6; \
- or %t4, %prev, %t2; \
- sll %t3, shil, %prev; \
- or %t5, %t6, %t3; \
- std %t0, [%dst + offset + offset2 + 0x00]; \
- std %t2, [%dst + offset + offset2 + 0x08];
+#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
+ ldd [%src + (offset) + 0x00], %t0; \
+ ldd [%src + (offset) + 0x08], %t2; \
+ srl %t0, shir, %t4; \
+ srl %t1, shir, %t5; \
+ sll %t0, shil, %t6; \
+ or %t4, %prev, %t0; \
+ sll %t1, shil, %prev; \
+ or %t5, %t6, %t1; \
+ srl %t2, shir, %t4; \
+ srl %t3, shir, %t5; \
+ sll %t2, shil, %t6; \
+ or %t4, %prev, %t2; \
+ sll %t3, shil, %prev; \
+ or %t5, %t6, %t3; \
+ std %t0, [%dst + (offset) + (offset2) + 0x00]; \
+ std %t2, [%dst + (offset) + (offset2) + 0x08];
.text
.align 4
static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- struct mm_struct *mm = vma->vm_mm;
-
- FLUSH_BEGIN(mm)
+ FLUSH_BEGIN(vma->vm_mm)
flush_user_windows();
turbosparc_idflash_clear();
FLUSH_END
static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- struct mm_struct *mm = vma->vm_mm;
-
- FLUSH_BEGIN(mm)
+ FLUSH_BEGIN(vma->vm_mm)
srmmu_flush_whole_tlb();
FLUSH_END
}
fly. Currently there are only sparc64 drivers for UltraSPARC-III
and UltraSPARC-IIe processors.
- For details, take a look at linux/Documentation/cpu-freq.
+ For details, take a look at <file:Documentation/cpu-freq>.
If in doubt, say N.
help
This adds the CPUFreq driver for UltraSPARC-III processors.
- For details, take a look at linux/Documentation/cpu-freq.
+ For details, take a look at <file:Documentation/cpu-freq>.
If in doubt, say N.
help
This adds the CPUFreq driver for UltraSPARC-IIe processors.
- For details, take a look at linux/Documentation/cpu-freq.
+ For details, take a look at <file:Documentation/cpu-freq>.
If in doubt, say N.
config SUNOS_EMUL
bool "SunOS binary emulation"
+ depends on BINFMT_AOUT32
help
This allows you to run most SunOS binaries. If you want to do this,
say Y here and place appropriate files in /usr/gnemul/sunos. See
config SOLARIS_EMUL
tristate "Solaris binary emulation (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ depends on SPARC32_COMPAT && EXPERIMENTAL
help
This is experimental code which will enable you to run (many)
Solaris binaries on your SPARC Linux machine.
# CONFIG_BINFMT_AOUT32 is not set
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=m
-# CONFIG_SUNOS_EMUL is not set
CONFIG_SOLARIS_EMUL=m
#
# CONFIG_FB_CIRRUS is not set
CONFIG_FB_PM2=y
# CONFIG_FB_PM2_FIFO_DISCONNECT is not set
-# CONFIG_FB_CYBER2000 is not set
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
# CONFIG_FB_BW2 is not set
#
# Serial drivers
#
-# CONFIG_SERIAL_8250 is not set
#
# Non-8250 serial port support
CONFIG_SUN_MOSTEK_RTC=y
CONFIG_OBP_FLASH=m
# CONFIG_SUN_BPP is not set
-# CONFIG_SUN_VIDEOPIX is not set
-# CONFIG_SUN_AURORA is not set
#
# Memory Technology Devices (MTD)
CONFIG_SCSI_SATA_SIS=m
CONFIG_SCSI_SATA_VIA=m
CONFIG_SCSI_SATA_VITESSE=m
-# CONFIG_SCSI_BUSLOGIC is not set
CONFIG_SCSI_DMX3191D=m
-# CONFIG_SCSI_EATA is not set
CONFIG_SCSI_EATA_PIO=m
# CONFIG_SCSI_FUTURE_DOMAIN is not set
-# CONFIG_SCSI_GDTH is not set
CONFIG_SCSI_IPS=m
CONFIG_SCSI_INIA100=m
CONFIG_SCSI_PPA=m
# CONFIG_SCSI_QLA6312 is not set
# CONFIG_SCSI_QLA6322 is not set
CONFIG_SCSI_DC395x=m
-CONFIG_SCSI_DC390T=m
+# CONFIG_SCSI_DC390T is not set
CONFIG_SCSI_DEBUG=m
CONFIG_SCSI_SUNESP=y
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
+# CONFIG_NET_SCH_CLK_JIFFIES is not set
+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
+CONFIG_NET_SCH_CLK_CPU=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
#
# Wireless 802.11b ISA/PCI cards support
#
-CONFIG_AIRO=m
CONFIG_HERMES=m
CONFIG_PLX_HERMES=m
CONFIG_TMD_HERMES=m
#
CONFIG_I2C_SENSOR=m
CONFIG_SENSORS_ADM1021=m
+CONFIG_SENSORS_ADM1025=m
+CONFIG_SENSORS_ADM1031=m
CONFIG_SENSORS_ASB100=m
CONFIG_SENSORS_DS1621=m
CONFIG_SENSORS_FSCHER=m
CONFIG_SENSORS_GL518SM=m
CONFIG_SENSORS_IT87=m
CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM77=m
CONFIG_SENSORS_LM78=m
CONFIG_SENSORS_LM80=m
CONFIG_SENSORS_LM83=m
# CONFIG_BEFS_DEBUG is not set
CONFIG_BFS_FS=m
CONFIG_EFS_FS=m
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
CONFIG_HPFS_FS=m
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_XATTR is not set
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
# CONFIG_NCPFS_PACKET_SIGNING is not set
CONFIG_SND_SUN_AMD7930=m
CONFIG_SND_SUN_CS4231=m
-#
-# Open Sound System
-#
-# CONFIG_SOUND_PRIME is not set
-
#
# USB support
#
# CONFIG_USB_OV511 is not set
CONFIG_USB_PWC=m
# CONFIG_USB_SE401 is not set
+CONFIG_USB_SN9C102=m
# CONFIG_USB_STV680 is not set
CONFIG_USB_W9968CF=m
loff_t pos = fd_offset;
/* Fuck me plenty... */
error = do_brk(N_TXTADDR(ex), ex.a_text);
- bprm->file->f_op->read(bprm->file, (char *) N_TXTADDR(ex),
+ bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
ex.a_text, &pos);
error = do_brk(N_DATADDR(ex), ex.a_data);
- bprm->file->f_op->read(bprm->file, (char *) N_DATADDR(ex),
+ bprm->file->f_op->read(bprm->file, (char __user *)N_DATADDR(ex),
ex.a_data, &pos);
goto beyond_if;
}
loff_t pos = fd_offset;
do_brk(N_TXTADDR(ex) & PAGE_MASK,
ex.a_text+ex.a_data + PAGE_SIZE - 1);
- bprm->file->f_op->read(bprm->file, (char *) N_TXTADDR(ex),
+ bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
ex.a_text+ex.a_data, &pos);
} else {
static unsigned long error_time;
if (!bprm->file->f_op->mmap) {
loff_t pos = fd_offset;
do_brk(0, ex.a_text+ex.a_data);
- bprm->file->f_op->read(bprm->file,(char *)N_TXTADDR(ex),
+ bprm->file->f_op->read(bprm->file,
+ (char __user *)N_TXTADDR(ex),
ex.a_text+ex.a_data, &pos);
goto beyond_if;
}
/* SunOS's execv() call only specifies the argv argument, the
* environment settings are the same as the calling processes.
*/
- .globl sunos_execv, sys_execve, sys32_execve
+ .globl sunos_execv
sys_execve:
sethi %hi(sparc_execve), %g1
ba,pt %xcc, execve_merge
or %g1, %lo(sparc_execve), %g1
+#ifdef CONFIG_COMPAT
+ .globl sys_execve
sunos_execv:
stx %g0, [%sp + PTREGS_OFF + PT_V9_I2]
+ .globl sys32_execve
sys32_execve:
sethi %hi(sparc32_execve), %g1
or %g1, %lo(sparc32_execve), %g1
+#endif
execve_merge:
flushw
jmpl %g1, %g0
add %sp, PTREGS_OFF, %o0
.globl sys_pipe, sys_sigpause, sys_nis_syscall
- .globl sys_sigsuspend, sys_rt_sigsuspend, sys32_rt_sigsuspend
+ .globl sys_sigsuspend, sys_rt_sigsuspend
.globl sys_rt_sigreturn
- .globl sys32_sigreturn, sys32_rt_sigreturn
- .globl sys32_execve, sys_ptrace
- .globl sys_sigaltstack, sys32_sigaltstack
- .globl sys32_sigstack
+ .globl sys_ptrace
+ .globl sys_sigaltstack
.align 32
sys_pipe: ba,pt %xcc, sparc_pipe
add %sp, PTREGS_OFF, %o0
add %sp, PTREGS_OFF, %o1
sys_sigaltstack:ba,pt %xcc, do_sigaltstack
add %i6, STACK_BIAS, %o2
+#ifdef CONFIG_COMPAT
+ .globl sys32_sigstack
sys32_sigstack: ba,pt %xcc, do_sys32_sigstack
mov %i6, %o2
+ .globl sys32_sigaltstack
sys32_sigaltstack:
ba,pt %xcc, do_sys32_sigaltstack
mov %i6, %o2
-
+#endif
.align 32
sys_sigsuspend: add %sp, PTREGS_OFF, %o0
call do_sigsuspend
call do_rt_sigsuspend
add %o7, 1f-.-4, %o7
nop
+#ifdef CONFIG_COMPAT
+ .globl sys32_rt_sigsuspend
sys32_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
srl %o0, 0, %o0
add %sp, PTREGS_OFF, %o2
call do_rt_sigsuspend32
add %o7, 1f-.-4, %o7
+#endif
/* NOTE: %o0 has a correct value already */
sys_sigpause: add %sp, PTREGS_OFF, %o1
call do_sigpause
add %o7, 1f-.-4, %o7
nop
+#ifdef CONFIG_COMPAT
+ .globl sys32_sigreturn
sys32_sigreturn:
add %sp, PTREGS_OFF, %o0
call do_sigreturn32
add %o7, 1f-.-4, %o7
nop
+#endif
sys_rt_sigreturn:
add %sp, PTREGS_OFF, %o0
call do_rt_sigreturn
add %o7, 1f-.-4, %o7
nop
+#ifdef CONFIG_COMPAT
+ .globl sys32_rt_sigreturn
sys32_rt_sigreturn:
add %sp, PTREGS_OFF, %o0
call do_rt_sigreturn32
add %o7, 1f-.-4, %o7
nop
+#endif
sys_ptrace: add %sp, PTREGS_OFF, %o0
call do_ptrace
add %o7, 1f-.-4, %o7
/* Use this to get at 32-bit user passed pointers.
* See sys_sparc32.c for description about it.
*/
-#define A(__x) ((void __user *)(unsigned long)(__x))
+#define A(__x) compat_ptr(__x)
static __inline__ void *alloc_user_space(long len)
{
static int fbiogetputcmap(unsigned int fd, unsigned int cmd, unsigned long arg)
{
- struct fbcmap f;
+ struct fbcmap32 __user *argp = (void __user *)arg;
+ struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
+ u32 addr;
int ret;
- char red[256], green[256], blue[256];
- u32 r, g, b;
- mm_segment_t old_fs = get_fs();
- ret = get_user(f.index, &(((struct fbcmap32 __user *)arg)->index));
- ret |= __get_user(f.count, &(((struct fbcmap32 __user *)arg)->count));
- ret |= __get_user(r, &(((struct fbcmap32 __user *)arg)->red));
- ret |= __get_user(g, &(((struct fbcmap32 __user *)arg)->green));
- ret |= __get_user(b, &(((struct fbcmap32 __user *)arg)->blue));
+ ret = copy_in_user(p, argp, 2 * sizeof(int));
+ ret |= get_user(addr, &argp->red);
+ ret |= put_user(compat_ptr(addr), &p->red);
+ ret |= get_user(addr, &argp->green);
+ ret |= put_user(compat_ptr(addr), &p->green);
+ ret |= get_user(addr, &argp->blue);
+ ret |= put_user(compat_ptr(addr), &p->blue);
if (ret)
return -EFAULT;
- if ((f.index < 0) || (f.index > 255)) return -EINVAL;
- if (f.index + f.count > 256)
- f.count = 256 - f.index;
- if (cmd == FBIOPUTCMAP32) {
- ret = copy_from_user (red, A(r), f.count);
- ret |= copy_from_user (green, A(g), f.count);
- ret |= copy_from_user (blue, A(b), f.count);
- if (ret)
- return -EFAULT;
- }
- f.red = red; f.green = green; f.blue = blue;
- set_fs (KERNEL_DS);
- ret = sys_ioctl (fd, (cmd == FBIOPUTCMAP32) ? FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC, (long)&f);
- set_fs (old_fs);
- if (!ret && cmd == FBIOGETCMAP32) {
- ret = copy_to_user (A(r), red, f.count);
- ret |= copy_to_user (A(g), green, f.count);
- ret |= copy_to_user (A(b), blue, f.count);
- }
- return ret ? -EFAULT : 0;
+ return sys_ioctl(fd, (cmd == FBIOPUTCMAP32) ? FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC, (unsigned long)p);
}
struct fbcursor32 {
static int fbiogscursor(unsigned int fd, unsigned int cmd, unsigned long arg)
{
- struct fbcursor f;
+ struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
+ struct fbcursor32 __user *argp = (void __user *)arg;
+ compat_uptr_t addr;
int ret;
- char red[2], green[2], blue[2];
- char image[128], mask[128];
- u32 r, g, b;
- u32 m, i;
- mm_segment_t old_fs = get_fs();
- ret = copy_from_user (&f, (struct fbcursor32 __user *) arg,
+ ret = copy_in_user(p, argp,
2 * sizeof (short) + 2 * sizeof(struct fbcurpos));
- ret |= __get_user(f.size.x,
- &(((struct fbcursor32 __user *)arg)->size.x));
- ret |= __get_user(f.size.y,
- &(((struct fbcursor32 __user *)arg)->size.y));
- ret |= __get_user(f.cmap.index,
- &(((struct fbcursor32 __user *)arg)->cmap.index));
- ret |= __get_user(f.cmap.count,
- &(((struct fbcursor32 __user *)arg)->cmap.count));
- ret |= __get_user(r, &(((struct fbcursor32 __user *)arg)->cmap.red));
- ret |= __get_user(g, &(((struct fbcursor32 __user *)arg)->cmap.green));
- ret |= __get_user(b, &(((struct fbcursor32 __user *)arg)->cmap.blue));
- ret |= __get_user(m, &(((struct fbcursor32 __user *)arg)->mask));
- ret |= __get_user(i, &(((struct fbcursor32 __user *)arg)->image));
+ ret |= copy_in_user(&p->size, &argp->size, sizeof(struct fbcurpos));
+ ret |= copy_in_user(&p->cmap, &argp->cmap, 2 * sizeof(int));
+ ret |= get_user(addr, &argp->cmap.red);
+ ret |= put_user(compat_ptr(addr), &p->cmap.red);
+ ret |= get_user(addr, &argp->cmap.green);
+ ret |= put_user(compat_ptr(addr), &p->cmap.green);
+ ret |= get_user(addr, &argp->cmap.blue);
+ ret |= put_user(compat_ptr(addr), &p->cmap.blue);
+ ret |= get_user(addr, &argp->mask);
+ ret |= put_user(compat_ptr(addr), &p->mask);
+ ret |= get_user(addr, &argp->image);
+ ret |= put_user(compat_ptr(addr), &p->image);
if (ret)
return -EFAULT;
- if (f.set & FB_CUR_SETCMAP) {
- if ((uint) f.size.y > 32)
- return -EINVAL;
- ret = copy_from_user (mask, A(m), f.size.y * 4);
- ret |= copy_from_user (image, A(i), f.size.y * 4);
- if (ret)
- return -EFAULT;
- f.image = image; f.mask = mask;
- }
- if (f.set & FB_CUR_SETCMAP) {
- ret = copy_from_user (red, A(r), 2);
- ret |= copy_from_user (green, A(g), 2);
- ret |= copy_from_user (blue, A(b), 2);
- if (ret)
- return -EFAULT;
- f.cmap.red = red; f.cmap.green = green; f.cmap.blue = blue;
- }
- set_fs (KERNEL_DS);
- ret = sys_ioctl (fd, FBIOSCURSOR, (long)&f);
- set_fs (old_fs);
- return ret;
+ return sys_ioctl (fd, FBIOSCURSOR, (unsigned long)p);
}
#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_version_t __user *uversion = (drm32_version_t __user *)arg;
- char __user *name_ptr, *date_ptr, *desc_ptr;
- u32 tmp1, tmp2, tmp3;
- drm_version_t kversion;
- mm_segment_t old_fs;
+ drm_version_t __user *p = compat_alloc_user_space(sizeof(*p));
+ compat_uptr_t addr;
+ int n;
int ret;
- memset(&kversion, 0, sizeof(kversion));
- if (get_user(kversion.name_len, &uversion->name_len) ||
- get_user(kversion.date_len, &uversion->date_len) ||
- get_user(kversion.desc_len, &uversion->desc_len) ||
- get_user(tmp1, &uversion->name) ||
- get_user(tmp2, &uversion->date) ||
- get_user(tmp3, &uversion->desc))
+ if (clear_user(p, 3 * sizeof(int)) ||
+ get_user(n, &uversion->name_len) ||
+ put_user(n, &p->name_len) ||
+ get_user(addr, &uversion->name) ||
+ put_user(compat_ptr(addr), &p->name) ||
+ get_user(n, &uversion->date_len) ||
+ put_user(n, &p->date_len) ||
+ get_user(addr, &uversion->date) ||
+ put_user(compat_ptr(addr), &p->date) ||
+ get_user(n, &uversion->desc_len) ||
+ put_user(n, &p->desc_len) ||
+ get_user(addr, &uversion->desc) ||
+ put_user(compat_ptr(addr), &p->desc))
return -EFAULT;
- name_ptr = A(tmp1);
- date_ptr = A(tmp2);
- desc_ptr = A(tmp3);
-
- ret = -ENOMEM;
- if (kversion.name_len && name_ptr) {
- kversion.name = kmalloc(kversion.name_len, GFP_KERNEL);
- if (!kversion.name)
- goto out;
- }
- if (kversion.date_len && date_ptr) {
- kversion.date = kmalloc(kversion.date_len, GFP_KERNEL);
- if (!kversion.date)
- goto out;
- }
- if (kversion.desc_len && desc_ptr) {
- kversion.desc = kmalloc(kversion.desc_len, GFP_KERNEL);
- if (!kversion.desc)
- goto out;
- }
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_ioctl (fd, DRM_IOCTL_VERSION, (unsigned long)&kversion);
- set_fs(old_fs);
-
- if (!ret) {
- if ((kversion.name &&
- copy_to_user(name_ptr, kversion.name, kversion.name_len)) ||
- (kversion.date &&
- copy_to_user(date_ptr, kversion.date, kversion.date_len)) ||
- (kversion.desc &&
- copy_to_user(desc_ptr, kversion.desc, kversion.desc_len)))
- ret = -EFAULT;
- if (put_user(kversion.version_major, &uversion->version_major) ||
- put_user(kversion.version_minor, &uversion->version_minor) ||
- put_user(kversion.version_patchlevel, &uversion->version_patchlevel) ||
- put_user(kversion.name_len, &uversion->name_len) ||
- put_user(kversion.date_len, &uversion->date_len) ||
- put_user(kversion.desc_len, &uversion->desc_len))
- ret = -EFAULT;
- }
+ ret = sys_ioctl(fd, DRM_IOCTL_VERSION, (unsigned long)p);
+ if (ret)
+ return ret;
+
+ if (copy_in_user(uversion, p, 3 * sizeof(int)) ||
+ get_user(n, &p->name_len) ||
+ put_user(n, &uversion->name_len) ||
+ get_user(n, &p->date_len) ||
+ put_user(n, &uversion->date_len) ||
+ get_user(n, &p->desc_len) ||
+ put_user(n, &uversion->desc_len))
+ return -EFAULT;
-out:
- if (kversion.name)
- kfree(kversion.name);
- if (kversion.date)
- kfree(kversion.date);
- if (kversion.desc)
- kfree(kversion.desc);
- return ret;
+ return 0;
}
typedef struct drm32_unique {
static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_unique_t __user *uarg = (drm32_unique_t __user *)arg;
- drm_unique_t karg;
- mm_segment_t old_fs;
- char __user *uptr;
- u32 tmp;
+ drm_unique_t __user *p = compat_alloc_user_space(sizeof(*p));
+ compat_uptr_t addr;
+ int n;
int ret;
- if (get_user(karg.unique_len, &uarg->unique_len))
+ if (get_user(n, &uarg->unique_len) ||
+ put_user(n, &p->unique_len) ||
+ get_user(addr, &uarg->unique) ||
+ put_user(compat_ptr(addr), &p->unique))
return -EFAULT;
- karg.unique = NULL;
-
- if (get_user(tmp, &uarg->unique))
- return -EFAULT;
-
- uptr = A(tmp);
- if (uptr) {
- karg.unique = kmalloc(karg.unique_len, GFP_KERNEL);
- if (!karg.unique)
- return -ENOMEM;
- if (cmd == DRM32_IOCTL_SET_UNIQUE &&
- copy_from_user(karg.unique, uptr, karg.unique_len)) {
- kfree(karg.unique);
- return -EFAULT;
- }
- }
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
if (cmd == DRM32_IOCTL_GET_UNIQUE)
- ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)&karg);
+ ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)p);
else
- ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)&karg);
- set_fs(old_fs);
+ ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)p);
- if (!ret) {
- if (cmd == DRM32_IOCTL_GET_UNIQUE &&
- uptr != NULL &&
- copy_to_user(uptr, karg.unique, karg.unique_len))
- ret = -EFAULT;
- if (put_user(karg.unique_len, &uarg->unique_len))
- ret = -EFAULT;
- }
+ if (ret)
+ return ret;
- if (karg.unique != NULL)
- kfree(karg.unique);
+ if (get_user(n, &p->unique_len) || put_user(n, &uarg->unique_len))
+ return -EFAULT;
- return ret;
+ return 0;
}
typedef struct drm32_map {
static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_buf_info_t __user *uarg = (drm32_buf_info_t __user *)arg;
- drm_buf_desc_t __user *ulist;
- drm_buf_info_t karg;
- mm_segment_t old_fs;
- int orig_count, ret;
- u32 tmp;
+ drm_buf_info_t __user *p = compat_alloc_user_space(sizeof(*p));
+ compat_uptr_t addr;
+ int n;
+ int ret;
- if (get_user(karg.count, &uarg->count) ||
- get_user(tmp, &uarg->list))
+ if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
+ get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
return -EFAULT;
- ulist = A(tmp);
-
- orig_count = karg.count;
+ ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long)p);
+ if (ret)
+ return ret;
- karg.list = kmalloc(karg.count * sizeof(drm_buf_desc_t), GFP_KERNEL);
- if (!karg.list)
+ if (get_user(n, &p->count) || put_user(n, &uarg->count))
return -EFAULT;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long) &karg);
- set_fs(old_fs);
-
- if (!ret) {
- if (karg.count <= orig_count &&
- (copy_to_user(ulist, karg.list,
- karg.count * sizeof(drm_buf_desc_t))))
- ret = -EFAULT;
- if (put_user(karg.count, &uarg->count))
- ret = -EFAULT;
- }
-
- kfree(karg.list);
-
- return ret;
+ return 0;
}
typedef struct drm32_buf_free {
static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_buf_free_t __user *uarg = (drm32_buf_free_t __user *)arg;
- drm_buf_free_t karg;
- mm_segment_t old_fs;
- int __user *ulist;
- int ret;
- u32 tmp;
+ drm_buf_free_t __user *p = compat_alloc_user_space(sizeof(*p));
+ compat_uptr_t addr;
+ int n;
- if (get_user(karg.count, &uarg->count) ||
- get_user(tmp, &uarg->list))
+ if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
+ get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
return -EFAULT;
- ulist = A(tmp);
-
- karg.list = kmalloc(karg.count * sizeof(int), GFP_KERNEL);
- if (!karg.list)
- return -ENOMEM;
-
- ret = -EFAULT;
- if (copy_from_user(karg.list, ulist, (karg.count * sizeof(int))))
- goto out;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long) &karg);
- set_fs(old_fs);
-
-out:
- kfree(karg.list);
-
- return ret;
+ return sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long)p);
}
typedef struct drm32_buf_pub {
{
drm32_buf_map_t __user *uarg = (drm32_buf_map_t __user *)arg;
drm32_buf_pub_t __user *ulist;
- drm_buf_map_t karg;
- mm_segment_t old_fs;
+ drm_buf_map_t __user *arg64;
+ drm_buf_pub_t __user *list;
int orig_count, ret, i;
- u32 tmp1, tmp2;
+ int n;
+ compat_uptr_t addr;
- if (get_user(karg.count, &uarg->count) ||
- get_user(tmp1, &uarg->virtual) ||
- get_user(tmp2, &uarg->list))
+ if (get_user(orig_count, &uarg->count))
return -EFAULT;
- karg.virtual = (void *) (unsigned long) tmp1;
- ulist = A(tmp2);
-
- orig_count = karg.count;
-
- karg.list = kmalloc(karg.count * sizeof(drm_buf_pub_t), GFP_KERNEL);
- if (!karg.list)
- return -ENOMEM;
+ arg64 = compat_alloc_user_space(sizeof(drm_buf_map_t) +
+ (size_t)orig_count * sizeof(drm_buf_pub_t));
+ list = (void __user *)(arg64 + 1);
- ret = -EFAULT;
- for (i = 0; i < karg.count; i++) {
- if (get_user(karg.list[i].idx, &ulist[i].idx) ||
- get_user(karg.list[i].total, &ulist[i].total) ||
- get_user(karg.list[i].used, &ulist[i].used) ||
- get_user(tmp1, &ulist[i].address))
- goto out;
+ if (put_user(orig_count, &arg64->count) ||
+ put_user(list, &arg64->list) ||
+ get_user(addr, &uarg->virtual) ||
+ put_user(compat_ptr(addr), &arg64->virtual) ||
+ get_user(addr, &uarg->list))
+ return -EFAULT;
- karg.list[i].address = (void *) (unsigned long) tmp1;
+ ulist = compat_ptr(addr);
+
+ for (i = 0; i < orig_count; i++) {
+ if (get_user(n, &ulist[i].idx) ||
+ put_user(n, &list[i].idx) ||
+ get_user(n, &ulist[i].total) ||
+ put_user(n, &list[i].total) ||
+ get_user(n, &ulist[i].used) ||
+ put_user(n, &list[i].used) ||
+ get_user(addr, &ulist[i].address) ||
+ put_user(compat_ptr(addr), &list[i].address))
+ return -EFAULT;
}
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) &karg);
- set_fs(old_fs);
-
- if (!ret) {
- for (i = 0; i < orig_count; i++) {
- tmp1 = (u32) (long) karg.list[i].address;
- if (put_user(karg.list[i].idx, &ulist[i].idx) ||
- put_user(karg.list[i].total, &ulist[i].total) ||
- put_user(karg.list[i].used, &ulist[i].used) ||
- put_user(tmp1, &ulist[i].address)) {
- ret = -EFAULT;
- goto out;
- }
- }
- if (put_user(karg.count, &uarg->count))
- ret = -EFAULT;
+ ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) arg64);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < orig_count; i++) {
+ void __user *p;
+ if (get_user(n, &list[i].idx) ||
+ put_user(n, &ulist[i].idx) ||
+ get_user(n, &list[i].total) ||
+ put_user(n, &ulist[i].total) ||
+ get_user(n, &list[i].used) ||
+ put_user(n, &ulist[i].used) ||
+ get_user(p, &list[i].address) ||
+ put_user((unsigned long)p, &ulist[i].address))
+ return -EFAULT;
}
-out:
- kfree(karg.list);
- return ret;
+ if (get_user(n, &arg64->count) || put_user(n, &uarg->count))
+ return -EFAULT;
+
+ return 0;
}
typedef struct drm32_dma {
static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_dma_t __user *uarg = (drm32_dma_t __user *) arg;
- int __user *u_si, *u_ss, *u_ri, *u_rs;
- drm_dma_t karg;
- mm_segment_t old_fs;
+ drm_dma_t __user *p = compat_alloc_user_space(sizeof(*p));
+ compat_uptr_t addr;
int ret;
- u32 tmp1, tmp2, tmp3, tmp4;
-
- karg.send_indices = karg.send_sizes = NULL;
- karg.request_indices = karg.request_sizes = NULL;
-
- if (get_user(karg.context, &uarg->context) ||
- get_user(karg.send_count, &uarg->send_count) ||
- get_user(tmp1, &uarg->send_indices) ||
- get_user(tmp2, &uarg->send_sizes) ||
- get_user(karg.flags, &uarg->flags) ||
- get_user(karg.request_count, &uarg->request_count) ||
- get_user(karg.request_size, &uarg->request_size) ||
- get_user(tmp3, &uarg->request_indices) ||
- get_user(tmp4, &uarg->request_sizes) ||
- get_user(karg.granted_count, &uarg->granted_count))
- return -EFAULT;
-
- u_si = A(tmp1);
- u_ss = A(tmp2);
- u_ri = A(tmp3);
- u_rs = A(tmp4);
-
- if (karg.send_count) {
- karg.send_indices = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
- karg.send_sizes = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
-
- ret = -ENOMEM;
- if (!karg.send_indices || !karg.send_sizes)
- goto out;
-
- ret = -EFAULT;
- if (copy_from_user(karg.send_indices, u_si,
- (karg.send_count * sizeof(int))) ||
- copy_from_user(karg.send_sizes, u_ss,
- (karg.send_count * sizeof(int))))
- goto out;
- }
-
- if (karg.request_count) {
- karg.request_indices = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
- karg.request_sizes = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
-
- ret = -ENOMEM;
- if (!karg.request_indices || !karg.request_sizes)
- goto out;
- ret = -EFAULT;
- if (copy_from_user(karg.request_indices, u_ri,
- (karg.request_count * sizeof(int))) ||
- copy_from_user(karg.request_sizes, u_rs,
- (karg.request_count * sizeof(int))))
- goto out;
- }
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long) &karg);
- set_fs(old_fs);
-
- if (!ret) {
- if (put_user(karg.context, &uarg->context) ||
- put_user(karg.send_count, &uarg->send_count) ||
- put_user(karg.flags, &uarg->flags) ||
- put_user(karg.request_count, &uarg->request_count) ||
- put_user(karg.request_size, &uarg->request_size) ||
- put_user(karg.granted_count, &uarg->granted_count))
- ret = -EFAULT;
+ if (copy_in_user(p, uarg, 2 * sizeof(int)) ||
+ get_user(addr, &uarg->send_indices) ||
+ put_user(compat_ptr(addr), &p->send_indices) ||
+ get_user(addr, &uarg->send_sizes) ||
+ put_user(compat_ptr(addr), &p->send_sizes) ||
+ copy_in_user(&p->flags, &uarg->flags, sizeof(drm_dma_flags_t)) ||
+ copy_in_user(&p->request_count, &uarg->request_count, sizeof(int))||
+ copy_in_user(&p->request_size, &uarg->request_size, sizeof(int)) ||
+ get_user(addr, &uarg->request_indices) ||
+ put_user(compat_ptr(addr), &p->request_indices) ||
+ get_user(addr, &uarg->request_sizes) ||
+ put_user(compat_ptr(addr), &p->request_sizes) ||
+ copy_in_user(&p->granted_count, &uarg->granted_count, sizeof(int)))
+ return -EFAULT;
- if (karg.send_count) {
- if (copy_to_user(u_si, karg.send_indices,
- (karg.send_count * sizeof(int))) ||
- copy_to_user(u_ss, karg.send_sizes,
- (karg.send_count * sizeof(int))))
- ret = -EFAULT;
- }
- if (karg.request_count) {
- if (copy_to_user(u_ri, karg.request_indices,
- (karg.request_count * sizeof(int))) ||
- copy_to_user(u_rs, karg.request_sizes,
- (karg.request_count * sizeof(int))))
- ret = -EFAULT;
- }
- }
+ ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long)p);
+ if (ret)
+ return ret;
-out:
- if (karg.send_indices)
- kfree(karg.send_indices);
- if (karg.send_sizes)
- kfree(karg.send_sizes);
- if (karg.request_indices)
- kfree(karg.request_indices);
- if (karg.request_sizes)
- kfree(karg.request_sizes);
+ if (copy_in_user(uarg, p, 2 * sizeof(int)) ||
+ copy_in_user(&uarg->flags, &p->flags, sizeof(drm_dma_flags_t)) ||
+ copy_in_user(&uarg->request_count, &p->request_count, sizeof(int))||
+ copy_in_user(&uarg->request_size, &p->request_size, sizeof(int)) ||
+ copy_in_user(&uarg->granted_count, &p->granted_count, sizeof(int)))
+ return -EFAULT;
- return ret;
+ return 0;
}
typedef struct drm32_ctx_res {
static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_ctx_res_t __user *uarg = (drm32_ctx_res_t __user *) arg;
- drm_ctx_t __user *ulist;
- drm_ctx_res_t karg;
- mm_segment_t old_fs;
- int orig_count, ret;
- u32 tmp;
+ drm_ctx_res_t __user *p = compat_alloc_user_space(sizeof(*p));
+ compat_uptr_t addr;
+ int ret;
- karg.contexts = NULL;
- if (get_user(karg.count, &uarg->count) ||
- get_user(tmp, &uarg->contexts))
+ if (copy_in_user(p, uarg, sizeof(int)) ||
+ get_user(addr, &uarg->contexts) ||
+ put_user(compat_ptr(addr), &p->contexts))
return -EFAULT;
- ulist = A(tmp);
-
- orig_count = karg.count;
- if (karg.count && ulist) {
- karg.contexts = kmalloc((karg.count * sizeof(drm_ctx_t)), GFP_KERNEL);
- if (!karg.contexts)
- return -ENOMEM;
- if (copy_from_user(karg.contexts, ulist,
- (karg.count * sizeof(drm_ctx_t)))) {
- kfree(karg.contexts);
- return -EFAULT;
- }
- }
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long) &karg);
- set_fs(old_fs);
-
- if (!ret) {
- if (orig_count) {
- if (copy_to_user(ulist, karg.contexts,
- (orig_count * sizeof(drm_ctx_t))))
- ret = -EFAULT;
- }
- if (put_user(karg.count, &uarg->count))
- ret = -EFAULT;
- }
+ ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long)p);
+ if (ret)
+ return ret;
- if (karg.contexts)
- kfree(karg.contexts);
+ if (copy_in_user(uarg, p, sizeof(int)))
+ return -EFAULT;
- return ret;
+ return 0;
}
#endif
#include <stdarg.h>
+#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/config.h>
#include <linux/reboot.h>
#include <linux/delay.h>
+#include <linux/compat.h>
#include <linux/init.h>
#include <asm/oplib.h>
clone_flags &= ~CLONE_IDLETASK;
+#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_32BIT)) {
parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
- } else {
+ } else
+#endif
+ {
parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
}
*
* On SYSIO, using an 8K page size we have 1GB of SBUS
* DMA space mapped. We divide this space into equally
- * sized clusters. Currently we allow clusters up to a
- * size of 1MB. If anything begins to generate DMA
- * mapping requests larger than this we will need to
- * increase things a bit.
+ * sized clusters. We allocate a DMA mapping from the
+ * cluster that matches the order of the allocation, or
+ * if the order is greater than the number of clusters,
+ * we try to allocate from the last cluster.
*/
#define NCLUSTERS 8UL
static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
- iopte_t *iopte, *limit, *first;
- unsigned long cnum, ent, flush_point;
+ iopte_t *iopte, *limit, *first, *cluster;
+ unsigned long cnum, ent, nent, flush_point, found;
cnum = 0;
+ nent = 1;
while ((1UL << cnum) < npages)
cnum++;
+ if(cnum >= NCLUSTERS) {
+ nent = 1UL << (cnum - NCLUSTERS);
+ cnum = NCLUSTERS - 1;
+ }
iopte = iommu->page_table + (cnum * CLUSTER_NPAGES);
if (cnum == 0)
flush_point = iommu->alloc_info[cnum].flush;
first = iopte;
+ cluster = NULL;
+ found = 0;
for (;;) {
if (iopte_val(*iopte) == 0UL) {
- if ((iopte + (1 << cnum)) >= limit)
- ent = 0;
- else
- ent = ent + 1;
- iommu->alloc_info[cnum].next = ent;
- if (ent == flush_point)
- __iommu_flushall(iommu);
- break;
+ found++;
+ if (!cluster)
+ cluster = iopte;
+ } else {
+ /* Used cluster in the way */
+ cluster = NULL;
+ found = 0;
}
+
+ if (found == nent)
+ break;
+
iopte += (1 << cnum);
ent++;
if (iopte >= limit) {
iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
ent = 0;
+
+ /* Multiple cluster allocations must not wrap */
+ cluster = NULL;
+ found = 0;
}
if (ent == flush_point)
__iommu_flushall(iommu);
goto bad;
}
+ /* ent/iopte points to the last cluster entry we're going to use,
+ * so save our place for the next allocation.
+ */
+ if ((iopte + (1 << cnum)) >= limit)
+ ent = 0;
+ else
+ ent = ent + 1;
+ iommu->alloc_info[cnum].next = ent;
+ if (ent == flush_point)
+ __iommu_flushall(iommu);
+
/* I've got your streaming cluster right here buddy boy... */
- return iopte;
+ return cluster;
bad:
printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
- unsigned long cnum, ent;
+ unsigned long cnum, ent, nent;
iopte_t *iopte;
cnum = 0;
+ nent = 1;
while ((1UL << cnum) < npages)
cnum++;
+ if(cnum >= NCLUSTERS) {
+ nent = 1UL << (cnum - NCLUSTERS);
+ cnum = NCLUSTERS - 1;
+ }
ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
- iopte_val(*iopte) = 0UL;
+ do {
+ iopte_val(*iopte) = 0UL;
+ iopte += 1 << cnum;
+ } while(--nent);
/* If the global flush might not have caught this entry,
* adjust the flush point such that we will flush before
EXPORT_SYMBOL(__write_unlock);
EXPORT_SYMBOL(__write_trylock);
/* Out of line spin-locking implementation. */
+EXPORT_SYMBOL(_raw_spin_lock);
EXPORT_SYMBOL(_raw_spin_lock_flags);
#endif
EXPORT_SYMBOL(synchronize_irq);
#if defined(CONFIG_MCOUNT)
-extern void mcount(void);
-EXPORT_SYMBOL_NOVERS(mcount);
+extern void _mcount(void);
+EXPORT_SYMBOL_NOVERS(_mcount);
#endif
/* CPU online map and active count. */
#endif
/* Special internal versions of library functions. */
-EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(_clear_page);
EXPORT_SYMBOL(clear_user_page);
EXPORT_SYMBOL(csum_partial_copy_sparc64);
EXPORT_SYMBOL(ip_fast_csum);
-/* Moving data to/from userspace. */
+/* Moving data to/from/in userspace. */
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(__copy_in_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__bzero_noasi);
if (flags & MAP_FIXED) {
/* Ok, don't mess with it. */
- return get_unmapped_area(NULL, addr, len, pgoff, flags, 0);
+ return get_unmapped_area(NULL, addr, len, pgoff, flags);
}
flags &= ~MAP_SHARED;
align_goal = (64UL * 1024);
do {
- addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags, 0);
+ addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
if (!(addr & ~PAGE_MASK)) {
addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
break;
* be obtained.
*/
if (addr & ~PAGE_MASK)
- addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags, 0);
+ addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
return addr;
}
/* MREMAP_FIXED checked above. */
new_addr = get_unmapped_area(file, addr, new_len,
vma ? vma->vm_pgoff : 0,
- map_flags, vma->vm_flags & VM_EXEC);
+ map_flags);
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out_sem;
return sys_ftruncate(fd, (high << 32) | low);
}
-/* readdir & getdents */
-
-#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
-#define ROUND_UP(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1))
-
-struct old_linux_dirent32 {
- u32 d_ino;
- u32 d_offset;
- unsigned short d_namlen;
- char d_name[1];
-};
-
-struct readdir_callback32 {
- struct old_linux_dirent32 __user * dirent;
- int count;
-};
-
-static int fillonedir(void * __buf, const char * name, int namlen,
- loff_t offset, ino_t ino, unsigned int d_type)
-{
- struct readdir_callback32 * buf = (struct readdir_callback32 *) __buf;
- struct old_linux_dirent32 __user * dirent;
-
- if (buf->count)
- return -EINVAL;
- buf->count++;
- dirent = buf->dirent;
- put_user(ino, &dirent->d_ino);
- put_user(offset, &dirent->d_offset);
- put_user(namlen, &dirent->d_namlen);
- copy_to_user(dirent->d_name, name, namlen);
- put_user(0, dirent->d_name + namlen);
- return 0;
-}
-
-asmlinkage long old32_readdir(unsigned int fd, struct old_linux_dirent32 __user *dirent, unsigned int count)
-{
- int error = -EBADF;
- struct file * file;
- struct readdir_callback32 buf;
-
- file = fget(fd);
- if (!file)
- goto out;
-
- buf.count = 0;
- buf.dirent = dirent;
-
- error = vfs_readdir(file, fillonedir, &buf);
- if (error < 0)
- goto out_putf;
- error = buf.count;
-
-out_putf:
- fput(file);
-out:
- return error;
-}
-
-struct linux_dirent32 {
- u32 d_ino;
- u32 d_off;
- unsigned short d_reclen;
- char d_name[1];
-};
-
-struct getdents_callback32 {
- struct linux_dirent32 __user *current_dir;
- struct linux_dirent32 __user *previous;
- int count;
- int error;
-};
-
-static int filldir(void * __buf, const char * name, int namlen, loff_t offset, ino_t ino,
- unsigned int d_type)
-{
- struct linux_dirent32 __user * dirent;
- struct getdents_callback32 * buf = (struct getdents_callback32 *) __buf;
- int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 2);
-
- buf->error = -EINVAL; /* only used if we fail.. */
- if (reclen > buf->count)
- return -EINVAL;
- dirent = buf->previous;
- if (dirent)
- put_user(offset, &dirent->d_off);
- dirent = buf->current_dir;
- buf->previous = dirent;
- put_user(ino, &dirent->d_ino);
- put_user(reclen, &dirent->d_reclen);
- copy_to_user(dirent->d_name, name, namlen);
- put_user(0, dirent->d_name + namlen);
- put_user(d_type, (char __user *) dirent + reclen - 1);
- dirent = (void __user *) dirent + reclen;
- buf->current_dir = dirent;
- buf->count -= reclen;
- return 0;
-}
-
-asmlinkage long sys32_getdents(unsigned int fd, struct linux_dirent32 __user *dirent, unsigned int count)
-{
- struct file * file;
- struct linux_dirent32 __user *lastdirent;
- struct getdents_callback32 buf;
- int error = -EBADF;
-
- file = fget(fd);
- if (!file)
- goto out;
-
- buf.current_dir = dirent;
- buf.previous = NULL;
- buf.count = count;
- buf.error = 0;
-
- error = vfs_readdir(file, filldir, &buf);
- if (error < 0)
- goto out_putf;
- lastdirent = buf.previous;
- error = buf.error;
- if (lastdirent) {
- put_user(file->f_pos, &lastdirent->d_off);
- error = count - buf.count;
- }
-out_putf:
- fput(file);
-out:
- return error;
-}
-
-/* end of readdir & getdents */
-
int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
{
int err;
/* MREMAP_FIXED checked above. */
new_addr = get_unmapped_area(file, addr, new_len,
vma ? vma->vm_pgoff : 0,
- map_flags, vma->vm_flags & VM_EXEC);
+ map_flags);
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out_sem;
.text
.align 4
+#ifdef CONFIG_COMPAT
/* First, the 32-bit Linux native syscall table. */
.globl sys_call_table32
.word compat_sys_fcntl64, sys_ni_syscall, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall
.word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr
-/*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, sys32_getdents
+/*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
.word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr
/*180*/ .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
.word sys32_setpgid, sys32_fremovexattr, sys32_tkill, sys32_exit_group, sparc64_newuname
/*190*/ .word sys32_init_module, sparc64_personality, sys_remap_file_pages, sys32_epoll_create, sys32_epoll_ctl
.word sys32_epoll_wait, sys_nis_syscall, sys_getppid, sys32_sigaction, sys_sgetmask
-/*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, old32_readdir
+/*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_old_readdir
.word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64
/*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo
.word sys32_ipc, sys32_sigreturn, sys_clone, sys_nis_syscall, sys32_adjtimex
.word sys_mq_timedsend, sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, sys_ni_syscall
/*280*/ .word sys_ni_syscall, sys_ni_syscall, sys_ni_syscall
+#endif /* CONFIG_COMPAT */
+
/* Now the 64-bit native Linux syscall table. */
.align 4
sys_call_table:
/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
/*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link
-/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown, sys_mknod
+/*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
/*15*/ .word sys_chmod, sys_lchown, sparc_brk, sys_perfctr, sys_lseek
/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
/*25*/ .word sys_nis_syscall, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
-/* $Id: U3copy_from_user.S,v 1.4 2002/01/15 07:16:26 davem Exp $
- * U3memcpy.S: UltraSparc-III optimized copy from userspace.
+/* U3copy_from_user.S: UltraSparc-III optimized copy from userspace.
*
- * Copyright (C) 1999, 2000 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
-#undef SMALL_COPY_USES_FPU
+
+#define XCC xcc
+
+#define EXNV_RAW(x,y,a,b) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: ba U3cfu_fixup; \
+ a, b, %o1; \
+ .section __ex_table; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4;
#define EXNV(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
-99: VISExitHalf; \
+99: add %o1, %o3, %o0; \
ba U3cfu_fixup; \
a, b, %o1; \
.section __ex_table; \
.word 98b, 99b; \
.text; \
.align 4;
+#define EXNV4(x,y,a,b) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: add %o1, %o3, %o0; \
+ a, b, %o1; \
+ ba U3cfu_fixup; \
+ add %o1, 4, %o1; \
+ .section __ex_table; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4;
+#define EXNV8(x,y,a,b) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: add %o1, %o3, %o0; \
+ a, b, %o1; \
+ ba U3cfu_fixup; \
+ add %o1, 8, %o1; \
+ .section __ex_table; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4;
#define EX(x,y,a,b) \
98: x,y; \
.section .fixup; \
.word 98b, 99b; \
.text; \
.align 4;
-#else
-#define ASI_BLK_P 0xf0
-#define FPRS_FEF 0x04
-#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
-#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
-#define SMALL_COPY_USES_FPU
-#define EXNV(x,y,a,b) x,y;
-#define EX(x,y,a,b) x,y;
-#define EX2(x,y) x,y;
-#define EX3(x,y) x,y;
-#define EX4(x,y) x,y;
-#endif
+
+ .register %g2,#scratch
+ .register %g3,#scratch
/* Special/non-trivial issues of this code:
*
* of up to 2.4GB per second.
*/
- .globl U3copy_from_user
-U3copy_from_user: /* %o0=dst, %o1=src, %o2=len */
-#ifndef __KERNEL__
- /* Save away original 'dst' for memcpy return value. */
- mov %o0, %g3 ! A0 Group
-#endif
- /* Anything to copy at all? */
- cmp %o2, 0 ! A1
- ble,pn %icc, U3copy_from_user_short_ret! BR
-
- /* Extremely small copy? */
- cmp %o2, 31 ! A0 Group
- ble,pn %icc, U3copy_from_user_short ! BR
-
- /* Large enough to use unrolled prefetch loops? */
- cmp %o2, 0x100 ! A1
- bge,a,pt %icc, U3copy_from_user_enter ! BR Group
- andcc %o0, 0x3f, %g2 ! A0
-
- ba,pt %xcc, U3copy_from_user_toosmall ! BR Group
- andcc %o0, 0x7, %g2 ! A0
-
- .align 32
-U3copy_from_user_short:
- /* Copy %o2 bytes from src to dst, one byte at a time. */
- EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g0)! MS Group
- add %o1, 0x1, %o1 ! A0
- add %o0, 0x1, %o0 ! A1
- subcc %o2, 1, %o2 ! A0 Group
-
- bg,pt %icc, U3copy_from_user_short ! BR
- stb %o3, [%o0 + -1] ! MS Group (1-cycle stall)
-
-U3copy_from_user_short_ret:
-#ifdef __KERNEL__
- retl ! BR Group (0-4 cycle stall)
- clr %o0 ! A0
-#else
- retl ! BR Group (0-4 cycle stall)
- mov %g3, %o0 ! A0
-#endif
-
- /* Here len >= (6 * 64) and condition codes reflect execution
+ .globl U3copy_from_user
+U3copy_from_user: /* %o0=dst, %o1=src, %o2=len */
+ cmp %o2, 0
+ be,pn %XCC, out
+ or %o0, %o1, %o3
+ cmp %o2, 16
+ bleu,a,pn %XCC, small_copy
+ or %o3, %o2, %o3
+
+ cmp %o2, 256
+ blu,pt %XCC, medium_copy
+ andcc %o3, 0x7, %g0
+
+ ba,pt %xcc, enter
+ andcc %o0, 0x3f, %g2
+
+ /* Here len >= 256 and condition codes reflect execution
* of "andcc %o0, 0x7, %g2", done by caller.
*/
.align 64
-U3copy_from_user_enter:
+enter:
/* Is 'dst' already aligned on an 64-byte boundary? */
- be,pt %xcc, 2f ! BR
+ be,pt %XCC, 2f
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'.
*/
- sub %g2, 0x40, %g2 ! A0 Group
- sub %g0, %g2, %g2 ! A0 Group
- sub %o2, %g2, %o2 ! A0 Group
+ sub %g2, 0x40, %g2
+ sub %g0, %g2, %g2
+ sub %o2, %g2, %o2
/* Copy %g2 bytes from src to dst, one byte at a time. */
-1: EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)! MS (Group)
- add %o1, 0x1, %o1 ! A1
- add %o0, 0x1, %o0 ! A0 Group
- subcc %g2, 0x1, %g2 ! A1
+1: EXNV_RAW(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)
+ add %o1, 0x1, %o1
+ add %o0, 0x1, %o0
+ subcc %g2, 0x1, %g2
- bg,pt %icc, 1b ! BR Group
- stb %o3, [%o0 + -1] ! MS Group
+ bg,pt %XCC, 1b
+ stb %o3, [%o0 + -1]
-2: VISEntryHalf ! MS+MS
- and %o1, 0x7, %g1 ! A1
- ba,pt %xcc, U3copy_from_user_begin ! BR
- alignaddr %o1, %g0, %o1 ! MS (Break-after)
+2: VISEntryHalf
+ and %o1, 0x7, %g1
+ ba,pt %xcc, begin
+ alignaddr %o1, %g0, %o1
.align 64
-U3copy_from_user_begin:
-#ifdef __KERNEL__
+begin:
+
.globl U3copy_from_user_nop_1_6
U3copy_from_user_nop_1_6:
ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
or %g3, %o3, %o3
stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
membar #Sync
-#endif
- prefetcha [%o1 + 0x000] %asi, #one_read ! MS Group1
- prefetcha [%o1 + 0x040] %asi, #one_read ! MS Group2
- andn %o2, (0x40 - 1), %o4 ! A0
- prefetcha [%o1 + 0x080] %asi, #one_read ! MS Group3
- cmp %o4, 0x140 ! A0
- prefetcha [%o1 + 0x0c0] %asi, #one_read ! MS Group4
- EX(ldda [%o1 + 0x000] %asi, %f0, add %o2, %g0) ! MS Group5 (%f0 results at G8)
- bge,a,pt %icc, 1f ! BR
-
- prefetcha [%o1 + 0x100] %asi, #one_read ! MS Group6
-1: EX(ldda [%o1 + 0x008] %asi, %f2, add %o2, %g0) ! AX (%f2 results at G9)
- cmp %o4, 0x180 ! A1
- bge,a,pt %icc, 1f ! BR
- prefetcha [%o1 + 0x140] %asi, #one_read ! MS Group7
-1: EX(ldda [%o1 + 0x010] %asi, %f4, add %o2, %g0) ! AX (%f4 results at G10)
- cmp %o4, 0x1c0 ! A1
- bge,a,pt %icc, 1f ! BR
-
- prefetcha [%o1 + 0x180] %asi, #one_read ! MS Group8
-1: faligndata %f0, %f2, %f16 ! FGA Group9 (%f16 at G12)
- EX(ldda [%o1 + 0x018] %asi, %f6, add %o2, %g0) ! AX (%f6 results at G12)
- faligndata %f2, %f4, %f18 ! FGA Group10 (%f18 results at G13)
- EX(ldda [%o1 + 0x020] %asi, %f8, add %o2, %g0) ! MS (%f8 results at G13)
- faligndata %f4, %f6, %f20 ! FGA Group12 (1-cycle stall,%f20 at G15)
- EX(ldda [%o1 + 0x028] %asi, %f10, add %o2, %g0) ! MS (%f10 results at G15)
- faligndata %f6, %f8, %f22 ! FGA Group13 (%f22 results at G16)
-
- EX(ldda [%o1 + 0x030] %asi, %f12, add %o2, %g0) ! MS (%f12 results at G16)
- faligndata %f8, %f10, %f24 ! FGA Group15 (1-cycle stall,%f24 at G18)
- EX(ldda [%o1 + 0x038] %asi, %f14, add %o2, %g0) ! MS (%f14 results at G18)
- faligndata %f10, %f12, %f26 ! FGA Group16 (%f26 results at G19)
- EX(ldda [%o1 + 0x040] %asi, %f0, add %o2, %g0) ! MS (%f0 results at G19)
-
- /* We only use the first loop if len > (7 * 64). */
- subcc %o4, 0x1c0, %o4 ! A0 Group17
- bg,pt %icc, U3copy_from_user_loop1 ! BR
- add %o1, 0x40, %o1 ! A1
-
- add %o4, 0x140, %o4 ! A0 Group18
- ba,pt %xcc, U3copy_from_user_loop2 ! BR
- srl %o4, 6, %o3 ! A0 Group19
- nop
- nop
- nop
- nop
- nop
-
- nop
- nop
-
- /* This loop performs the copy and queues new prefetches.
- * We drop into the second loop when len <= (5 * 64). Note
- * that this (5 * 64) factor has been subtracted from len
- * already.
- */
-U3copy_from_user_loop1:
- EX2(ldda [%o1 + 0x008] %asi, %f2) ! MS Group2 (%f2 results at G5)
- faligndata %f12, %f14, %f28 ! FGA (%f28 results at G5)
- EX2(ldda [%o1 + 0x010] %asi, %f4) ! MS Group3 (%f4 results at G6)
- faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall, %f30 at G7)
- stda %f16, [%o0] ASI_BLK_P ! MS
- EX2(ldda [%o1 + 0x018] %asi, %f6) ! AX (%f6 results at G7)
-
- faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
- EX2(ldda [%o1 + 0x020] %asi, %f8) ! MS (%f8 results at G15)
- faligndata %f2, %f4, %f18 ! FGA Group13 (%f18 results at G16)
- EX2(ldda [%o1 + 0x028] %asi, %f10) ! MS (%f10 results at G16)
- faligndata %f4, %f6, %f20 ! FGA Group14 (%f20 results at G17)
- EX2(ldda [%o1 + 0x030] %asi, %f12) ! MS (%f12 results at G17)
- faligndata %f6, %f8, %f22 ! FGA Group15 (%f22 results at G18)
- EX2(ldda [%o1 + 0x038] %asi, %f14) ! MS (%f14 results at G18)
-
- faligndata %f8, %f10, %f24 ! FGA Group16 (%f24 results at G19)
- EX2(ldda [%o1 + 0x040] %asi, %f0) ! AX (%f0 results at G19)
- prefetcha [%o1 + 0x180] %asi, #one_read ! MS
- faligndata %f10, %f12, %f26 ! FGA Group17 (%f26 results at G20)
- subcc %o4, 0x40, %o4 ! A0
- add %o1, 0x40, %o1 ! A1
- bg,pt %xcc, U3copy_from_user_loop1 ! BR
- add %o0, 0x40, %o0 ! A0 Group18
-
-U3copy_from_user_loop2_enter:
- mov 5, %o3 ! A1
-
- /* This loop performs on the copy, no new prefetches are
- * queued. We do things this way so that we do not perform
- * any spurious prefetches past the end of the src buffer.
- */
-U3copy_from_user_loop2:
- EX3(ldda [%o1 + 0x008] %asi, %f2) ! MS
- faligndata %f12, %f14, %f28 ! FGA Group2
- EX3(ldda [%o1 + 0x010] %asi, %f4) ! MS
- faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall)
- stda %f16, [%o0] ASI_BLK_P ! MS
- EX3(ldda [%o1 + 0x018] %asi, %f6) ! AX
- faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
-
- EX3(ldda [%o1 + 0x020] %asi, %f8) ! MS
- faligndata %f2, %f4, %f18 ! FGA Group13
- EX3(ldda [%o1 + 0x028] %asi, %f10) ! MS
- faligndata %f4, %f6, %f20 ! FGA Group14
- EX3(ldda [%o1 + 0x030] %asi, %f12) ! MS
- faligndata %f6, %f8, %f22 ! FGA Group15
- EX3(ldda [%o1 + 0x038] %asi, %f14) ! MS
- faligndata %f8, %f10, %f24 ! FGA Group16
-
- EX3(ldda [%o1 + 0x040] %asi, %f0) ! AX
- faligndata %f10, %f12, %f26 ! FGA Group17
- subcc %o3, 0x01, %o3 ! A0
- add %o1, 0x40, %o1 ! A1
- bg,pt %xcc, U3copy_from_user_loop2 ! BR
- add %o0, 0x40, %o0 ! A0 Group18
+
+ prefetcha [%o1 + 0x000] %asi, #one_read
+ prefetcha [%o1 + 0x040] %asi, #one_read
+ andn %o2, (0x40 - 1), %o4
+ prefetcha [%o1 + 0x080] %asi, #one_read
+ prefetcha [%o1 + 0x0c0] %asi, #one_read
+ EX(ldda [%o1 + 0x000] %asi, %f0, add %o2, %g0)
+ prefetcha [%o1 + 0x100] %asi, #one_read
+ EX(ldda [%o1 + 0x008] %asi, %f2, add %o2, %g0)
+ prefetcha [%o1 + 0x140] %asi, #one_read
+ EX(ldda [%o1 + 0x010] %asi, %f4, add %o2, %g0)
+ prefetcha [%o1 + 0x180] %asi, #one_read
+ faligndata %f0, %f2, %f16
+ EX(ldda [%o1 + 0x018] %asi, %f6, add %o2, %g0)
+ faligndata %f2, %f4, %f18
+ EX(ldda [%o1 + 0x020] %asi, %f8, add %o2, %g0)
+ faligndata %f4, %f6, %f20
+ EX(ldda [%o1 + 0x028] %asi, %f10, add %o2, %g0)
+ faligndata %f6, %f8, %f22
+
+ EX(ldda [%o1 + 0x030] %asi, %f12, add %o2, %g0)
+ faligndata %f8, %f10, %f24
+ EX(ldda [%o1 + 0x038] %asi, %f14, add %o2, %g0)
+ faligndata %f10, %f12, %f26
+ EX(ldda [%o1 + 0x040] %asi, %f0, add %o2, %g0)
+
+ sub %o4, 0x80, %o4
+ add %o1, 0x40, %o1
+ ba,pt %xcc, loop
+ srl %o4, 6, %o3
+
+ .align 64
+loop:
+ EX3(ldda [%o1 + 0x008] %asi, %f2)
+ faligndata %f12, %f14, %f28
+ EX3(ldda [%o1 + 0x010] %asi, %f4)
+ faligndata %f14, %f0, %f30
+ stda %f16, [%o0] ASI_BLK_P
+ EX3(ldda [%o1 + 0x018] %asi, %f6)
+ faligndata %f0, %f2, %f16
+
+ EX3(ldda [%o1 + 0x020] %asi, %f8)
+ faligndata %f2, %f4, %f18
+ EX3(ldda [%o1 + 0x028] %asi, %f10)
+ faligndata %f4, %f6, %f20
+ EX3(ldda [%o1 + 0x030] %asi, %f12)
+ faligndata %f6, %f8, %f22
+ EX3(ldda [%o1 + 0x038] %asi, %f14)
+ faligndata %f8, %f10, %f24
+
+ EX3(ldda [%o1 + 0x040] %asi, %f0)
+ prefetcha [%o1 + 0x180] %asi, #one_read
+ faligndata %f10, %f12, %f26
+ subcc %o3, 0x01, %o3
+ add %o1, 0x40, %o1
+ bg,pt %XCC, loop
+ add %o0, 0x40, %o0
/* Finally we copy the last full 64-byte block. */
-U3copy_from_user_loopfini:
- EX3(ldda [%o1 + 0x008] %asi, %f2) ! MS
- faligndata %f12, %f14, %f28 ! FGA
- EX3(ldda [%o1 + 0x010] %asi, %f4) ! MS Group19
- faligndata %f14, %f0, %f30 ! FGA
- stda %f16, [%o0] ASI_BLK_P ! MS Group20
- EX3(ldda [%o1 + 0x018] %asi, %f6) ! AX
- faligndata %f0, %f2, %f16 ! FGA Group11 (7-cycle stall)
- EX3(ldda [%o1 + 0x020] %asi, %f8) ! MS
- faligndata %f2, %f4, %f18 ! FGA Group12
- EX3(ldda [%o1 + 0x028] %asi, %f10) ! MS
- faligndata %f4, %f6, %f20 ! FGA Group13
- EX3(ldda [%o1 + 0x030] %asi, %f12) ! MS
- faligndata %f6, %f8, %f22 ! FGA Group14
- EX3(ldda [%o1 + 0x038] %asi, %f14) ! MS
- faligndata %f8, %f10, %f24 ! FGA Group15
- cmp %g1, 0 ! A0
- be,pt %icc, 1f ! BR
- add %o0, 0x40, %o0 ! A1
- EX4(ldda [%o1 + 0x040] %asi, %f0) ! MS
-1: faligndata %f10, %f12, %f26 ! FGA Group16
- faligndata %f12, %f14, %f28 ! FGA Group17
- faligndata %f14, %f0, %f30 ! FGA Group18
- stda %f16, [%o0] ASI_BLK_P ! MS
- add %o0, 0x40, %o0 ! A0
- add %o1, 0x40, %o1 ! A1
-#ifdef __KERNEL__
+loopfini:
+ EX3(ldda [%o1 + 0x008] %asi, %f2)
+ faligndata %f12, %f14, %f28
+ EX3(ldda [%o1 + 0x010] %asi, %f4)
+ faligndata %f14, %f0, %f30
+ stda %f16, [%o0] ASI_BLK_P
+ EX3(ldda [%o1 + 0x018] %asi, %f6)
+ faligndata %f0, %f2, %f16
+ EX3(ldda [%o1 + 0x020] %asi, %f8)
+ faligndata %f2, %f4, %f18
+ EX3(ldda [%o1 + 0x028] %asi, %f10)
+ faligndata %f4, %f6, %f20
+ EX3(ldda [%o1 + 0x030] %asi, %f12)
+ faligndata %f6, %f8, %f22
+ EX3(ldda [%o1 + 0x038] %asi, %f14)
+ faligndata %f8, %f10, %f24
+ cmp %g1, 0
+ be,pt %XCC, 1f
+ add %o0, 0x40, %o0
+ EX4(ldda [%o1 + 0x040] %asi, %f0)
+1: faligndata %f10, %f12, %f26
+ faligndata %f12, %f14, %f28
+ faligndata %f14, %f0, %f30
+ stda %f16, [%o0] ASI_BLK_P
+ add %o0, 0x40, %o0
+ add %o1, 0x40, %o1
+
.globl U3copy_from_user_nop_2_3
U3copy_from_user_nop_2_3:
mov PRIMARY_CONTEXT, %o3
stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
-#endif
- membar #Sync ! MS Group26 (7-cycle stall)
+
+ membar #Sync
/* Now we copy the (len modulo 64) bytes at the end.
* Note how we borrow the %f0 loaded above.
*
* Also notice how this code is careful not to perform a
- * load past the end of the src buffer just like similar
- * code found in U3copy_from_user_toosmall processing.
+ * load past the end of the src buffer.
*/
-U3copy_from_user_loopend:
- and %o2, 0x3f, %o2 ! A0 Group
- andcc %o2, 0x38, %g2 ! A0 Group
- be,pn %icc, U3copy_from_user_endcruft ! BR
- subcc %g2, 0x8, %g2 ! A1
- be,pn %icc, U3copy_from_user_endcruft ! BR Group
- cmp %g1, 0 ! A0
-
- be,a,pt %icc, 1f ! BR Group
- EX(ldda [%o1 + 0x00] %asi, %f0, add %o2, %g0) ! MS
-
-1: EX(ldda [%o1 + 0x08] %asi, %f2, add %o2, %g0) ! MS Group
- add %o1, 0x8, %o1 ! A0
- sub %o2, 0x8, %o2 ! A1
- subcc %g2, 0x8, %g2 ! A0 Group
- faligndata %f0, %f2, %f8 ! FGA Group
- std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
- be,pn %icc, U3copy_from_user_endcruft ! BR
- add %o0, 0x8, %o0 ! A0
- EX(ldda [%o1 + 0x08] %asi, %f0, add %o2, %g0) ! MS Group
- add %o1, 0x8, %o1 ! A0
- sub %o2, 0x8, %o2 ! A1
- subcc %g2, 0x8, %g2 ! A0 Group
- faligndata %f2, %f0, %f8 ! FGA
- std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
- bne,pn %icc, 1b ! BR
- add %o0, 0x8, %o0 ! A0 Group
+loopend:
+ and %o2, 0x3f, %o2
+ andcc %o2, 0x38, %g2
+ be,pn %XCC, endcruft
+ subcc %g2, 0x8, %g2
+ be,pn %XCC, endcruft
+ cmp %g1, 0
+
+ be,a,pt %XCC, 1f
+ EX(ldda [%o1 + 0x00] %asi, %f0, add %o2, %g0)
+
+1: EX(ldda [%o1 + 0x08] %asi, %f2, add %o2, %g0)
+ add %o1, 0x8, %o1
+ sub %o2, 0x8, %o2
+ subcc %g2, 0x8, %g2
+ faligndata %f0, %f2, %f8
+ std %f8, [%o0 + 0x00]
+ be,pn %XCC, endcruft
+ add %o0, 0x8, %o0
+ EX(ldda [%o1 + 0x08] %asi, %f0, add %o2, %g0)
+ add %o1, 0x8, %o1
+ sub %o2, 0x8, %o2
+ subcc %g2, 0x8, %g2
+ faligndata %f2, %f0, %f8
+ std %f8, [%o0 + 0x00]
+ bne,pn %XCC, 1b
+ add %o0, 0x8, %o0
/* If anything is left, we copy it one byte at a time.
* Note that %g1 is (src & 0x3) saved above before the
* alignaddr was performed.
*/
-U3copy_from_user_endcruft:
+endcruft:
cmp %o2, 0
add %o1, %g1, %o1
VISExitHalf
- be,pn %icc, U3copy_from_user_short_ret
- nop
- ba,a,pt %xcc, U3copy_from_user_short
-
- /* If we get here, then 32 <= len < (6 * 64) */
-U3copy_from_user_toosmall:
-
-#ifdef SMALL_COPY_USES_FPU
-
- /* Is 'dst' already aligned on an 8-byte boundary? */
- be,pt %xcc, 2f ! BR Group
-
- /* Compute abs((dst & 7) - 8) into %g2. This is the number
- * of bytes to copy to make 'dst' 8-byte aligned. We pre-
- * subtract this from 'len'.
- */
- sub %g2, 0x8, %g2 ! A0
- sub %g0, %g2, %g2 ! A0 Group (reg-dep)
- sub %o2, %g2, %o2 ! A0 Group (reg-dep)
+ be,pn %XCC, out
+ sub %o0, %o1, %o3
- /* Copy %g2 bytes from src to dst, one byte at a time. */
-1: EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)! MS (Group) (%o3 in 3 cycles)
- add %o1, 0x1, %o1 ! A1
- add %o0, 0x1, %o0 ! A0 Group
- subcc %g2, 0x1, %g2 ! A1
-
- bg,pt %icc, 1b ! BR Group
- stb %o3, [%o0 + -1] ! MS Group
+ andcc %g1, 0x7, %g0
+ bne,pn %icc, small_copy_unaligned
+ andcc %o2, 0x8, %g0
+ be,pt %icc, 1f
+ nop
+ EXNV(ldxa [%o1] %asi, %o5, add %o2, %g0)
+ stx %o5, [%o1 + %o3]
+ add %o1, 0x8, %o1
-2: VISEntryHalf ! MS+MS
+1: andcc %o2, 0x4, %g0
+ be,pt %icc, 1f
+ nop
+ EXNV(lduwa [%o1] %asi, %o5, and %o2, 0x7)
+ stw %o5, [%o1 + %o3]
+ add %o1, 0x4, %o1
- /* Compute (len - (len % 8)) into %g2. This is guaranteed
- * to be nonzero.
- */
- andn %o2, 0x7, %g2 ! A0 Group
-
- /* You may read this and believe that it allows reading
- * one 8-byte longword past the end of src. It actually
- * does not, as %g2 is subtracted as loads are done from
- * src, so we always stop before running off the end.
- * Also, we are guaranteed to have at least 0x10 bytes
- * to move here.
- */
- sub %g2, 0x8, %g2 ! A0 Group (reg-dep)
- alignaddr %o1, %g0, %g1 ! MS (Break-after)
- EX(ldda [%g1 + 0x00] %asi, %f0, add %o2, %g0) ! MS Group (1-cycle stall)
- add %g1, 0x8, %g1 ! A0
-
-1: EX(ldda [%g1 + 0x00] %asi, %f2, add %o2, %g0) ! MS Group
- add %g1, 0x8, %g1 ! A0
- sub %o2, 0x8, %o2 ! A1
- subcc %g2, 0x8, %g2 ! A0 Group
-
- faligndata %f0, %f2, %f8 ! FGA Group (1-cycle stall)
- std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
- add %o1, 0x8, %o1 ! A0
- be,pn %icc, 2f ! BR
-
- add %o0, 0x8, %o0 ! A1
- EX(ldda [%g1 + 0x00] %asi, %f0, add %o2, %g0) ! MS Group
- add %g1, 0x8, %g1 ! A0
- sub %o2, 0x8, %o2 ! A1
-
- subcc %g2, 0x8, %g2 ! A0 Group
- faligndata %f2, %f0, %f8 ! FGA Group (1-cycle stall)
- std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
- add %o1, 0x8, %o1 ! A0
-
- bne,pn %icc, 1b ! BR
- add %o0, 0x8, %o0 ! A1
-
- /* Nothing left to copy? */
-2: cmp %o2, 0 ! A0 Group
- VISExitHalf ! A0+MS
- be,pn %icc, U3copy_from_user_short_ret! BR Group
- nop ! A0
- ba,a,pt %xcc, U3copy_from_user_short ! BR Group
-
-#else /* !(SMALL_COPY_USES_FPU) */
-
- xor %o1, %o0, %g2
- andcc %g2, 0x7, %g0
- bne,pn %icc, U3copy_from_user_short
- andcc %o1, 0x7, %g2
-
- be,pt %xcc, 2f
- sub %g2, 0x8, %g2
- sub %g0, %g2, %g2
- sub %o2, %g2, %o2
+1: andcc %o2, 0x2, %g0
+ be,pt %icc, 1f
+ nop
+ EXNV(lduha [%o1] %asi, %o5, and %o2, 0x3)
+ sth %o5, [%o1 + %o3]
+ add %o1, 0x2, %o1
-1: EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)
- add %o1, 0x1, %o1
- add %o0, 0x1, %o0
- subcc %g2, 0x1, %g2
- bg,pt %icc, 1b
- stb %o3, [%o0 + -1]
+1: andcc %o2, 0x1, %g0
+ be,pt %icc, out
+ nop
+ EXNV(lduba [%o1] %asi, %o5, and %o2, 0x1)
+ ba,pt %xcc, out
+ stb %o5, [%o1 + %o3]
+
+medium_copy: /* 16 < len <= 64 */
+ bne,pn %XCC, small_copy_unaligned
+ sub %o0, %o1, %o3
+
+medium_copy_aligned:
+ andn %o2, 0x7, %o4
+ and %o2, 0x7, %o2
+1: subcc %o4, 0x8, %o4
+ EXNV8(ldxa [%o1] %asi, %o5, add %o2, %o4)
+ stx %o5, [%o1 + %o3]
+ bgu,pt %XCC, 1b
+ add %o1, 0x8, %o1
+ andcc %o2, 0x4, %g0
+ be,pt %XCC, 1f
+ nop
+ sub %o2, 0x4, %o2
+ EXNV4(lduwa [%o1] %asi, %o5, add %o2, %g0)
+ stw %o5, [%o1 + %o3]
+ add %o1, 0x4, %o1
+1: cmp %o2, 0
+ be,pt %XCC, out
+ nop
+ ba,pt %xcc, small_copy_unaligned
+ nop
-2: andn %o2, 0x7, %g2
- sub %o2, %g2, %o2
+small_copy: /* 0 < len <= 16 */
+ andcc %o3, 0x3, %g0
+ bne,pn %XCC, small_copy_unaligned
+ sub %o0, %o1, %o3
-3: EXNV(ldxa [%o1 + 0x00] %asi, %o3, add %o2, %g2)
- add %o1, 0x8, %o1
- add %o0, 0x8, %o0
- subcc %g2, 0x8, %g2
- bg,pt %icc, 3b
- stx %o3, [%o0 + -8]
+small_copy_aligned:
+ subcc %o2, 4, %o2
+ EXNV(lduwa [%o1] %asi, %g1, add %o2, %g0)
+ stw %g1, [%o1 + %o3]
+ bgu,pt %XCC, small_copy_aligned
+ add %o1, 4, %o1
- cmp %o2, 0
- bne,pn %icc, U3copy_from_user_short
- nop
- ba,a,pt %xcc, U3copy_from_user_short_ret
+out: retl
+ clr %o0
-#endif /* !(SMALL_COPY_USES_FPU) */
+ .align 32
+small_copy_unaligned:
+ subcc %o2, 1, %o2
+ EXNV(lduba [%o1] %asi, %g1, add %o2, %g0)
+ stb %g1, [%o1 + %o3]
+ bgu,pt %XCC, small_copy_unaligned
+ add %o1, 1, %o1
+ retl
+ clr %o0
-#ifdef __KERNEL__
- .globl U3cfu_fixup
U3cfu_fixup:
/* Since this is copy_from_user(), zero out the rest of the
* kernel buffer.
2: retl
mov %o1, %o0
-#endif
-/* $Id: U3copy_in_user.S,v 1.4 2001/03/21 05:58:47 davem Exp $
- * U3memcpy.S: UltraSparc-III optimized copy within userspace.
+/* U3copy_in_user.S: UltraSparc-III optimized memcpy.
*
- * Copyright (C) 1999, 2000 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
-#undef SMALL_COPY_USES_FPU
+#include <asm/dcu.h>
+#include <asm/spitfire.h>
+
+#define XCC xcc
+
#define EXNV(x,y,a,b) \
98: x,y; \
.section .fixup; \
.word 98b, 99b; \
.text; \
.align 4;
-#define EXNV2(x,y,a,b) \
+#define EXNV1(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
-#define EXNV3(x,y,a,b) \
+#define EXNV4(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
99: a, b, %o0; \
retl; \
- add %o0, 8, %o0; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
-#define EX(x,y,a,b) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: VISExitHalf; \
- retl; \
- a, b, %o0; \
+ add %o0, 4, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
-#define EXBLK1(x,y) \
+#define EXNV8(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
-99: VISExitHalf; \
- add %o4, 0x1c0, %o1; \
- and %o2, (0x40 - 1), %o2; \
- retl; \
- add %o1, %o2, %o0; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
-#define EXBLK2(x,y) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: VISExitHalf; \
- sll %o3, 6, %o3; \
- and %o2, (0x40 - 1), %o2; \
- add %o3, 0x80, %o1; \
- retl; \
- add %o1, %o2, %o0; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
-#define EXBLK3(x,y) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: VISExitHalf; \
- and %o2, (0x40 - 1), %o2; \
- retl; \
- add %o2, 0x80, %o0; \
- .section __ex_table; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4;
-#define EXBLK4(x,y) \
-98: x,y; \
- .section .fixup; \
- .align 4; \
-99: VISExitHalf; \
- and %o2, (0x40 - 1), %o2; \
+99: a, b, %o0; \
retl; \
- add %o2, 0x40, %o0; \
+ add %o0, 8, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
-#else
-#define ASI_AIUS 0x80
-#define ASI_BLK_AIUS 0xf0
-#define FPRS_FEF 0x04
-#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
-#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
-#define SMALL_COPY_USES_FPU
-#define EXNV(x,y,a,b) x,y;
-#define EXNV2(x,y,a,b) x,y;
-#define EXNV3(x,y,a,b) x,y;
-#define EX(x,y,a,b) x,y;
-#define EXBLK1(x,y) x,y;
-#define EXBLK2(x,y) x,y;
-#define EXBLK3(x,y) x,y;
-#define EXBLK4(x,y) x,y;
-#endif
- /* Special/non-trivial issues of this code:
- *
- * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
- * 2) Only low 32 FPU registers are used so that only the
- * lower half of the FPU register set is dirtied by this
- * code. This is especially important in the kernel.
- * 3) This code never prefetches cachelines past the end
- * of the source buffer.
- *
- * XXX Actually, Cheetah can buffer up to 8 concurrent
- * XXX prefetches, revisit this...
- */
+ .register %g2,#scratch
+ .register %g3,#scratch
.text
.align 32
- /* The cheetah's flexible spine, oversized liver, enlarged heart,
- * slender muscular body, and claws make it the swiftest hunter
- * in Africa and the fastest animal on land. Can reach speeds
- * of up to 2.4GB per second.
+ /* Don't try to get too fancy here, just nice and
+ * simple. This is predominantly used for well aligned
+ * small copies in the compat layer. It is also used
+ * to copy register windows around during thread cloning.
*/
- .globl U3copy_in_user
-U3copy_in_user: /* %o0=dst, %o1=src, %o2=len */
+ .globl U3copy_in_user
+U3copy_in_user: /* %o0=dst, %o1=src, %o2=len */
/* Writing to %asi is _expensive_ so we hardcode it.
* Reading %asi to check for KERNEL_DS is comparatively
* cheap.
*/
- rd %asi, %g1 ! MS Group (4 cycles)
- cmp %g1, ASI_AIUS ! A0 Group
- bne U3memcpy ! BR
- nop ! A1
-#ifndef __KERNEL__
- /* Save away original 'dst' for memcpy return value. */
- mov %o0, %g3 ! A0 Group
-#endif
- /* Anything to copy at all? */
- cmp %o2, 0 ! A1
- ble,pn %icc, U3copy_in_user_short_ret ! BR
-
- /* Extremely small copy? */
- cmp %o2, 31 ! A0 Group
- ble,pn %icc, U3copy_in_user_short ! BR
-
- /* Large enough to use unrolled prefetch loops? */
- cmp %o2, 0x100 ! A1
- bge,a,pt %icc, U3copy_in_user_enter ! BR Group
- andcc %o0, 0x3f, %g2 ! A0
-
- ba,pt %xcc, U3copy_in_user_toosmall ! BR Group
- andcc %o0, 0x7, %g2 ! A0
-
- .align 32
-U3copy_in_user_short:
- /* Copy %o2 bytes from src to dst, one byte at a time. */
- EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g0)! MS Group
- add %o1, 0x1, %o1 ! A0
- add %o0, 0x1, %o0 ! A1
- subcc %o2, 1, %o2 ! A0 Group
-
- bg,pt %icc, U3copy_in_user_short ! BR
- EXNV(stba %o3, [%o0 + -1] %asi, add %o2, 1) ! MS Group (1-cycle stall)
-
-U3copy_in_user_short_ret:
-#ifdef __KERNEL__
- retl ! BR Group (0-4 cycle stall)
- clr %o0 ! A0
-#else
- retl ! BR Group (0-4 cycle stall)
- mov %g3, %o0 ! A0
-#endif
-
- /* Here len >= (6 * 64) and condition codes reflect execution
- * of "andcc %o0, 0x7, %g2", done by caller.
- */
- .align 64
-U3copy_in_user_enter:
- /* Is 'dst' already aligned on an 64-byte boundary? */
- be,pt %xcc, 2f ! BR
-
- /* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
- * of bytes to copy to make 'dst' 64-byte aligned. We pre-
- * subtract this from 'len'.
- */
- sub %g2, 0x40, %g2 ! A0 Group
- sub %g0, %g2, %g2 ! A0 Group
- sub %o2, %g2, %o2 ! A0 Group
-
- /* Copy %g2 bytes from src to dst, one byte at a time. */
-1: EXNV(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)! MS (Group)
- add %o1, 0x1, %o1 ! A1
- add %o0, 0x1, %o0 ! A0 Group
- subcc %g2, 0x1, %g2 ! A1
-
- bg,pt %icc, 1b ! BR Group
- EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2) ! MS Group
-
-2: VISEntryHalf ! MS+MS
- and %o1, 0x7, %g1 ! A1
- ba,pt %xcc, U3copy_in_user_begin ! BR
- alignaddr %o1, %g0, %o1 ! MS (Break-after)
-
- .align 64
-U3copy_in_user_begin:
- prefetcha [%o1 + 0x000] %asi, #one_read ! MS Group1
- prefetcha [%o1 + 0x040] %asi, #one_read ! MS Group2
- andn %o2, (0x40 - 1), %o4 ! A0
- prefetcha [%o1 + 0x080] %asi, #one_read ! MS Group3
- cmp %o4, 0x140 ! A0
- prefetcha [%o1 + 0x0c0] %asi, #one_read ! MS Group4
- EX(ldda [%o1 + 0x000] %asi, %f0, add %o2, %g0) ! MS Group5 (%f0 results at G8)
- bge,a,pt %icc, 1f ! BR
-
- prefetcha [%o1 + 0x100] %asi, #one_read ! MS Group6
-1: EX(ldda [%o1 + 0x008] %asi, %f2, add %o2, %g0) ! AX (%f2 results at G9)
- cmp %o4, 0x180 ! A1
- bge,a,pt %icc, 1f ! BR
- prefetcha [%o1 + 0x140] %asi, #one_read ! MS Group7
-1: EX(ldda [%o1 + 0x010] %asi, %f4, add %o2, %g0) ! AX (%f4 results at G10)
- cmp %o4, 0x1c0 ! A1
- bge,a,pt %icc, 1f ! BR
-
- prefetcha [%o1 + 0x180] %asi, #one_read ! MS Group8
-1: faligndata %f0, %f2, %f16 ! FGA Group9 (%f16 at G12)
- EX(ldda [%o1 + 0x018] %asi, %f6, add %o2, %g0) ! AX (%f6 results at G12)
- faligndata %f2, %f4, %f18 ! FGA Group10 (%f18 results at G13)
- EX(ldda [%o1 + 0x020] %asi, %f8, add %o2, %g0) ! MS (%f8 results at G13)
- faligndata %f4, %f6, %f20 ! FGA Group12 (1-cycle stall,%f20 at G15)
- EX(ldda [%o1 + 0x028] %asi, %f10, add %o2, %g0) ! MS (%f10 results at G15)
- faligndata %f6, %f8, %f22 ! FGA Group13 (%f22 results at G16)
-
- EX(ldda [%o1 + 0x030] %asi, %f12, add %o2, %g0) ! MS (%f12 results at G16)
- faligndata %f8, %f10, %f24 ! FGA Group15 (1-cycle stall,%f24 at G18)
- EX(ldda [%o1 + 0x038] %asi, %f14, add %o2, %g0) ! MS (%f14 results at G18)
- faligndata %f10, %f12, %f26 ! FGA Group16 (%f26 results at G19)
- EX(ldda [%o1 + 0x040] %asi, %f0, add %o2, %g0) ! MS (%f0 results at G19)
-
- /* We only use the first loop if len > (7 * 64). */
- subcc %o4, 0x1c0, %o4 ! A0 Group17
- bg,pt %icc, U3copy_in_user_loop1 ! BR
- add %o1, 0x40, %o1 ! A1
-
- add %o4, 0x140, %o4 ! A0 Group18
- ba,pt %xcc, U3copy_in_user_loop2 ! BR
- srl %o4, 6, %o3 ! A0 Group19
- nop
- nop
- nop
- nop
- nop
-
- nop
- nop
-
- /* This loop performs the copy and queues new prefetches.
- * We drop into the second loop when len <= (5 * 64). Note
- * that this (5 * 64) factor has been subtracted from len
- * already.
- */
-U3copy_in_user_loop1:
- EXBLK1(ldda [%o1 + 0x008] %asi, %f2) ! MS Group2 (%f2 results at G5)
- faligndata %f12, %f14, %f28 ! FGA (%f28 results at G5)
- EXBLK1(ldda [%o1 + 0x010] %asi, %f4) ! MS Group3 (%f4 results at G6)
- faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall, %f30 at G7)
- EXBLK1(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
- EXBLK1(ldda [%o1 + 0x018] %asi, %f6) ! AX (%f6 results at G7)
-
- faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
- EXBLK1(ldda [%o1 + 0x020] %asi, %f8) ! MS (%f8 results at G15)
- faligndata %f2, %f4, %f18 ! FGA Group13 (%f18 results at G16)
- EXBLK1(ldda [%o1 + 0x028] %asi, %f10) ! MS (%f10 results at G16)
- faligndata %f4, %f6, %f20 ! FGA Group14 (%f20 results at G17)
- EXBLK1(ldda [%o1 + 0x030] %asi, %f12) ! MS (%f12 results at G17)
- faligndata %f6, %f8, %f22 ! FGA Group15 (%f22 results at G18)
- EXBLK1(ldda [%o1 + 0x038] %asi, %f14) ! MS (%f14 results at G18)
-
- faligndata %f8, %f10, %f24 ! FGA Group16 (%f24 results at G19)
- EXBLK1(ldda [%o1 + 0x040] %asi, %f0) ! AX (%f0 results at G19)
- prefetcha [%o1 + 0x180] %asi, #one_read ! MS
- faligndata %f10, %f12, %f26 ! FGA Group17 (%f26 results at G20)
- subcc %o4, 0x40, %o4 ! A0
- add %o1, 0x40, %o1 ! A1
- bg,pt %xcc, U3copy_in_user_loop1 ! BR
- add %o0, 0x40, %o0 ! A0 Group18
-
-U3copy_in_user_loop2_enter:
- mov 5, %o3 ! A1
-
- /* This loop performs on the copy, no new prefetches are
- * queued. We do things this way so that we do not perform
- * any spurious prefetches past the end of the src buffer.
- */
-U3copy_in_user_loop2:
- EXBLK2(ldda [%o1 + 0x008] %asi, %f2) ! MS
- faligndata %f12, %f14, %f28 ! FGA Group2
- EXBLK2(ldda [%o1 + 0x010] %asi, %f4) ! MS
- faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall)
- EXBLK2(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
- EXBLK2(ldda [%o1 + 0x018] %asi, %f6) ! AX
- faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
-
- EXBLK2(ldda [%o1 + 0x020] %asi, %f8) ! MS
- faligndata %f2, %f4, %f18 ! FGA Group13
- EXBLK2(ldda [%o1 + 0x028] %asi, %f10) ! MS
- faligndata %f4, %f6, %f20 ! FGA Group14
- EXBLK2(ldda [%o1 + 0x030] %asi, %f12) ! MS
- faligndata %f6, %f8, %f22 ! FGA Group15
- EXBLK2(ldda [%o1 + 0x038] %asi, %f14) ! MS
- faligndata %f8, %f10, %f24 ! FGA Group16
-
- EXBLK2(ldda [%o1 + 0x040] %asi, %f0) ! AX
- faligndata %f10, %f12, %f26 ! FGA Group17
- subcc %o3, 0x01, %o3 ! A0
- add %o1, 0x40, %o1 ! A1
- bg,pt %xcc, U3copy_in_user_loop2 ! BR
- add %o0, 0x40, %o0 ! A0 Group18
-
- /* Finally we copy the last full 64-byte block. */
-U3copy_in_user_loopfini:
- EXBLK3(ldda [%o1 + 0x008] %asi, %f2) ! MS
- faligndata %f12, %f14, %f28 ! FGA
- EXBLK3(ldda [%o1 + 0x010] %asi, %f4) ! MS Group19
- faligndata %f14, %f0, %f30 ! FGA
- EXBLK3(stda %f16, [%o0] ASI_BLK_AIUS) ! MS Group20
- EXBLK4(ldda [%o1 + 0x018] %asi, %f6) ! AX
- faligndata %f0, %f2, %f16 ! FGA Group11 (7-cycle stall)
- EXBLK4(ldda [%o1 + 0x020] %asi, %f8) ! MS
- faligndata %f2, %f4, %f18 ! FGA Group12
- EXBLK4(ldda [%o1 + 0x028] %asi, %f10) ! MS
- faligndata %f4, %f6, %f20 ! FGA Group13
- EXBLK4(ldda [%o1 + 0x030] %asi, %f12) ! MS
- faligndata %f6, %f8, %f22 ! FGA Group14
- EXBLK4(ldda [%o1 + 0x038] %asi, %f14) ! MS
- faligndata %f8, %f10, %f24 ! FGA Group15
- cmp %g1, 0 ! A0
- be,pt %icc, 1f ! BR
- add %o0, 0x40, %o0 ! A1
- EXBLK4(ldda [%o1 + 0x040] %asi, %f0) ! MS
-1: faligndata %f10, %f12, %f26 ! FGA Group16
- faligndata %f12, %f14, %f28 ! FGA Group17
- faligndata %f14, %f0, %f30 ! FGA Group18
- EXBLK4(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
- add %o0, 0x40, %o0 ! A0
- add %o1, 0x40, %o1 ! A1
- membar #Sync ! MS Group26 (7-cycle stall)
-
- /* Now we copy the (len modulo 64) bytes at the end.
- * Note how we borrow the %f0 loaded above.
- *
- * Also notice how this code is careful not to perform a
- * load past the end of the src buffer just like similar
- * code found in U3copy_in_user_toosmall processing.
- */
-U3copy_in_user_loopend:
- and %o2, 0x3f, %o2 ! A0 Group
- andcc %o2, 0x38, %g2 ! A0 Group
- be,pn %icc, U3copy_in_user_endcruft ! BR
- subcc %g2, 0x8, %g2 ! A1
- be,pn %icc, U3copy_in_user_endcruft ! BR Group
- cmp %g1, 0 ! A0
-
- be,a,pt %icc, 1f ! BR Group
- EX(ldda [%o1 + 0x00] %asi, %f0, add %o2, %g0) ! MS
-
-1: EX(ldda [%o1 + 0x08] %asi, %f2, add %o2, %g0) ! MS Group
- add %o1, 0x8, %o1 ! A0
- sub %o2, 0x8, %o2 ! A1
- subcc %g2, 0x8, %g2 ! A0 Group
- faligndata %f0, %f2, %f8 ! FGA Group
- EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS (XXX does it stall here? XXX)
- be,pn %icc, U3copy_in_user_endcruft ! BR
- add %o0, 0x8, %o0 ! A0
- EX(ldda [%o1 + 0x08] %asi, %f0, add %o2, %g0) ! MS Group
- add %o1, 0x8, %o1 ! A0
- sub %o2, 0x8, %o2 ! A1
- subcc %g2, 0x8, %g2 ! A0 Group
- faligndata %f2, %f0, %f8 ! FGA
- EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS (XXX does it stall here? XXX)
- bne,pn %icc, 1b ! BR
- add %o0, 0x8, %o0 ! A0 Group
+ rd %asi, %g1
+ cmp %g1, ASI_AIUS
+ bne,pn %icc, U3memcpy_user_stub
+ nop
- /* If anything is left, we copy it one byte at a time.
- * Note that %g1 is (src & 0x3) saved above before the
- * alignaddr was performed.
- */
-U3copy_in_user_endcruft:
cmp %o2, 0
- add %o1, %g1, %o1
- VISExitHalf
- be,pn %icc, U3copy_in_user_short_ret
+ be,pn %XCC, out
+ or %o0, %o1, %o3
+ cmp %o2, 16
+ bleu,a,pn %XCC, small_copy
+ or %o3, %o2, %o3
+
+medium_copy: /* 16 < len <= 64 */
+ andcc %o3, 0x7, %g0
+ bne,pn %XCC, small_copy_unaligned
+ sub %o0, %o1, %o3
+
+medium_copy_aligned:
+ andn %o2, 0x7, %o4
+ and %o2, 0x7, %o2
+1: subcc %o4, 0x8, %o4
+ EXNV8(ldxa [%o1] %asi, %o5, add %o4, %o2)
+ EXNV8(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o4, %o2)
+ bgu,pt %XCC, 1b
+ add %o1, 0x8, %o1
+ andcc %o2, 0x4, %g0
+ be,pt %XCC, 1f
+ nop
+ sub %o2, 0x4, %o2
+ EXNV4(lduwa [%o1] %asi, %o5, add %o4, %o2)
+ EXNV4(stwa %o5, [%o1 + %o3] ASI_AIUS, add %o4, %o2)
+ add %o1, 0x4, %o1
+1: cmp %o2, 0
+ be,pt %XCC, out
+ nop
+ ba,pt %xcc, small_copy_unaligned
nop
- ba,a,pt %xcc, U3copy_in_user_short
-
- /* If we get here, then 32 <= len < (6 * 64) */
-U3copy_in_user_toosmall:
-
-#ifdef SMALL_COPY_USES_FPU
-
- /* Is 'dst' already aligned on an 8-byte boundary? */
- be,pt %xcc, 2f ! BR Group
-
- /* Compute abs((dst & 7) - 8) into %g2. This is the number
- * of bytes to copy to make 'dst' 8-byte aligned. We pre-
- * subtract this from 'len'.
- */
- sub %g2, 0x8, %g2 ! A0
- sub %g0, %g2, %g2 ! A0 Group (reg-dep)
- sub %o2, %g2, %o2 ! A0 Group (reg-dep)
-
- /* Copy %g2 bytes from src to dst, one byte at a time. */
-1: EXNV2(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)! MS (Group) (%o3 in 3 cycles)
- add %o1, 0x1, %o1 ! A1
- add %o0, 0x1, %o0 ! A0 Group
- subcc %g2, 0x1, %g2 ! A1
-
- bg,pt %icc, 1b ! BR Group
- EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2) ! MS Group
-
-2: VISEntryHalf ! MS+MS
-
- /* Compute (len - (len % 8)) into %g2. This is guaranteed
- * to be nonzero.
- */
- andn %o2, 0x7, %g2 ! A0 Group
-
- /* You may read this and believe that it allows reading
- * one 8-byte longword past the end of src. It actually
- * does not, as %g2 is subtracted as loads are done from
- * src, so we always stop before running off the end.
- * Also, we are guaranteed to have at least 0x10 bytes
- * to move here.
- */
- sub %g2, 0x8, %g2 ! A0 Group (reg-dep)
- alignaddr %o1, %g0, %g1 ! MS (Break-after)
- EX(ldda [%g1 + 0x00] %asi, %f0, add %o2, %g0) ! MS Group (1-cycle stall)
- add %g1, 0x8, %g1 ! A0
-
-1: EX(ldda [%g1 + 0x00] %asi, %f2, add %o2, %g0) ! MS Group
- add %g1, 0x8, %g1 ! A0
- sub %o2, 0x8, %o2 ! A1
- subcc %g2, 0x8, %g2 ! A0 Group
-
- faligndata %f0, %f2, %f8 ! FGA Group (1-cycle stall)
- EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS Group (2-cycle stall)
- add %o1, 0x8, %o1 ! A0
- be,pn %icc, 2f ! BR
-
- add %o0, 0x8, %o0 ! A1
- EX(ldda [%g1 + 0x00] %asi, %f0, add %o2, %g0) ! MS Group
- add %g1, 0x8, %g1 ! A0
- sub %o2, 0x8, %o2 ! A1
-
- subcc %g2, 0x8, %g2 ! A0 Group
- faligndata %f2, %f0, %f8 ! FGA Group (1-cycle stall)
- EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS Group (2-cycle stall)
- add %o1, 0x8, %o1 ! A0
-
- bne,pn %icc, 1b ! BR
- add %o0, 0x8, %o0 ! A1
-
- /* Nothing left to copy? */
-2: cmp %o2, 0 ! A0 Group
- VISExitHalf ! A0+MS
- be,pn %icc, U3copy_in_user_short_ret ! BR Group
- nop ! A0
- ba,a,pt %xcc, U3copy_in_user_short ! BR Group
-
-#else /* !(SMALL_COPY_USES_FPU) */
-
- xor %o1, %o0, %g2
- andcc %g2, 0x7, %g0
- bne,pn %icc, U3copy_in_user_short
- andcc %o1, 0x7, %g2
-
- be,pt %xcc, 2f
- sub %g2, 0x8, %g2
- sub %g0, %g2, %g2
- sub %o2, %g2, %o2
-
-1: EXNV2(lduba [%o1 + 0x00] %asi, %o3, add %o2, %g2)
- add %o1, 0x1, %o1
- add %o0, 0x1, %o0
- subcc %g2, 0x1, %g2
- bg,pt %icc, 1b
- EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2)
-2: andn %o2, 0x7, %g2
- sub %o2, %g2, %o2
+small_copy: /* 0 < len <= 16 */
+ andcc %o3, 0x3, %g0
+ bne,pn %XCC, small_copy_unaligned
+ sub %o0, %o1, %o3
-3: EXNV3(ldxa [%o1 + 0x00] %asi, %o3, add %o2, %g2)
- add %o1, 0x8, %o1
- add %o0, 0x8, %o0
- subcc %g2, 0x8, %g2
- bg,pt %icc, 3b
- EXNV3(stxa %o3, [%o0 + -8] %asi, add %o2, %g2)
+small_copy_aligned:
+ subcc %o2, 4, %o2
+ EXNV4(lduwa [%o1] %asi, %g1, add %o2, %g0)
+ EXNV4(stwa %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
+ bgu,pt %XCC, small_copy_aligned
+ add %o1, 4, %o1
- cmp %o2, 0
- bne,pn %icc, U3copy_in_user_short
- nop
- ba,a,pt %xcc, U3copy_in_user_short_ret
+out: retl
+ clr %o0
-#endif /* !(SMALL_COPY_USES_FPU) */
+ .align 32
+small_copy_unaligned:
+ subcc %o2, 1, %o2
+ EXNV1(lduba [%o1] %asi, %g1, add %o2, %g0)
+ EXNV1(stba %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
+ bgu,pt %XCC, small_copy_unaligned
+ add %o1, 1, %o1
+ retl
+ clr %o0
-/* $Id: U3copy_to_user.S,v 1.3 2000/11/01 09:29:19 davem Exp $
- * U3memcpy.S: UltraSparc-III optimized copy to userspace.
+/* U3copy_to_user.S: UltraSparc-III optimized memcpy.
*
- * Copyright (C) 1999, 2000 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
-#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
-#undef SMALL_COPY_USES_FPU
+
+#define XCC xcc
+
#define EXNV(x,y,a,b) \
98: x,y; \
.section .fixup; \
.text; \
.align 4;
#define EXNV3(x,y,a,b) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: a, b, %o0; \
+ retl; \
+ add %o0, 4, %o0; \
+ .section __ex_table; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4;
+#define EXNV4(x,y,a,b) \
98: x,y; \
.section .fixup; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
-#else
-#define ASI_AIUS 0x80
-#define ASI_BLK_AIUS 0xf0
-#define FPRS_FEF 0x04
-#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
-#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
-#define SMALL_COPY_USES_FPU
-#define EXNV(x,y,a,b) x,y;
-#define EXNV2(x,y,a,b) x,y;
-#define EXNV3(x,y,a,b) x,y;
-#define EX(x,y,a,b) x,y;
-#define EXBLK1(x,y) x,y;
-#define EXBLK2(x,y) x,y;
-#define EXBLK3(x,y) x,y;
-#define EXBLK4(x,y) x,y;
-#endif
+
+ .register %g2,#scratch
+ .register %g3,#scratch
/* Special/non-trivial issues of this code:
*
* of up to 2.4GB per second.
*/
- .globl U3copy_to_user
-U3copy_to_user: /* %o0=dst, %o1=src, %o2=len */
+ .globl U3copy_to_user
+U3copy_to_user: /* %o0=dst, %o1=src, %o2=len */
/* Writing to %asi is _expensive_ so we hardcode it.
* Reading %asi to check for KERNEL_DS is comparatively
* cheap.
*/
- rd %asi, %g1 ! MS Group (4 cycles)
- cmp %g1, ASI_AIUS ! A0 Group
- bne U3memcpy ! BR
- nop ! A1
-#ifndef __KERNEL__
- /* Save away original 'dst' for memcpy return value. */
- mov %o0, %g3 ! A0 Group
-#endif
- /* Anything to copy at all? */
- cmp %o2, 0 ! A1
- ble,pn %icc, U3copy_to_user_short_ret ! BR
-
- /* Extremely small copy? */
- cmp %o2, 31 ! A0 Group
- ble,pn %icc, U3copy_to_user_short ! BR
-
- /* Large enough to use unrolled prefetch loops? */
- cmp %o2, 0x100 ! A1
- bge,a,pt %icc, U3copy_to_user_enter ! BR Group
- andcc %o0, 0x3f, %g2 ! A0
-
- ba,pt %xcc, U3copy_to_user_toosmall ! BR Group
- andcc %o0, 0x7, %g2 ! A0
-
- .align 32
-U3copy_to_user_short:
- /* Copy %o2 bytes from src to dst, one byte at a time. */
- ldub [%o1 + 0x00], %o3 ! MS Group
- add %o1, 0x1, %o1 ! A0
- add %o0, 0x1, %o0 ! A1
- subcc %o2, 1, %o2 ! A0 Group
-
- bg,pt %icc, U3copy_to_user_short ! BR
- EXNV(stba %o3, [%o0 + -1] %asi, add %o2, 1) ! MS Group (1-cycle stall)
-
-U3copy_to_user_short_ret:
-#ifdef __KERNEL__
- retl ! BR Group (0-4 cycle stall)
- clr %o0 ! A0
-#else
- retl ! BR Group (0-4 cycle stall)
- mov %g3, %o0 ! A0
-#endif
-
- /* Here len >= (6 * 64) and condition codes reflect execution
+ rd %asi, %g1
+ cmp %g1, ASI_AIUS
+ bne,pn %icc, U3memcpy_user_stub
+ nop
+
+ cmp %o2, 0
+ be,pn %XCC, out
+ or %o0, %o1, %o3
+ cmp %o2, 16
+ bleu,a,pn %XCC, small_copy
+ or %o3, %o2, %o3
+
+ cmp %o2, 256
+ blu,pt %XCC, medium_copy
+ andcc %o3, 0x7, %g0
+
+ ba,pt %xcc, enter
+ andcc %o0, 0x3f, %g2
+
+ /* Here len >= 256 and condition codes reflect execution
* of "andcc %o0, 0x7, %g2", done by caller.
*/
.align 64
-U3copy_to_user_enter:
+enter:
/* Is 'dst' already aligned on an 64-byte boundary? */
- be,pt %xcc, 2f ! BR
+ be,pt %XCC, 2f
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'.
*/
- sub %g2, 0x40, %g2 ! A0 Group
- sub %g0, %g2, %g2 ! A0 Group
- sub %o2, %g2, %o2 ! A0 Group
+ sub %g2, 0x40, %g2
+ sub %g0, %g2, %g2
+ sub %o2, %g2, %o2
/* Copy %g2 bytes from src to dst, one byte at a time. */
-1: ldub [%o1 + 0x00], %o3 ! MS (Group)
- add %o1, 0x1, %o1 ! A1
- add %o0, 0x1, %o0 ! A0 Group
- subcc %g2, 0x1, %g2 ! A1
+1: ldub [%o1 + 0x00], %o3
+ add %o1, 0x1, %o1
+ add %o0, 0x1, %o0
+ subcc %g2, 0x1, %g2
- bg,pt %icc, 1b ! BR Group
- EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2) ! MS Group
+ bg,pt %XCC, 1b
+ EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2)
-2: VISEntryHalf ! MS+MS
- and %o1, 0x7, %g1 ! A1
- ba,pt %xcc, U3copy_to_user_begin ! BR
- alignaddr %o1, %g0, %o1 ! MS (Break-after)
+2: VISEntryHalf
+ and %o1, 0x7, %g1
+ ba,pt %xcc, begin
+ alignaddr %o1, %g0, %o1
.align 64
-U3copy_to_user_begin:
-#ifdef __KERNEL__
+begin:
+
.globl U3copy_to_user_nop_1_6
U3copy_to_user_nop_1_6:
ldxa [%g0] ASI_DCU_CONTROL_REG, %g3
or %g3, %o3, %o3
stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
membar #Sync
-#endif
- prefetch [%o1 + 0x000], #one_read ! MS Group1
- prefetch [%o1 + 0x040], #one_read ! MS Group2
- andn %o2, (0x40 - 1), %o4 ! A0
- prefetch [%o1 + 0x080], #one_read ! MS Group3
- cmp %o4, 0x140 ! A0
- prefetch [%o1 + 0x0c0], #one_read ! MS Group4
- ldd [%o1 + 0x000], %f0 ! MS Group5 (%f0 results at G8)
- bge,a,pt %icc, 1f ! BR
-
- prefetch [%o1 + 0x100], #one_read ! MS Group6
-1: ldd [%o1 + 0x008], %f2 ! AX (%f2 results at G9)
- cmp %o4, 0x180 ! A1
- bge,a,pt %icc, 1f ! BR
- prefetch [%o1 + 0x140], #one_read ! MS Group7
-1: ldd [%o1 + 0x010], %f4 ! AX (%f4 results at G10)
- cmp %o4, 0x1c0 ! A1
- bge,a,pt %icc, 1f ! BR
-
- prefetch [%o1 + 0x180], #one_read ! MS Group8
-1: faligndata %f0, %f2, %f16 ! FGA Group9 (%f16 at G12)
- ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G12)
- faligndata %f2, %f4, %f18 ! FGA Group10 (%f18 results at G13)
- ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G13)
- faligndata %f4, %f6, %f20 ! FGA Group12 (1-cycle stall,%f20 at G15)
- ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G15)
- faligndata %f6, %f8, %f22 ! FGA Group13 (%f22 results at G16)
-
- ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G16)
- faligndata %f8, %f10, %f24 ! FGA Group15 (1-cycle stall,%f24 at G18)
- ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
- faligndata %f10, %f12, %f26 ! FGA Group16 (%f26 results at G19)
- ldd [%o1 + 0x040], %f0 ! MS (%f0 results at G19)
-
- /* We only use the first loop if len > (7 * 64). */
- subcc %o4, 0x1c0, %o4 ! A0 Group17
- bg,pt %icc, U3copy_to_user_loop1 ! BR
- add %o1, 0x40, %o1 ! A1
-
- add %o4, 0x140, %o4 ! A0 Group18
- ba,pt %xcc, U3copy_to_user_loop2 ! BR
- srl %o4, 6, %o3 ! A0 Group19
- nop
- nop
- nop
- nop
- nop
-
- nop
- nop
-
- /* This loop performs the copy and queues new prefetches.
- * We drop into the second loop when len <= (5 * 64). Note
- * that this (5 * 64) factor has been subtracted from len
- * already.
- */
-U3copy_to_user_loop1:
- ldd [%o1 + 0x008], %f2 ! MS Group2 (%f2 results at G5)
- faligndata %f12, %f14, %f28 ! FGA (%f28 results at G5)
- ldd [%o1 + 0x010], %f4 ! MS Group3 (%f4 results at G6)
- faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall, %f30 at G7)
- EXBLK1(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
- ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G7)
-
- faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
- ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G15)
- faligndata %f2, %f4, %f18 ! FGA Group13 (%f18 results at G16)
- ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G16)
- faligndata %f4, %f6, %f20 ! FGA Group14 (%f20 results at G17)
- ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G17)
- faligndata %f6, %f8, %f22 ! FGA Group15 (%f22 results at G18)
- ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
-
- faligndata %f8, %f10, %f24 ! FGA Group16 (%f24 results at G19)
- ldd [%o1 + 0x040], %f0 ! AX (%f0 results at G19)
- prefetch [%o1 + 0x180], #one_read ! MS
- faligndata %f10, %f12, %f26 ! FGA Group17 (%f26 results at G20)
- subcc %o4, 0x40, %o4 ! A0
- add %o1, 0x40, %o1 ! A1
- bg,pt %xcc, U3copy_to_user_loop1 ! BR
- add %o0, 0x40, %o0 ! A0 Group18
-
-U3copy_to_user_loop2_enter:
- mov 5, %o3 ! A1
-
- /* This loop performs on the copy, no new prefetches are
- * queued. We do things this way so that we do not perform
- * any spurious prefetches past the end of the src buffer.
- */
-U3copy_to_user_loop2:
- ldd [%o1 + 0x008], %f2 ! MS
- faligndata %f12, %f14, %f28 ! FGA Group2
- ldd [%o1 + 0x010], %f4 ! MS
- faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall)
- EXBLK2(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
- ldd [%o1 + 0x018], %f6 ! AX
- faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
-
- ldd [%o1 + 0x020], %f8 ! MS
- faligndata %f2, %f4, %f18 ! FGA Group13
- ldd [%o1 + 0x028], %f10 ! MS
- faligndata %f4, %f6, %f20 ! FGA Group14
- ldd [%o1 + 0x030], %f12 ! MS
- faligndata %f6, %f8, %f22 ! FGA Group15
- ldd [%o1 + 0x038], %f14 ! MS
- faligndata %f8, %f10, %f24 ! FGA Group16
-
- ldd [%o1 + 0x040], %f0 ! AX
- faligndata %f10, %f12, %f26 ! FGA Group17
- subcc %o3, 0x01, %o3 ! A0
- add %o1, 0x40, %o1 ! A1
- bg,pt %xcc, U3copy_to_user_loop2 ! BR
- add %o0, 0x40, %o0 ! A0 Group18
+
+ prefetch [%o1 + 0x000], #one_read
+ prefetch [%o1 + 0x040], #one_read
+ andn %o2, (0x40 - 1), %o4
+ prefetch [%o1 + 0x080], #one_read
+ prefetch [%o1 + 0x0c0], #one_read
+ ldd [%o1 + 0x000], %f0
+ prefetch [%o1 + 0x100], #one_read
+ ldd [%o1 + 0x008], %f2
+ prefetch [%o1 + 0x140], #one_read
+ ldd [%o1 + 0x010], %f4
+ prefetch [%o1 + 0x180], #one_read
+ faligndata %f0, %f2, %f16
+ ldd [%o1 + 0x018], %f6
+ faligndata %f2, %f4, %f18
+ ldd [%o1 + 0x020], %f8
+ faligndata %f4, %f6, %f20
+ ldd [%o1 + 0x028], %f10
+ faligndata %f6, %f8, %f22
+
+ ldd [%o1 + 0x030], %f12
+ faligndata %f8, %f10, %f24
+ ldd [%o1 + 0x038], %f14
+ faligndata %f10, %f12, %f26
+ ldd [%o1 + 0x040], %f0
+
+ sub %o4, 0x80, %o4
+ add %o1, 0x40, %o1
+ ba,pt %xcc, loop
+ srl %o4, 6, %o3
+
+ .align 64
+loop:
+ ldd [%o1 + 0x008], %f2
+ faligndata %f12, %f14, %f28
+ ldd [%o1 + 0x010], %f4
+ faligndata %f14, %f0, %f30
+ EXBLK2(stda %f16, [%o0] ASI_BLK_AIUS)
+ ldd [%o1 + 0x018], %f6
+ faligndata %f0, %f2, %f16
+
+ ldd [%o1 + 0x020], %f8
+ faligndata %f2, %f4, %f18
+ ldd [%o1 + 0x028], %f10
+ faligndata %f4, %f6, %f20
+ ldd [%o1 + 0x030], %f12
+ faligndata %f6, %f8, %f22
+ ldd [%o1 + 0x038], %f14
+ faligndata %f8, %f10, %f24
+
+ ldd [%o1 + 0x040], %f0
+ prefetch [%o1 + 0x180], #one_read
+ faligndata %f10, %f12, %f26
+ subcc %o3, 0x01, %o3
+ add %o1, 0x40, %o1
+ bg,pt %XCC, loop
+ add %o0, 0x40, %o0
/* Finally we copy the last full 64-byte block. */
-U3copy_to_user_loopfini:
- ldd [%o1 + 0x008], %f2 ! MS
- faligndata %f12, %f14, %f28 ! FGA
- ldd [%o1 + 0x010], %f4 ! MS Group19
- faligndata %f14, %f0, %f30 ! FGA
- EXBLK3(stda %f16, [%o0] ASI_BLK_AIUS) ! MS Group20
- ldd [%o1 + 0x018], %f6 ! AX
- faligndata %f0, %f2, %f16 ! FGA Group11 (7-cycle stall)
- ldd [%o1 + 0x020], %f8 ! MS
- faligndata %f2, %f4, %f18 ! FGA Group12
- ldd [%o1 + 0x028], %f10 ! MS
- faligndata %f4, %f6, %f20 ! FGA Group13
- ldd [%o1 + 0x030], %f12 ! MS
- faligndata %f6, %f8, %f22 ! FGA Group14
- ldd [%o1 + 0x038], %f14 ! MS
- faligndata %f8, %f10, %f24 ! FGA Group15
- cmp %g1, 0 ! A0
- be,pt %icc, 1f ! BR
- add %o0, 0x40, %o0 ! A1
- ldd [%o1 + 0x040], %f0 ! MS
-1: faligndata %f10, %f12, %f26 ! FGA Group16
- faligndata %f12, %f14, %f28 ! FGA Group17
- faligndata %f14, %f0, %f30 ! FGA Group18
- EXBLK4(stda %f16, [%o0] ASI_BLK_AIUS) ! MS
- add %o0, 0x40, %o0 ! A0
- add %o1, 0x40, %o1 ! A1
-#ifdef __KERNEL__
+loopfini:
+ ldd [%o1 + 0x008], %f2
+ faligndata %f12, %f14, %f28
+ ldd [%o1 + 0x010], %f4
+ faligndata %f14, %f0, %f30
+ EXBLK3(stda %f16, [%o0] ASI_BLK_AIUS)
+ ldd [%o1 + 0x018], %f6
+ faligndata %f0, %f2, %f16
+ ldd [%o1 + 0x020], %f8
+ faligndata %f2, %f4, %f18
+ ldd [%o1 + 0x028], %f10
+ faligndata %f4, %f6, %f20
+ ldd [%o1 + 0x030], %f12
+ faligndata %f6, %f8, %f22
+ ldd [%o1 + 0x038], %f14
+ faligndata %f8, %f10, %f24
+ cmp %g1, 0
+ be,pt %XCC, 1f
+ add %o0, 0x40, %o0
+ ldd [%o1 + 0x040], %f0
+1: faligndata %f10, %f12, %f26
+ faligndata %f12, %f14, %f28
+ faligndata %f14, %f0, %f30
+ EXBLK4(stda %f16, [%o0] ASI_BLK_AIUS)
+ add %o0, 0x40, %o0
+ add %o1, 0x40, %o1
+
.globl U3copy_to_user_nop_2_3
U3copy_to_user_nop_2_3:
mov PRIMARY_CONTEXT, %o3
stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
-#endif
- membar #Sync ! MS Group26 (7-cycle stall)
+
+ membar #Sync
/* Now we copy the (len modulo 64) bytes at the end.
* Note how we borrow the %f0 loaded above.
*
* Also notice how this code is careful not to perform a
- * load past the end of the src buffer just like similar
- * code found in U3copy_to_user_toosmall processing.
+ * load past the end of the src buffer.
*/
-U3copy_to_user_loopend:
- and %o2, 0x3f, %o2 ! A0 Group
- andcc %o2, 0x38, %g2 ! A0 Group
- be,pn %icc, U3copy_to_user_endcruft ! BR
- subcc %g2, 0x8, %g2 ! A1
- be,pn %icc, U3copy_to_user_endcruft ! BR Group
- cmp %g1, 0 ! A0
-
- be,a,pt %icc, 1f ! BR Group
- ldd [%o1 + 0x00], %f0 ! MS
-
-1: ldd [%o1 + 0x08], %f2 ! MS Group
- add %o1, 0x8, %o1 ! A0
- sub %o2, 0x8, %o2 ! A1
- subcc %g2, 0x8, %g2 ! A0 Group
- faligndata %f0, %f2, %f8 ! FGA Group
- EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS (XXX does it stall here? XXX)
- be,pn %icc, U3copy_to_user_endcruft ! BR
- add %o0, 0x8, %o0 ! A0
- ldd [%o1 + 0x08], %f0 ! MS Group
- add %o1, 0x8, %o1 ! A0
- sub %o2, 0x8, %o2 ! A1
- subcc %g2, 0x8, %g2 ! A0 Group
- faligndata %f2, %f0, %f8 ! FGA
- EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS (XXX does it stall here? XXX)
- bne,pn %icc, 1b ! BR
- add %o0, 0x8, %o0 ! A0 Group
+loopend:
+ and %o2, 0x3f, %o2
+ andcc %o2, 0x38, %g2
+ be,pn %XCC, endcruft
+ subcc %g2, 0x8, %g2
+ be,pn %XCC, endcruft
+ cmp %g1, 0
+
+ be,a,pt %XCC, 1f
+ ldd [%o1 + 0x00], %f0
+
+1: ldd [%o1 + 0x08], %f2
+ add %o1, 0x8, %o1
+ sub %o2, 0x8, %o2
+ subcc %g2, 0x8, %g2
+ faligndata %f0, %f2, %f8
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8)
+ be,pn %XCC, endcruft
+ add %o0, 0x8, %o0
+ ldd [%o1 + 0x08], %f0
+ add %o1, 0x8, %o1
+ sub %o2, 0x8, %o2
+ subcc %g2, 0x8, %g2
+ faligndata %f2, %f0, %f8
+ EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8)
+ bne,pn %XCC, 1b
+ add %o0, 0x8, %o0
/* If anything is left, we copy it one byte at a time.
* Note that %g1 is (src & 0x3) saved above before the
* alignaddr was performed.
*/
-U3copy_to_user_endcruft:
+endcruft:
cmp %o2, 0
add %o1, %g1, %o1
VISExitHalf
- be,pn %icc, U3copy_to_user_short_ret
- nop
- ba,a,pt %xcc, U3copy_to_user_short
-
- /* If we get here, then 32 <= len < (6 * 64) */
-U3copy_to_user_toosmall:
-
-#ifdef SMALL_COPY_USES_FPU
-
- /* Is 'dst' already aligned on an 8-byte boundary? */
- be,pt %xcc, 2f ! BR Group
-
- /* Compute abs((dst & 7) - 8) into %g2. This is the number
- * of bytes to copy to make 'dst' 8-byte aligned. We pre-
- * subtract this from 'len'.
- */
- sub %g2, 0x8, %g2 ! A0
- sub %g0, %g2, %g2 ! A0 Group (reg-dep)
- sub %o2, %g2, %o2 ! A0 Group (reg-dep)
-
- /* Copy %g2 bytes from src to dst, one byte at a time. */
-1: ldub [%o1 + 0x00], %o3 ! MS (Group) (%o3 in 3 cycles)
- add %o1, 0x1, %o1 ! A1
- add %o0, 0x1, %o0 ! A0 Group
- subcc %g2, 0x1, %g2 ! A1
+ be,pn %XCC, out
+ sub %o0, %o1, %o3
- bg,pt %icc, 1b ! BR Group
- EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2) ! MS Group
+ andcc %g1, 0x7, %g0
+ bne,pn %icc, small_copy_unaligned
+ andcc %o2, 0x8, %g0
+ be,pt %icc, 1f
+ nop
+ ldx [%o1], %o5
+ EXNV(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
+ add %o1, 0x8, %o1
-2: VISEntryHalf ! MS+MS
+1: andcc %o2, 0x4, %g0
+ be,pt %icc, 1f
+ nop
+ lduw [%o1], %o5
+ EXNV(stwa %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x7)
+ add %o1, 0x4, %o1
- /* Compute (len - (len % 8)) into %g2. This is guaranteed
- * to be nonzero.
- */
- andn %o2, 0x7, %g2 ! A0 Group
-
- /* You may read this and believe that it allows reading
- * one 8-byte longword past the end of src. It actually
- * does not, as %g2 is subtracted as loads are done from
- * src, so we always stop before running off the end.
- * Also, we are guaranteed to have at least 0x10 bytes
- * to move here.
- */
- sub %g2, 0x8, %g2 ! A0 Group (reg-dep)
- alignaddr %o1, %g0, %g1 ! MS (Break-after)
- ldd [%g1 + 0x00], %f0 ! MS Group (1-cycle stall)
- add %g1, 0x8, %g1 ! A0
-
-1: ldd [%g1 + 0x00], %f2 ! MS Group
- add %g1, 0x8, %g1 ! A0
- sub %o2, 0x8, %o2 ! A1
- subcc %g2, 0x8, %g2 ! A0 Group
-
- faligndata %f0, %f2, %f8 ! FGA Group (1-cycle stall)
- EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS Group (2-cycle stall)
- add %o1, 0x8, %o1 ! A0
- be,pn %icc, 2f ! BR
-
- add %o0, 0x8, %o0 ! A1
- ldd [%g1 + 0x00], %f0 ! MS Group
- add %g1, 0x8, %g1 ! A0
- sub %o2, 0x8, %o2 ! A1
-
- subcc %g2, 0x8, %g2 ! A0 Group
- faligndata %f2, %f0, %f8 ! FGA Group (1-cycle stall)
- EX(stda %f8, [%o0 + 0x00] %asi, add %o2, 0x8) ! MS Group (2-cycle stall)
- add %o1, 0x8, %o1 ! A0
-
- bne,pn %icc, 1b ! BR
- add %o0, 0x8, %o0 ! A1
-
- /* Nothing left to copy? */
-2: cmp %o2, 0 ! A0 Group
- VISExitHalf ! A0+MS
- be,pn %icc, U3copy_to_user_short_ret ! BR Group
- nop ! A0
- ba,a,pt %xcc, U3copy_to_user_short ! BR Group
-
-#else /* !(SMALL_COPY_USES_FPU) */
-
- xor %o1, %o0, %g2
- andcc %g2, 0x7, %g0
- bne,pn %icc, U3copy_to_user_short
- andcc %o1, 0x7, %g2
-
- be,pt %xcc, 2f
- sub %g2, 0x8, %g2
- sub %g0, %g2, %g2
- sub %o2, %g2, %o2
+1: andcc %o2, 0x2, %g0
+ be,pt %icc, 1f
+ nop
+ lduh [%o1], %o5
+ EXNV(stha %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x3)
+ add %o1, 0x2, %o1
-1: ldub [%o1 + 0x00], %o3
- add %o1, 0x1, %o1
- add %o0, 0x1, %o0
- subcc %g2, 0x1, %g2
- bg,pt %icc, 1b
- EXNV2(stba %o3, [%o0 + -1] %asi, add %o2, %g2)
+1: andcc %o2, 0x1, %g0
+ be,pt %icc, out
+ nop
+ ldub [%o1], %o5
+ ba,pt %xcc, out
+ EXNV(stba %o5, [%o1 + %o3] ASI_AIUS, and %o2, 0x1)
+
+medium_copy: /* 16 < len <= 64 */
+ bne,pn %XCC, small_copy_unaligned
+ sub %o0, %o1, %o3
+
+medium_copy_aligned:
+ andn %o2, 0x7, %o4
+ and %o2, 0x7, %o2
+1: subcc %o4, 0x8, %o4
+ ldx [%o1], %o5
+ EXNV4(stxa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %o4)
+ bgu,pt %XCC, 1b
+ add %o1, 0x8, %o1
+ andcc %o2, 0x4, %g0
+ be,pt %XCC, 1f
+ nop
+ sub %o2, 0x4, %o2
+ lduw [%o1], %o5
+ EXNV3(stwa %o5, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
+ add %o1, 0x4, %o1
+1: cmp %o2, 0
+ be,pt %XCC, out
+ nop
+ ba,pt %xcc, small_copy_unaligned
+ nop
-2: andn %o2, 0x7, %g2
- sub %o2, %g2, %o2
+small_copy: /* 0 < len <= 16 */
+ andcc %o3, 0x3, %g0
+ bne,pn %XCC, small_copy_unaligned
+ sub %o0, %o1, %o3
-3: ldx [%o1 + 0x00], %o3
- add %o1, 0x8, %o1
- add %o0, 0x8, %o0
- subcc %g2, 0x8, %g2
- bg,pt %icc, 3b
- EXNV3(stxa %o3, [%o0 + -8] %asi, add %o2, %g2)
+small_copy_aligned:
+ subcc %o2, 4, %o2
+ lduw [%o1], %g1
+ EXNV3(stwa %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
+ bgu,pt %XCC, small_copy_aligned
+ add %o1, 4, %o1
- cmp %o2, 0
- bne,pn %icc, U3copy_to_user_short
- nop
- ba,a,pt %xcc, U3copy_to_user_short_ret
+out: retl
+ clr %o0
-#endif /* !(SMALL_COPY_USES_FPU) */
+ .align 32
+small_copy_unaligned:
+ subcc %o2, 1, %o2
+ ldub [%o1], %g1
+ EXNV2(stba %g1, [%o1 + %o3] ASI_AIUS, add %o2, %g0)
+ bgu,pt %XCC, small_copy_unaligned
+ add %o1, 1, %o1
+ retl
+ clr %o0
-/* $Id: U3memcpy.S,v 1.2 2000/11/01 09:29:19 davem Exp $
- * U3memcpy.S: UltraSparc-III optimized memcpy.
+/* U3memcpy.S: UltraSparc-III optimized memcpy.
*
- * Copyright (C) 1999, 2000 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
#ifdef __KERNEL__
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
-#undef SMALL_COPY_USES_FPU
#else
#define ASI_BLK_P 0xf0
#define FPRS_FEF 0x04
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
-#define SMALL_COPY_USES_FPU
#endif
+#ifndef XCC
+#define XCC xcc
+#endif
+
+ .register %g2,#scratch
+ .register %g3,#scratch
+
/* Special/non-trivial issues of this code:
*
* 1) %o5 is preserved from VISEntryHalf to VISExitHalf
* of up to 2.4GB per second.
*/
- .globl U3memcpy
-U3memcpy: /* %o0=dst, %o1=src, %o2=len */
-#ifndef __KERNEL__
- /* Save away original 'dst' for memcpy return value. */
- mov %o0, %g3 ! A0 Group
-#endif
- /* Anything to copy at all? */
- cmp %o2, 0 ! A1
- ble,pn %icc, U3memcpy_short_ret ! BR
-
- /* Extremely small copy? */
- cmp %o2, 31 ! A0 Group
- ble,pn %icc, U3memcpy_short ! BR
-
- /* Large enough to use unrolled prefetch loops? */
- cmp %o2, 0x100 ! A1
- bge,a,pt %icc, U3memcpy_enter ! BR Group
- andcc %o0, 0x3f, %g2 ! A0
-
- ba,pt %xcc, U3memcpy_toosmall ! BR Group
- andcc %o0, 0x7, %g2 ! A0
-
- .align 32
-U3memcpy_short:
- /* Copy %o2 bytes from src to dst, one byte at a time. */
- ldub [%o1 + 0x00], %o3 ! MS Group
- add %o1, 0x1, %o1 ! A0
- add %o0, 0x1, %o0 ! A1
- subcc %o2, 1, %o2 ! A0 Group
-
- bg,pt %icc, U3memcpy_short ! BR
- stb %o3, [%o0 + -1] ! MS Group (1-cycle stall)
-
-U3memcpy_short_ret:
-#ifdef __KERNEL__
- retl ! BR Group (0-4 cycle stall)
- clr %o0 ! A0
-#else
- retl ! BR Group (0-4 cycle stall)
- mov %g3, %o0 ! A0
-#endif
+ .globl U3memcpy
+U3memcpy: /* %o0=dst, %o1=src, %o2=len */
+ mov %o0, %g5
+ cmp %o2, 0
+ be,pn %XCC, out
+ or %o0, %o1, %o3
+ cmp %o2, 16
+ bleu,a,pn %XCC, small_copy
+ or %o3, %o2, %o3
- /* Here len >= (6 * 64) and condition codes reflect execution
+ cmp %o2, 256
+ blu,pt %XCC, medium_copy
+ andcc %o3, 0x7, %g0
+
+ ba,pt %xcc, enter
+ andcc %o0, 0x3f, %g2
+
+ /* Here len >= 256 and condition codes reflect execution
* of "andcc %o0, 0x7, %g2", done by caller.
*/
.align 64
-U3memcpy_enter:
+enter:
/* Is 'dst' already aligned on an 64-byte boundary? */
- be,pt %xcc, 2f ! BR
+ be,pt %XCC, 2f
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'.
*/
- sub %g2, 0x40, %g2 ! A0 Group
- sub %g0, %g2, %g2 ! A0 Group
- sub %o2, %g2, %o2 ! A0 Group
+ sub %g2, 0x40, %g2
+ sub %g0, %g2, %g2
+ sub %o2, %g2, %o2
/* Copy %g2 bytes from src to dst, one byte at a time. */
-1: ldub [%o1 + 0x00], %o3 ! MS (Group)
- add %o1, 0x1, %o1 ! A1
- add %o0, 0x1, %o0 ! A0 Group
- subcc %g2, 0x1, %g2 ! A1
+1: ldub [%o1 + 0x00], %o3
+ add %o1, 0x1, %o1
+ add %o0, 0x1, %o0
+ subcc %g2, 0x1, %g2
- bg,pt %icc, 1b ! BR Group
- stb %o3, [%o0 + -1] ! MS Group
+ bg,pt %XCC, 1b
+ stb %o3, [%o0 + -1]
-2: VISEntryHalf ! MS+MS
- and %o1, 0x7, %g1 ! A1
- ba,pt %xcc, U3memcpy_begin ! BR
- alignaddr %o1, %g0, %o1 ! MS (Break-after)
+2: VISEntryHalf
+ and %o1, 0x7, %g1
+ ba,pt %xcc, begin
+ alignaddr %o1, %g0, %o1
.align 64
-U3memcpy_begin:
+begin:
#ifdef __KERNEL__
.globl U3memcpy_nop_1_6
U3memcpy_nop_1_6:
stxa %o3, [%g0] ASI_DCU_CONTROL_REG ! Enable P-cache
membar #Sync
#endif
- prefetch [%o1 + 0x000], #one_read ! MS Group1
- prefetch [%o1 + 0x040], #one_read ! MS Group2
- andn %o2, (0x40 - 1), %o4 ! A0
- prefetch [%o1 + 0x080], #one_read ! MS Group3
- cmp %o4, 0x140 ! A0
- prefetch [%o1 + 0x0c0], #one_read ! MS Group4
- ldd [%o1 + 0x000], %f0 ! MS Group5 (%f0 results at G8)
- bge,a,pt %icc, 1f ! BR
-
- prefetch [%o1 + 0x100], #one_read ! MS Group6
-1: ldd [%o1 + 0x008], %f2 ! AX (%f2 results at G9)
- cmp %o4, 0x180 ! A1
- bge,a,pt %icc, 1f ! BR
- prefetch [%o1 + 0x140], #one_read ! MS Group7
-1: ldd [%o1 + 0x010], %f4 ! AX (%f4 results at G10)
- cmp %o4, 0x1c0 ! A1
- bge,a,pt %icc, 1f ! BR
-
- prefetch [%o1 + 0x180], #one_read ! MS Group8
-1: faligndata %f0, %f2, %f16 ! FGA Group9 (%f16 at G12)
- ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G12)
- faligndata %f2, %f4, %f18 ! FGA Group10 (%f18 results at G13)
- ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G13)
- faligndata %f4, %f6, %f20 ! FGA Group12 (1-cycle stall,%f20 at G15)
- ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G15)
- faligndata %f6, %f8, %f22 ! FGA Group13 (%f22 results at G16)
-
- ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G16)
- faligndata %f8, %f10, %f24 ! FGA Group15 (1-cycle stall,%f24 at G18)
- ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
- faligndata %f10, %f12, %f26 ! FGA Group16 (%f26 results at G19)
- ldd [%o1 + 0x040], %f0 ! MS (%f0 results at G19)
-
- /* We only use the first loop if len > (7 * 64). */
- subcc %o4, 0x1c0, %o4 ! A0 Group17
- bg,pt %icc, U3memcpy_loop1 ! BR
- add %o1, 0x40, %o1 ! A1
-
- add %o4, 0x140, %o4 ! A0 Group18
- ba,pt %xcc, U3memcpy_loop2 ! BR
- srl %o4, 6, %o3 ! A0 Group19
- nop
- nop
- nop
- nop
- nop
-
- nop
- nop
-
- /* This loop performs the copy and queues new prefetches.
- * We drop into the second loop when len <= (5 * 64). Note
- * that this (5 * 64) factor has been subtracted from len
- * already.
- */
-U3memcpy_loop1:
- ldd [%o1 + 0x008], %f2 ! MS Group2 (%f2 results at G5)
- faligndata %f12, %f14, %f28 ! FGA (%f28 results at G5)
- ldd [%o1 + 0x010], %f4 ! MS Group3 (%f4 results at G6)
- faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall, %f30 at G7)
- stda %f16, [%o0] ASI_BLK_P ! MS
- ldd [%o1 + 0x018], %f6 ! AX (%f6 results at G7)
-
- faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
- ldd [%o1 + 0x020], %f8 ! MS (%f8 results at G15)
- faligndata %f2, %f4, %f18 ! FGA Group13 (%f18 results at G16)
- ldd [%o1 + 0x028], %f10 ! MS (%f10 results at G16)
- faligndata %f4, %f6, %f20 ! FGA Group14 (%f20 results at G17)
- ldd [%o1 + 0x030], %f12 ! MS (%f12 results at G17)
- faligndata %f6, %f8, %f22 ! FGA Group15 (%f22 results at G18)
- ldd [%o1 + 0x038], %f14 ! MS (%f14 results at G18)
-
- faligndata %f8, %f10, %f24 ! FGA Group16 (%f24 results at G19)
- ldd [%o1 + 0x040], %f0 ! AX (%f0 results at G19)
- prefetch [%o1 + 0x180], #one_read ! MS
- faligndata %f10, %f12, %f26 ! FGA Group17 (%f26 results at G20)
- subcc %o4, 0x40, %o4 ! A0
- add %o1, 0x40, %o1 ! A1
- bg,pt %xcc, U3memcpy_loop1 ! BR
- add %o0, 0x40, %o0 ! A0 Group18
-
-U3memcpy_loop2_enter:
- mov 5, %o3 ! A1
-
- /* This loop performs on the copy, no new prefetches are
- * queued. We do things this way so that we do not perform
- * any spurious prefetches past the end of the src buffer.
- */
-U3memcpy_loop2:
- ldd [%o1 + 0x008], %f2 ! MS
- faligndata %f12, %f14, %f28 ! FGA Group2
- ldd [%o1 + 0x010], %f4 ! MS
- faligndata %f14, %f0, %f30 ! FGA Group4 (1-cycle stall)
- stda %f16, [%o0] ASI_BLK_P ! MS
- ldd [%o1 + 0x018], %f6 ! AX
- faligndata %f0, %f2, %f16 ! FGA Group12 (7-cycle stall)
-
- ldd [%o1 + 0x020], %f8 ! MS
- faligndata %f2, %f4, %f18 ! FGA Group13
- ldd [%o1 + 0x028], %f10 ! MS
- faligndata %f4, %f6, %f20 ! FGA Group14
- ldd [%o1 + 0x030], %f12 ! MS
- faligndata %f6, %f8, %f22 ! FGA Group15
- ldd [%o1 + 0x038], %f14 ! MS
- faligndata %f8, %f10, %f24 ! FGA Group16
-
- ldd [%o1 + 0x040], %f0 ! AX
- faligndata %f10, %f12, %f26 ! FGA Group17
- subcc %o3, 0x01, %o3 ! A0
- add %o1, 0x40, %o1 ! A1
- bg,pt %xcc, U3memcpy_loop2 ! BR
- add %o0, 0x40, %o0 ! A0 Group18
+ prefetch [%o1 + 0x000], #one_read
+ prefetch [%o1 + 0x040], #one_read
+ andn %o2, (0x40 - 1), %o4
+ prefetch [%o1 + 0x080], #one_read
+ prefetch [%o1 + 0x0c0], #one_read
+ ldd [%o1 + 0x000], %f0
+ prefetch [%o1 + 0x100], #one_read
+ ldd [%o1 + 0x008], %f2
+ prefetch [%o1 + 0x140], #one_read
+ ldd [%o1 + 0x010], %f4
+ prefetch [%o1 + 0x180], #one_read
+ faligndata %f0, %f2, %f16
+ ldd [%o1 + 0x018], %f6
+ faligndata %f2, %f4, %f18
+ ldd [%o1 + 0x020], %f8
+ faligndata %f4, %f6, %f20
+ ldd [%o1 + 0x028], %f10
+ faligndata %f6, %f8, %f22
+
+ ldd [%o1 + 0x030], %f12
+ faligndata %f8, %f10, %f24
+ ldd [%o1 + 0x038], %f14
+ faligndata %f10, %f12, %f26
+ ldd [%o1 + 0x040], %f0
+
+ sub %o4, 0x80, %o4
+ add %o1, 0x40, %o1
+ ba,pt %xcc, loop
+ srl %o4, 6, %o3
+
+ .align 64
+loop:
+ ldd [%o1 + 0x008], %f2
+ faligndata %f12, %f14, %f28
+ ldd [%o1 + 0x010], %f4
+ faligndata %f14, %f0, %f30
+ stda %f16, [%o0] ASI_BLK_P
+ ldd [%o1 + 0x018], %f6
+ faligndata %f0, %f2, %f16
+
+ ldd [%o1 + 0x020], %f8
+ faligndata %f2, %f4, %f18
+ ldd [%o1 + 0x028], %f10
+ faligndata %f4, %f6, %f20
+ ldd [%o1 + 0x030], %f12
+ faligndata %f6, %f8, %f22
+ ldd [%o1 + 0x038], %f14
+ faligndata %f8, %f10, %f24
+
+ ldd [%o1 + 0x040], %f0
+ prefetch [%o1 + 0x180], #one_read
+ faligndata %f10, %f12, %f26
+ subcc %o3, 0x01, %o3
+ add %o1, 0x40, %o1
+ bg,pt %XCC, loop
+ add %o0, 0x40, %o0
/* Finally we copy the last full 64-byte block. */
-U3memcpy_loopfini:
- ldd [%o1 + 0x008], %f2 ! MS
- faligndata %f12, %f14, %f28 ! FGA
- ldd [%o1 + 0x010], %f4 ! MS Group19
- faligndata %f14, %f0, %f30 ! FGA
- stda %f16, [%o0] ASI_BLK_P ! MS Group20
- ldd [%o1 + 0x018], %f6 ! AX
- faligndata %f0, %f2, %f16 ! FGA Group11 (7-cycle stall)
- ldd [%o1 + 0x020], %f8 ! MS
- faligndata %f2, %f4, %f18 ! FGA Group12
- ldd [%o1 + 0x028], %f10 ! MS
- faligndata %f4, %f6, %f20 ! FGA Group13
- ldd [%o1 + 0x030], %f12 ! MS
- faligndata %f6, %f8, %f22 ! FGA Group14
- ldd [%o1 + 0x038], %f14 ! MS
- faligndata %f8, %f10, %f24 ! FGA Group15
- cmp %g1, 0 ! A0
- be,pt %icc, 1f ! BR
- add %o0, 0x40, %o0 ! A1
- ldd [%o1 + 0x040], %f0 ! MS
-1: faligndata %f10, %f12, %f26 ! FGA Group16
- faligndata %f12, %f14, %f28 ! FGA Group17
- faligndata %f14, %f0, %f30 ! FGA Group18
- stda %f16, [%o0] ASI_BLK_P ! MS
- add %o0, 0x40, %o0 ! A0
- add %o1, 0x40, %o1 ! A1
+loopfini:
+ ldd [%o1 + 0x008], %f2
+ faligndata %f12, %f14, %f28
+ ldd [%o1 + 0x010], %f4
+ faligndata %f14, %f0, %f30
+ stda %f16, [%o0] ASI_BLK_P
+ ldd [%o1 + 0x018], %f6
+ faligndata %f0, %f2, %f16
+ ldd [%o1 + 0x020], %f8
+ faligndata %f2, %f4, %f18
+ ldd [%o1 + 0x028], %f10
+ faligndata %f4, %f6, %f20
+ ldd [%o1 + 0x030], %f12
+ faligndata %f6, %f8, %f22
+ ldd [%o1 + 0x038], %f14
+ faligndata %f8, %f10, %f24
+ cmp %g1, 0
+ be,pt %XCC, 1f
+ add %o0, 0x40, %o0
+ ldd [%o1 + 0x040], %f0
+1: faligndata %f10, %f12, %f26
+ faligndata %f12, %f14, %f28
+ faligndata %f14, %f0, %f30
+ stda %f16, [%o0] ASI_BLK_P
+ add %o0, 0x40, %o0
+ add %o1, 0x40, %o1
#ifdef __KERNEL__
.globl U3memcpy_nop_2_3
U3memcpy_nop_2_3:
stxa %g0, [%o3] ASI_DMMU ! Flush P-cache
stxa %g3, [%g0] ASI_DCU_CONTROL_REG ! Disable P-cache
#endif
- membar #Sync ! MS Group26 (7-cycle stall)
+ membar #Sync
/* Now we copy the (len modulo 64) bytes at the end.
* Note how we borrow the %f0 loaded above.
*
* Also notice how this code is careful not to perform a
- * load past the end of the src buffer just like similar
- * code found in U3memcpy_toosmall processing.
+ * load past the end of the src buffer.
*/
-U3memcpy_loopend:
- and %o2, 0x3f, %o2 ! A0 Group
- andcc %o2, 0x38, %g2 ! A0 Group
- be,pn %icc, U3memcpy_endcruft ! BR
- subcc %g2, 0x8, %g2 ! A1
- be,pn %icc, U3memcpy_endcruft ! BR Group
- cmp %g1, 0 ! A0
-
- be,a,pt %icc, 1f ! BR Group
- ldd [%o1 + 0x00], %f0 ! MS
-
-1: ldd [%o1 + 0x08], %f2 ! MS Group
- add %o1, 0x8, %o1 ! A0
- sub %o2, 0x8, %o2 ! A1
- subcc %g2, 0x8, %g2 ! A0 Group
- faligndata %f0, %f2, %f8 ! FGA Group
- std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
- be,pn %icc, U3memcpy_endcruft ! BR
- add %o0, 0x8, %o0 ! A0
- ldd [%o1 + 0x08], %f0 ! MS Group
- add %o1, 0x8, %o1 ! A0
- sub %o2, 0x8, %o2 ! A1
- subcc %g2, 0x8, %g2 ! A0 Group
- faligndata %f2, %f0, %f8 ! FGA
- std %f8, [%o0 + 0x00] ! MS (XXX does it stall here? XXX)
- bne,pn %icc, 1b ! BR
- add %o0, 0x8, %o0 ! A0 Group
+loopend:
+ and %o2, 0x3f, %o2
+ andcc %o2, 0x38, %g2
+ be,pn %XCC, endcruft
+ subcc %g2, 0x8, %g2
+ be,pn %XCC, endcruft
+ cmp %g1, 0
+
+ be,a,pt %XCC, 1f
+ ldd [%o1 + 0x00], %f0
+
+1: ldd [%o1 + 0x08], %f2
+ add %o1, 0x8, %o1
+ sub %o2, 0x8, %o2
+ subcc %g2, 0x8, %g2
+ faligndata %f0, %f2, %f8
+ std %f8, [%o0 + 0x00]
+ be,pn %XCC, endcruft
+ add %o0, 0x8, %o0
+ ldd [%o1 + 0x08], %f0
+ add %o1, 0x8, %o1
+ sub %o2, 0x8, %o2
+ subcc %g2, 0x8, %g2
+ faligndata %f2, %f0, %f8
+ std %f8, [%o0 + 0x00]
+ bne,pn %XCC, 1b
+ add %o0, 0x8, %o0
/* If anything is left, we copy it one byte at a time.
* Note that %g1 is (src & 0x3) saved above before the
* alignaddr was performed.
*/
-U3memcpy_endcruft:
+endcruft:
cmp %o2, 0
add %o1, %g1, %o1
VISExitHalf
- be,pn %icc, U3memcpy_short_ret
- nop
- ba,a,pt %xcc, U3memcpy_short
+ be,pn %XCC, out
+ sub %o0, %o1, %o3
- /* If we get here, then 32 <= len < (6 * 64) */
-U3memcpy_toosmall:
+ andcc %g1, 0x7, %g0
+ bne,pn %icc, small_copy_unaligned
+ andcc %o2, 0x8, %g0
+ be,pt %icc, 1f
+ nop
+ ldx [%o1], %o5
+ stx %o5, [%o1 + %o3]
+ add %o1, 0x8, %o1
-#ifdef SMALL_COPY_USES_FPU
+1: andcc %o2, 0x4, %g0
+ be,pt %icc, 1f
+ nop
+ lduw [%o1], %o5
+ stw %o5, [%o1 + %o3]
+ add %o1, 0x4, %o1
- /* Is 'dst' already aligned on an 8-byte boundary? */
- be,pt %xcc, 2f ! BR Group
+1: andcc %o2, 0x2, %g0
+ be,pt %icc, 1f
+ nop
+ lduh [%o1], %o5
+ sth %o5, [%o1 + %o3]
+ add %o1, 0x2, %o1
- /* Compute abs((dst & 7) - 8) into %g2. This is the number
- * of bytes to copy to make 'dst' 8-byte aligned. We pre-
- * subtract this from 'len'.
- */
- sub %g2, 0x8, %g2 ! A0
- sub %g0, %g2, %g2 ! A0 Group (reg-dep)
- sub %o2, %g2, %o2 ! A0 Group (reg-dep)
+1: andcc %o2, 0x1, %g0
+ be,pt %icc, out
+ nop
+ ldub [%o1], %o5
+ ba,pt %xcc, out
+ stb %o5, [%o1 + %o3]
+
+medium_copy: /* 16 < len <= 64 */
+ bne,pn %XCC, small_copy_unaligned
+ sub %o0, %o1, %o3
+
+medium_copy_aligned:
+ andn %o2, 0x7, %o4
+ and %o2, 0x7, %o2
+1: subcc %o4, 0x8, %o4
+ ldx [%o1], %o5
+ stx %o5, [%o1 + %o3]
+ bgu,pt %XCC, 1b
+ add %o1, 0x8, %o1
+ andcc %o2, 0x4, %g0
+ be,pt %XCC, 1f
+ nop
+ sub %o2, 0x4, %o2
+ lduw [%o1], %o5
+ stw %o5, [%o1 + %o3]
+ add %o1, 0x4, %o1
+1: cmp %o2, 0
+ be,pt %XCC, out
+ nop
+ ba,pt %xcc, small_copy_unaligned
+ nop
- /* Copy %g2 bytes from src to dst, one byte at a time. */
-1: ldub [%o1 + 0x00], %o3 ! MS (Group) (%o3 in 3 cycles)
- add %o1, 0x1, %o1 ! A1
- add %o0, 0x1, %o0 ! A0 Group
- subcc %g2, 0x1, %g2 ! A1
+small_copy: /* 0 < len <= 16 */
+ andcc %o3, 0x3, %g0
+ bne,pn %XCC, small_copy_unaligned
+ sub %o0, %o1, %o3
- bg,pt %icc, 1b ! BR Group
- stb %o3, [%o0 + -1] ! MS Group
+small_copy_aligned:
+ subcc %o2, 4, %o2
+ lduw [%o1], %g1
+ stw %g1, [%o1 + %o3]
+ bgu,pt %XCC, small_copy_aligned
+ add %o1, 4, %o1
-2: VISEntryHalf ! MS+MS
+out: retl
+ mov %g5, %o0
- /* Compute (len - (len % 8)) into %g2. This is guaranteed
- * to be nonzero.
- */
- andn %o2, 0x7, %g2 ! A0 Group
-
- /* You may read this and believe that it allows reading
- * one 8-byte longword past the end of src. It actually
- * does not, as %g2 is subtracted as loads are done from
- * src, so we always stop before running off the end.
- * Also, we are guaranteed to have at least 0x10 bytes
- * to move here.
+ .align 32
+small_copy_unaligned:
+ subcc %o2, 1, %o2
+ ldub [%o1], %g1
+ stb %g1, [%o1 + %o3]
+ bgu,pt %XCC, small_copy_unaligned
+ add %o1, 1, %o1
+ retl
+ mov %g5, %o0
+
+ /* Act like copy_{to,in}_user(), ie. return zero instead
+ * of original destination pointer. This is invoked when
+ * copy_{to,in}_user() finds that %asi is kernel space.
*/
- sub %g2, 0x8, %g2 ! A0 Group (reg-dep)
- alignaddr %o1, %g0, %g1 ! MS (Break-after)
- ldd [%g1 + 0x00], %f0 ! MS Group (1-cycle stall)
- add %g1, 0x8, %g1 ! A0
-
-1: ldd [%g1 + 0x00], %f2 ! MS Group
- add %g1, 0x8, %g1 ! A0
- sub %o2, 0x8, %o2 ! A1
- subcc %g2, 0x8, %g2 ! A0 Group
-
- faligndata %f0, %f2, %f8 ! FGA Group (1-cycle stall)
- std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
- add %o1, 0x8, %o1 ! A0
- be,pn %icc, 2f ! BR
-
- add %o0, 0x8, %o0 ! A1
- ldd [%g1 + 0x00], %f0 ! MS Group
- add %g1, 0x8, %g1 ! A0
- sub %o2, 0x8, %o2 ! A1
-
- subcc %g2, 0x8, %g2 ! A0 Group
- faligndata %f2, %f0, %f8 ! FGA Group (1-cycle stall)
- std %f8, [%o0 + 0x00] ! MS Group (2-cycle stall)
- add %o1, 0x8, %o1 ! A0
-
- bne,pn %icc, 1b ! BR
- add %o0, 0x8, %o0 ! A1
-
- /* Nothing left to copy? */
-2: cmp %o2, 0 ! A0 Group
- VISExitHalf ! A0+MS
- be,pn %icc, U3memcpy_short_ret ! BR Group
- nop ! A0
- ba,a,pt %xcc, U3memcpy_short ! BR Group
-
-#else /* !(SMALL_COPY_USES_FPU) */
-
- xor %o1, %o0, %g2
- andcc %g2, 0x7, %g0
- bne,pn %icc, U3memcpy_short
- andcc %o1, 0x7, %g2
-
- be,pt %xcc, 2f
- sub %g2, 0x8, %g2
- sub %g0, %g2, %g2
- sub %o2, %g2, %o2
-
-1: ldub [%o1 + 0x00], %o3
- add %o1, 0x1, %o1
- add %o0, 0x1, %o0
- subcc %g2, 0x1, %g2
- bg,pt %icc, 1b
- stb %o3, [%o0 + -1]
-
-2: andn %o2, 0x7, %g2
- sub %o2, %g2, %o2
-
-3: ldx [%o1 + 0x00], %o3
- add %o1, 0x8, %o1
- add %o0, 0x8, %o0
- subcc %g2, 0x8, %g2
- bg,pt %icc, 3b
- stx %o3, [%o0 + -8]
-
- cmp %o2, 0
- bne,pn %icc, U3memcpy_short
- nop
- ba,a,pt %xcc, U3memcpy_short_ret
-
-#endif /* !(SMALL_COPY_USES_FPU) */
+ .globl U3memcpy_user_stub
+U3memcpy_user_stub:
+ save %sp, -192, %sp
+ mov %i0, %o0
+ mov %i1, %o1
+ call U3memcpy
+ mov %i2, %o2
+ ret
+ restore %g0, %g0, %o0
.globl __memcpy_begin
__memcpy_begin:
- .globl __memcpy
- .type __memcpy,@function
-
memcpy_private:
-__memcpy:
memcpy: mov ASI_P, asi_src ! IEU0 Group
brnz,pt %o2, __memcpy_entry ! CTI
mov ASI_P, asi_dest ! IEU1
.text
.align 64
+ .globl _raw_spin_lock
+_raw_spin_lock: /* %o0 = lock_ptr */
+1: ldstub [%o0], %g7
+ brnz,pn %g7, 2f
+ membar #StoreLoad | #StoreStore
+ retl
+ nop
+2: ldub [%o0], %g7
+ brnz,pt %g7, 2b
+ membar #LoadLoad
+ ba,a,pt %xcc, 1b
+
.globl _raw_spin_lock_flags
_raw_spin_lock_flags: /* %o0 = lock_ptr, %o1 = irq_flags */
1: ldstub [%o0], %g7
__asm__ ("srl %0, 0, %0" \
: "=r" (__ret) \
: "0" (__x)); \
- __ret; \
+ (void __user *)__ret; \
})
extern unsigned sys_call_table[];
#define UFSMAGIC (((unsigned)'u'<<24)||((unsigned)'f'<<16)||((unsigned)'s'<<8))
-static inline int putstat(struct sol_stat *ubuf, struct kstat *kbuf)
+static inline int putstat(struct sol_stat __user *ubuf, struct kstat *kbuf)
{
if (kbuf->size > MAX_NON_LFS ||
!sysv_valid_dev(kbuf->dev) ||
__put_user (kbuf->ctime.tv_nsec, &ubuf->st_ctime.tv_nsec) ||
__put_user (kbuf->blksize, &ubuf->st_blksize) ||
__put_user (kbuf->blocks, &ubuf->st_blocks) ||
- __put_user (UFSMAGIC, (unsigned *)ubuf->st_fstype))
+ __put_user (UFSMAGIC, (unsigned __user *)ubuf->st_fstype))
return -EFAULT;
return 0;
}
-static inline int putstat64(struct sol_stat64 *ubuf, struct kstat *kbuf)
+static inline int putstat64(struct sol_stat64 __user *ubuf, struct kstat *kbuf)
{
if (!sysv_valid_dev(kbuf->dev) || !sysv_valid_dev(kbuf->rdev))
return -EOVERFLOW;
__put_user (kbuf->ctime.tv_nsec, &ubuf->st_ctime.tv_nsec) ||
__put_user (kbuf->blksize, &ubuf->st_blksize) ||
__put_user (kbuf->blocks, &ubuf->st_blocks) ||
- __put_user (UFSMAGIC, (unsigned *)ubuf->st_fstype))
+ __put_user (UFSMAGIC, (unsigned __user *)ubuf->st_fstype))
return -EFAULT;
return 0;
}
asmlinkage int solaris_stat(u32 filename, u32 statbuf)
{
- int ret;
struct kstat s;
- char *filenam;
- mm_segment_t old_fs = get_fs();
-
- filenam = getname ((char *)A(filename));
- ret = PTR_ERR(filenam);
- if (!IS_ERR(filenam)) {
- set_fs (KERNEL_DS);
- ret = vfs_stat(filenam, &s);
- set_fs (old_fs);
- putname (filenam);
- return putstat((struct sol_stat *)A(statbuf), &s);
- }
+ int ret = vfs_stat(A(filename), &s);
+ if (!ret)
+ return putstat(A(statbuf), &s);
return ret;
}
asmlinkage int solaris_stat64(u32 filename, u32 statbuf)
{
- int ret;
struct kstat s;
- char *filenam;
- mm_segment_t old_fs = get_fs();
-
- filenam = getname ((char *)A(filename));
- ret = PTR_ERR(filenam);
- if (!IS_ERR(filenam)) {
- set_fs (KERNEL_DS);
- ret = vfs_stat(filenam, &s);
- set_fs (old_fs);
- putname (filenam);
- return putstat64((struct sol_stat64 *)A(statbuf), &s);
- }
+ int ret = vfs_stat(A(filename), &s);
+ if (!ret)
+ return putstat64(A(statbuf), &s);
return ret;
}
asmlinkage int solaris_lstat(u32 filename, u32 statbuf)
{
- int ret;
struct kstat s;
- char *filenam;
- mm_segment_t old_fs = get_fs();
-
- filenam = getname ((char *)A(filename));
- ret = PTR_ERR(filenam);
- if (!IS_ERR(filenam)) {
- set_fs (KERNEL_DS);
- ret = vfs_lstat(filenam, &s);
- set_fs (old_fs);
- putname (filenam);
- return putstat((struct sol_stat *)A(statbuf), &s);
- }
+ int ret = vfs_lstat(A(filename), &s);
+ if (!ret)
+ return putstat(A(statbuf), &s);
return ret;
}
asmlinkage int solaris_lstat64(u32 filename, u32 statbuf)
{
- int ret;
struct kstat s;
- char *filenam;
- mm_segment_t old_fs = get_fs();
-
- filenam = getname ((char *)A(filename));
- ret = PTR_ERR(filenam);
- if (!IS_ERR(filenam)) {
- set_fs (KERNEL_DS);
- ret = vfs_lstat(filenam, &s);
- set_fs (old_fs);
- putname (filenam);
- return putstat64((struct sol_stat64 *)A(statbuf), &s);
- }
+ int ret = vfs_lstat(A(filename), &s);
+ if (!ret)
+ return putstat64(A(statbuf), &s);
return ret;
}
asmlinkage int solaris_fstat(unsigned int fd, u32 statbuf)
{
- int ret;
struct kstat s;
- ret = vfs_fstat(fd, &s);
+ int ret = vfs_fstat(fd, &s);
if (!ret)
- return putstat((struct sol_stat *)A(statbuf), &s);
+ return putstat(A(statbuf), &s);
return ret;
}
asmlinkage int solaris_fstat64(unsigned int fd, u32 statbuf)
{
- int ret;
struct kstat s;
-
- ret = vfs_fstat(fd, &s);
+ int ret = vfs_fstat(fd, &s);
if (!ret)
- return putstat64((struct sol_stat64 *)A(statbuf), &s);
+ return putstat64(A(statbuf), &s);
return ret;
}
asmlinkage int solaris_mknod(u32 path, u32 mode, s32 dev)
{
- int (*sys_mknod)(const char *,int,unsigned) =
- (int (*)(const char *,int,unsigned))SYS(mknod);
+ int (*sys_mknod)(const char __user *,int,unsigned) =
+ (int (*)(const char __user *,int,unsigned))SYS(mknod);
int major = sysv_major(dev);
int minor = sysv_minor(dev);
/* minor is guaranteed to be OK for MKDEV, major might be not */
if (major > 0xfff)
return -EINVAL;
- return sys_mknod((const char *)A(path), mode,
- new_encode_dev(MKDEV(major,minor)));
+ return sys_mknod(A(path), mode, new_encode_dev(MKDEV(major,minor)));
}
asmlinkage int solaris_xmknod(int vers, u32 path, u32 mode, s32 dev)
return solaris_mknod(path, mode, dev);
}
-asmlinkage int solaris_getdents64(unsigned int fd, void *dirent, unsigned int count)
+asmlinkage int solaris_getdents64(unsigned int fd, void __user *dirent, unsigned int count)
{
- int (*sys_getdents)(unsigned int, void *, unsigned int) =
- (int (*)(unsigned int, void *, unsigned int))SYS(getdents);
+ int (*sys_getdents)(unsigned int, void __user *, unsigned int) =
+ (int (*)(unsigned int, void __user *, unsigned int))SYS(getdents);
return sys_getdents(fd, dirent, count);
}
int ret;
struct statfs s;
mm_segment_t old_fs = get_fs();
- int (*sys_statfs)(const char *,struct statfs *) =
- (int (*)(const char *,struct statfs *))SYS(statfs);
- struct sol_statfs *ss = (struct sol_statfs *)A(buf);
+ int (*sys_statfs)(const char __user *,struct statfs __user *) =
+ (int (*)(const char __user *,struct statfs __user *))SYS(statfs);
+ struct sol_statfs __user *ss = A(buf);
if (len != sizeof(struct sol_statfs)) return -EINVAL;
if (!fstype) {
+ /* FIXME: mixing userland and kernel pointers */
set_fs (KERNEL_DS);
- ret = sys_statfs((const char *)A(path), &s);
+ ret = sys_statfs(A(path), &s);
set_fs (old_fs);
if (!ret) {
if (put_user (s.f_type, &ss->f_type) ||
int ret;
struct statfs s;
mm_segment_t old_fs = get_fs();
- int (*sys_fstatfs)(unsigned,struct statfs *) =
- (int (*)(unsigned,struct statfs *))SYS(fstatfs);
- struct sol_statfs *ss = (struct sol_statfs *)A(buf);
+ int (*sys_fstatfs)(unsigned,struct statfs __user *) =
+ (int (*)(unsigned,struct statfs __user *))SYS(fstatfs);
+ struct sol_statfs __user *ss = A(buf);
if (len != sizeof(struct sol_statfs)) return -EINVAL;
if (!fstype) {
{
struct kstatfs s;
int error;
- struct sol_statvfs *ss = (struct sol_statvfs *)A(buf);
+ struct sol_statvfs __user *ss = A(buf);
error = vfs_statfs(mnt->mnt_sb, &s);
if (!error) {
__put_user (s.f_ffree, &ss->f_favail) ||
__put_user (sysv_encode_dev(inode->i_sb->s_dev), &ss->f_fsid) ||
__copy_to_user (ss->f_basetype,p,j) ||
- __put_user (0, (char *)&ss->f_basetype[j]) ||
+ __put_user (0, (char __user *)&ss->f_basetype[j]) ||
__put_user (s.f_namelen, &ss->f_namemax) ||
__put_user (i, &ss->f_flag) ||
__clear_user (&ss->f_fstr, 32))
{
struct kstatfs s;
int error;
- struct sol_statvfs64 *ss = (struct sol_statvfs64 *)A(buf);
+ struct sol_statvfs64 __user *ss = A(buf);
error = vfs_statfs(mnt->mnt_sb, &s);
if (!error) {
__put_user (s.f_ffree, &ss->f_favail) ||
__put_user (sysv_encode_dev(inode->i_sb->s_dev), &ss->f_fsid) ||
__copy_to_user (ss->f_basetype,p,j) ||
- __put_user (0, (char *)&ss->f_basetype[j]) ||
+ __put_user (0, (char __user *)&ss->f_basetype[j]) ||
__put_user (s.f_namelen, &ss->f_namemax) ||
__put_user (i, &ss->f_flag) ||
__clear_user (&ss->f_fstr, 32))
struct nameidata nd;
int error;
- error = user_path_walk((const char *)A(path),&nd);
+ error = user_path_walk(A(path),&nd);
if (!error) {
struct inode * inode = nd.dentry->d_inode;
error = report_statvfs(nd.mnt, inode, buf);
int error;
lock_kernel();
- error = user_path_walk((const char *)A(path), &nd);
+ error = user_path_walk(A(path), &nd);
if (!error) {
struct inode * inode = nd.dentry->d_inode;
error = report_statvfs64(nd.mnt, inode, buf);
case SOL_F_SETLKW:
{
struct flock f;
+ struct sol_flock __user *p = A(arg);
mm_segment_t old_fs = get_fs();
switch (cmd) {
case SOL_F_SETLKW: cmd = F_SETLKW; break;
}
- if (get_user (f.l_type, &((struct sol_flock *)A(arg))->l_type) ||
- __get_user (f.l_whence, &((struct sol_flock *)A(arg))->l_whence) ||
- __get_user (f.l_start, &((struct sol_flock *)A(arg))->l_start) ||
- __get_user (f.l_len, &((struct sol_flock *)A(arg))->l_len) ||
- __get_user (f.l_pid, &((struct sol_flock *)A(arg))->l_sysid))
+ if (get_user (f.l_type, &p->l_type) ||
+ __get_user (f.l_whence, &p->l_whence) ||
+ __get_user (f.l_start, &p->l_start) ||
+ __get_user (f.l_len, &p->l_len) ||
+ __get_user (f.l_pid, &p->l_sysid))
return -EFAULT;
set_fs(KERNEL_DS);
ret = sys_fcntl(fd, cmd, (unsigned long)&f);
set_fs(old_fs);
- if (__put_user (f.l_type, &((struct sol_flock *)A(arg))->l_type) ||
- __put_user (f.l_whence, &((struct sol_flock *)A(arg))->l_whence) ||
- __put_user (f.l_start, &((struct sol_flock *)A(arg))->l_start) ||
- __put_user (f.l_len, &((struct sol_flock *)A(arg))->l_len) ||
- __put_user (f.l_pid, &((struct sol_flock *)A(arg))->l_pid) ||
- __put_user (0, &((struct sol_flock *)A(arg))->l_sysid))
+ if (__put_user (f.l_type, &p->l_type) ||
+ __put_user (f.l_whence, &p->l_whence) ||
+ __put_user (f.l_start, &p->l_start) ||
+ __put_user (f.l_len, &p->l_len) ||
+ __put_user (f.l_pid, &p->l_pid) ||
+ __put_user (0, &p->l_sysid))
return -EFAULT;
return ret;
int (*sys_newftruncate)(unsigned int, unsigned long)=
(int (*)(unsigned int, unsigned long))SYS(ftruncate);
- if (get_user(length, &((struct sol_flock*)A(arg))->l_start))
+ if (get_user(length, &((struct sol_flock __user *)A(arg))->l_start))
return -EFAULT;
return sys_newftruncate(fd, length);
return -ENOSYS;
}
-asmlinkage int solaris_pread(unsigned int fd, char *buf, u32 count, u32 pos)
+asmlinkage int solaris_pread(unsigned int fd, char __user *buf, u32 count, u32 pos)
{
- ssize_t (*sys_pread64)(unsigned int, char *, size_t, loff_t) =
- (ssize_t (*)(unsigned int, char *, size_t, loff_t))SYS(pread64);
+ ssize_t (*sys_pread64)(unsigned int, char __user *, size_t, loff_t) =
+ (ssize_t (*)(unsigned int, char __user *, size_t, loff_t))SYS(pread64);
return sys_pread64(fd, buf, count, (loff_t)pos);
}
-asmlinkage int solaris_pwrite(unsigned int fd, char *buf, u32 count, u32 pos)
+asmlinkage int solaris_pwrite(unsigned int fd, char __user *buf, u32 count, u32 pos)
{
- ssize_t (*sys_pwrite64)(unsigned int, char *, size_t, loff_t) =
- (ssize_t (*)(unsigned int, char *, size_t, loff_t))SYS(pwrite64);
+ ssize_t (*sys_pwrite64)(unsigned int, char __user *, size_t, loff_t) =
+ (ssize_t (*)(unsigned int, char __user *, size_t, loff_t))SYS(pwrite64);
return sys_pwrite64(fd, buf, count, (loff_t)pos);
}
/* solaris_llseek returns long long - quite difficult */
asmlinkage long solaris_llseek(struct pt_regs *regs, u32 off_hi, u32 off_lo, int whence)
{
- int (*sys_llseek)(unsigned int, unsigned long, unsigned long, loff_t *, unsigned int) =
- (int (*)(unsigned int, unsigned long, unsigned long, loff_t *, unsigned int))SYS(_llseek);
+ int (*sys_llseek)(unsigned int, unsigned long, unsigned long, loff_t __user *, unsigned int) =
+ (int (*)(unsigned int, unsigned long, unsigned long, loff_t __user *, unsigned int))SYS(_llseek);
int ret;
mm_segment_t old_fs = get_fs();
loff_t retval;
/* Have to mask out all but lower 3 bits */
asmlinkage int solaris_access(u32 filename, long mode)
{
- int (*sys_access)(const char *, int) =
- (int (*)(const char *, int))SYS(access);
+ int (*sys_access)(const char __user *, int) =
+ (int (*)(const char __user *, int))SYS(access);
- return sys_access((const char *)A(filename), mode & 7);
+ return sys_access(A(filename), mode & 7);
}
u32 arg);
asmlinkage int solaris_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
-extern int timod_putmsg(unsigned int fd, char *ctl_buf, int ctl_len,
- char *data_buf, int data_len, int flags);
-extern int timod_getmsg(unsigned int fd, char *ctl_buf, int ctl_maxlen, int *ctl_len,
- char *data_buf, int data_maxlen, int *data_len, int *flags);
+extern int timod_putmsg(unsigned int fd, char __user *ctl_buf, int ctl_len,
+ char __user *data_buf, int data_len, int flags);
+extern int timod_getmsg(unsigned int fd, char __user *ctl_buf, int ctl_maxlen, int __user *ctl_len,
+ char __user *data_buf, int data_maxlen, int __user *data_len, int *flags);
/* termio* stuff {{{ */
static inline int linux_to_solaris_termio(unsigned int fd, unsigned int cmd, u32 arg)
{
+ struct solaris_termio __user *p = A(arg);
int ret;
- ret = sys_ioctl(fd, cmd, A(arg));
+ ret = sys_ioctl(fd, cmd, (unsigned long)p);
if (!ret) {
u32 cflag;
- if (__get_user (cflag, &((struct solaris_termio *)A(arg))->c_cflag))
+ if (__get_user (cflag, &p->c_cflag))
return -EFAULT;
cflag = linux_to_solaris_cflag(cflag);
- if (__put_user (cflag, &((struct solaris_termio *)A(arg))->c_cflag))
+ if (__put_user (cflag, &p->c_cflag))
return -EFAULT;
}
return ret;
struct solaris_termio s;
mm_segment_t old_fs = get_fs();
- if (copy_from_user (&s, (struct solaris_termio *)A(arg), sizeof(struct solaris_termio)))
+ if (copy_from_user (&s, (struct solaris_termio __user *)A(arg), sizeof(struct solaris_termio)))
return -EFAULT;
s.c_cflag = solaris_to_linux_cflag(s.c_cflag);
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, cmd, (unsigned long)&s);
set_fs(old_fs);
if (!ret) {
- if (put_user (s.c_iflag, &((struct solaris_termios *)A(arg))->c_iflag) ||
- __put_user (s.c_oflag, &((struct solaris_termios *)A(arg))->c_oflag) ||
- __put_user (linux_to_solaris_cflag(s.c_cflag), &((struct solaris_termios *)A(arg))->c_cflag) ||
- __put_user (s.c_lflag, &((struct solaris_termios *)A(arg))->c_lflag) ||
- __copy_to_user (((struct solaris_termios *)A(arg))->c_cc, s.c_cc, 16) ||
- __clear_user (((struct solaris_termios *)A(arg))->c_cc + 16, 2))
+ struct solaris_termios __user *p = A(arg);
+ if (put_user (s.c_iflag, &p->c_iflag) ||
+ __put_user (s.c_oflag, &p->c_oflag) ||
+ __put_user (linux_to_solaris_cflag(s.c_cflag), &p->c_cflag) ||
+ __put_user (s.c_lflag, &p->c_lflag) ||
+ __copy_to_user (p->c_cc, s.c_cc, 16) ||
+ __clear_user (p->c_cc + 16, 2))
return -EFAULT;
}
return ret;
{
int ret;
struct solaris_termios s;
+ struct solaris_termios __user *p = A(arg);
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, TCGETS, (unsigned long)&s);
set_fs(old_fs);
if (ret) return ret;
- if (put_user (s.c_iflag, &((struct solaris_termios *)A(arg))->c_iflag) ||
- __put_user (s.c_oflag, &((struct solaris_termios *)A(arg))->c_oflag) ||
- __put_user (s.c_cflag, &((struct solaris_termios *)A(arg))->c_cflag) ||
- __put_user (s.c_lflag, &((struct solaris_termios *)A(arg))->c_lflag) ||
- __copy_from_user (s.c_cc, ((struct solaris_termios *)A(arg))->c_cc, 16))
+ if (put_user (s.c_iflag, &p->c_iflag) ||
+ __put_user (s.c_oflag, &p->c_oflag) ||
+ __put_user (s.c_cflag, &p->c_cflag) ||
+ __put_user (s.c_lflag, &p->c_lflag) ||
+ __copy_from_user (s.c_cc, p->c_cc, 16))
return -EFAULT;
s.c_cflag = solaris_to_linux_cflag(s.c_cflag);
set_fs(KERNEL_DS);
case 109: /* SI_SOCKPARAMS */
{
struct solaris_si_sockparams si;
- if (copy_from_user (&si, (struct solaris_si_sockparams *) A(arg), sizeof(si)))
+ if (copy_from_user (&si, A(arg), sizeof(si)))
return (EFAULT << 8) | TSYSERR;
/* Should we modify socket ino->socket_i.ops and type? */
case 110: /* SI_GETUDATA */
{
int etsdusize, servtype;
+ struct solaris_si_udata __user *p = A(arg);
switch (SOCKET_I(ino)->type) {
case SOCK_STREAM:
etsdusize = 1;
servtype = 3;
break;
}
- if (put_user(16384, &((struct solaris_si_udata *)A(arg))->tidusize) ||
- __put_user(sizeof(struct sockaddr), &((struct solaris_si_udata *)A(arg))->addrsize) ||
- __put_user(-1, &((struct solaris_si_udata *)A(arg))->optsize) ||
- __put_user(etsdusize, &((struct solaris_si_udata *)A(arg))->etsdusize) ||
- __put_user(servtype, &((struct solaris_si_udata *)A(arg))->servtype) ||
- __put_user(0, &((struct solaris_si_udata *)A(arg))->so_state) ||
- __put_user(0, &((struct solaris_si_udata *)A(arg))->so_options) ||
- __put_user(16384, &((struct solaris_si_udata *)A(arg))->tsdusize) ||
- __put_user(SOCKET_I(ino)->ops->family, &((struct solaris_si_udata *)A(arg))->sockparams.sp_family) ||
- __put_user(SOCKET_I(ino)->type, &((struct solaris_si_udata *)A(arg))->sockparams.sp_type) ||
- __put_user(SOCKET_I(ino)->ops->family, &((struct solaris_si_udata *)A(arg))->sockparams.sp_protocol))
+ if (put_user(16384, &p->tidusize) ||
+ __put_user(sizeof(struct sockaddr), &p->addrsize) ||
+ __put_user(-1, &p->optsize) ||
+ __put_user(etsdusize, &p->etsdusize) ||
+ __put_user(servtype, &p->servtype) ||
+ __put_user(0, &p->so_state) ||
+ __put_user(0, &p->so_options) ||
+ __put_user(16384, &p->tsdusize) ||
+ __put_user(SOCKET_I(ino)->ops->family, &p->sockparams.sp_family) ||
+ __put_user(SOCKET_I(ino)->type, &p->sockparams.sp_type) ||
+ __put_user(SOCKET_I(ino)->ops->family, &p->sockparams.sp_protocol))
return (EFAULT << 8) | TSYSERR;
return 0;
}
case 101: /* O_SI_GETUDATA */
{
int etsdusize, servtype;
+ struct solaris_o_si_udata __user *p = A(arg);
switch (SOCKET_I(ino)->type) {
case SOCK_STREAM:
etsdusize = 1;
servtype = 3;
break;
}
- if (put_user(16384, &((struct solaris_o_si_udata *)A(arg))->tidusize) ||
- __put_user(sizeof(struct sockaddr), &((struct solaris_o_si_udata *)A(arg))->addrsize) ||
- __put_user(-1, &((struct solaris_o_si_udata *)A(arg))->optsize) ||
- __put_user(etsdusize, &((struct solaris_o_si_udata *)A(arg))->etsdusize) ||
- __put_user(servtype, &((struct solaris_o_si_udata *)A(arg))->servtype) ||
- __put_user(0, &((struct solaris_o_si_udata *)A(arg))->so_state) ||
- __put_user(0, &((struct solaris_o_si_udata *)A(arg))->so_options) ||
- __put_user(16384, &((struct solaris_o_si_udata *)A(arg))->tsdusize))
+ if (put_user(16384, &p->tidusize) ||
+ __put_user(sizeof(struct sockaddr), &p->addrsize) ||
+ __put_user(-1, &p->optsize) ||
+ __put_user(etsdusize, &p->etsdusize) ||
+ __put_user(servtype, &p->servtype) ||
+ __put_user(0, &p->so_state) ||
+ __put_user(0, &p->so_options) ||
+ __put_user(16384, &p->tsdusize))
return (EFAULT << 8) | TSYSERR;
return 0;
}
}
static inline int solaris_timod(unsigned int fd, unsigned int cmd, u32 arg,
- int len, int *len_p)
+ int len, int __user *len_p)
{
int ret;
int i;
u32 prim;
SOLD("TI_OPMGMT entry");
- ret = timod_putmsg(fd, (char *)A(arg), len, NULL, -1, 0);
+ ret = timod_putmsg(fd, A(arg), len, NULL, -1, 0);
SOLD("timod_putmsg() returned");
if (ret)
return (-ret << 8) | TSYSERR;
i = MSG_HIPRI;
SOLD("calling timod_getmsg()");
- ret = timod_getmsg(fd, (char *)A(arg), len, len_p, NULL, -1, NULL, &i);
+ ret = timod_getmsg(fd, A(arg), len, len_p, NULL, -1, NULL, &i);
SOLD("timod_getmsg() returned");
if (ret)
return (-ret << 8) | TSYSERR;
SOLD("ret ok");
- if (get_user(prim, (u32 *)A(arg)))
+ if (get_user(prim, (u32 __user *)A(arg)))
return (EFAULT << 8) | TSYSERR;
SOLD("got prim");
if (prim == T_ERROR_ACK) {
u32 tmp, tmp2;
SOLD("prim is T_ERROR_ACK");
- if (get_user(tmp, (u32 *)A(arg)+3) ||
- get_user(tmp2, (u32 *)A(arg)+2))
+ if (get_user(tmp, (u32 __user *)A(arg)+3) ||
+ get_user(tmp2, (u32 __user *)A(arg)+2))
return (EFAULT << 8) | TSYSERR;
return (tmp2 << 8) | tmp;
}
int i;
u32 prim;
SOLD("TI_BIND entry");
- ret = timod_putmsg(fd, (char *)A(arg), len, NULL, -1, 0);
+ ret = timod_putmsg(fd, A(arg), len, NULL, -1, 0);
SOLD("timod_putmsg() returned");
if (ret)
return (-ret << 8) | TSYSERR;
len = 1024; /* Solaris allows arbitrary return size */
i = MSG_HIPRI;
SOLD("calling timod_getmsg()");
- ret = timod_getmsg(fd, (char *)A(arg), len, len_p, NULL, -1, NULL, &i);
+ ret = timod_getmsg(fd, A(arg), len, len_p, NULL, -1, NULL, &i);
SOLD("timod_getmsg() returned");
if (ret)
return (-ret << 8) | TSYSERR;
SOLD("ret ok");
- if (get_user(prim, (u32 *)A(arg)))
+ if (get_user(prim, (u32 __user *)A(arg)))
return (EFAULT << 8) | TSYSERR;
SOLD("got prim");
if (prim == T_ERROR_ACK) {
u32 tmp, tmp2;
SOLD("prim is T_ERROR_ACK");
- if (get_user(tmp, (u32 *)A(arg)+3) ||
- get_user(tmp2, (u32 *)A(arg)+2))
+ if (get_user(tmp, (u32 __user *)A(arg)+3) ||
+ get_user(tmp2, (u32 __user *)A(arg)+2))
return (EFAULT << 8) | TSYSERR;
return (tmp2 << 8) | tmp;
}
SOLD("OK_ACK requested");
i = MSG_HIPRI;
SOLD("calling timod_getmsg()");
- ret = timod_getmsg(fd, (char *)A(arg), len, len_p, NULL, -1, NULL, &i);
+ ret = timod_getmsg(fd, A(arg), len, len_p, NULL, -1, NULL, &i);
SOLD("timod_getmsg() returned");
if (ret)
return (-ret << 8) | TSYSERR;
return -ENOSYS;
case 2: /* I_PUSH */
{
- p = getname ((char *)A(arg));
+ p = getname (A(arg));
if (IS_ERR (p))
return PTR_ERR(p);
ret = -EINVAL;
const char *p;
if (sock->modcount <= 0) return -EINVAL;
p = module_table[(unsigned)sock->module[sock->modcount]].name;
- if (copy_to_user ((char *)A(arg), p, strlen(p)))
+ if (copy_to_user (A(arg), p, strlen(p)))
return -EFAULT;
return 0;
}
case 5: /* I_FLUSH */
return 0;
case 8: /* I_STR */
- if (copy_from_user(&si, (struct strioctl *)A(arg), sizeof(struct strioctl)))
+ if (copy_from_user(&si, A(arg), sizeof(struct strioctl)))
return -EFAULT;
/* We ignore what module is actually at the top of stack. */
switch ((si.cmd >> 8) & 0xff) {
return solaris_sockmod(fd, si.cmd, si.data);
case 'T':
return solaris_timod(fd, si.cmd, si.data, si.len,
- &((struct strioctl*)A(arg))->len);
+ &((struct strioctl __user *)A(arg))->len);
default:
return solaris_ioctl(fd, si.cmd, si.data);
}
case 11: /* I_FIND */
{
int i;
- p = getname ((char *)A(arg));
+ p = getname (A(arg));
if (IS_ERR (p))
return PTR_ERR(p);
ret = 0;
return 0; /* We don't support them */
case 1: /* SIOCGHIWAT */
case 3: /* SIOCGLOWAT */
- if (put_user (0, (u32 *)A(arg)))
+ if (put_user (0, (u32 __user *)A(arg)))
return -EFAULT;
return 0; /* Lie */
case 7: /* SIOCATMARK */
args);
set_fs(old_fs);
if (ret >= 0) {
- if (copy_to_user((char *)A(arg), &uaddr, uaddr_len))
+ if (copy_to_user(A(arg), &uaddr, uaddr_len))
return -EFAULT;
}
return ret;
for (d = dev_base; d; d = d->next) i++;
read_unlock_bh(&dev_base_lock);
- if (put_user (i, (int *)A(arg)))
+ if (put_user (i, (int __user *)A(arg)))
return -EFAULT;
return 0;
}
asmlinkage long solaris_shmsys(int cmd, u32 arg1, u32 arg2, u32 arg3)
{
- int (*sys_ipc)(unsigned,int,int,unsigned long,void *,long) =
- (int (*)(unsigned,int,int,unsigned long,void *,long))SYS(ipc);
+ int (*sys_ipc)(unsigned,int,int,unsigned long,void __user *,long) =
+ (int (*)(unsigned,int,int,unsigned long,void __user *,long))SYS(ipc);
mm_segment_t old_fs;
unsigned long raddr;
int ret;
case 0: /* shmat */
old_fs = get_fs();
set_fs(KERNEL_DS);
- ret = sys_ipc(SHMAT, arg1, arg3 & ~0x4000, (unsigned long)&raddr, (void *)A(arg2), 0);
+ ret = sys_ipc(SHMAT, arg1, arg3 & ~0x4000, (unsigned long)&raddr, A(arg2), 0);
set_fs(old_fs);
if (ret >= 0) return (u32)raddr;
else return ret;
case 11: /* IPC_SET */
{
struct shmid_ds s;
+ struct solaris_shmid_ds __user *p = A(arg3);
- if (get_user (s.shm_perm.uid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.uid)) ||
- __get_user (s.shm_perm.gid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.gid)) ||
- __get_user (s.shm_perm.mode, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.mode)))
+ if (get_user (s.shm_perm.uid, &p->shm_perm.uid) ||
+ __get_user (s.shm_perm.gid, &p->shm_perm.gid) ||
+ __get_user (s.shm_perm.mode, &p->shm_perm.mode))
return -EFAULT;
old_fs = get_fs();
set_fs(KERNEL_DS);
case 12: /* IPC_STAT */
{
struct shmid_ds s;
+ struct solaris_shmid_ds __user *p = A(arg3);
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ipc(SHMCTL, arg1, IPC_SET, 0, &s, 0);
set_fs(old_fs);
- if (get_user (s.shm_perm.uid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.uid)) ||
- __get_user (s.shm_perm.gid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.gid)) ||
- __get_user (s.shm_perm.cuid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.cuid)) ||
- __get_user (s.shm_perm.cgid, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.cgid)) ||
- __get_user (s.shm_perm.mode, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.mode)) ||
- __get_user (s.shm_perm.seq, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.seq)) ||
- __get_user (s.shm_perm.key, &(((struct solaris_shmid_ds *)A(arg3))->shm_perm.key)) ||
- __get_user (s.shm_segsz, &(((struct solaris_shmid_ds *)A(arg3))->shm_segsz)) ||
- __get_user (s.shm_lpid, &(((struct solaris_shmid_ds *)A(arg3))->shm_lpid)) ||
- __get_user (s.shm_cpid, &(((struct solaris_shmid_ds *)A(arg3))->shm_cpid)) ||
- __get_user (s.shm_nattch, &(((struct solaris_shmid_ds *)A(arg3))->shm_nattch)) ||
- __get_user (s.shm_atime, &(((struct solaris_shmid_ds *)A(arg3))->shm_atime)) ||
- __get_user (s.shm_dtime, &(((struct solaris_shmid_ds *)A(arg3))->shm_dtime)) ||
- __get_user (s.shm_ctime, &(((struct solaris_shmid_ds *)A(arg3))->shm_ctime)))
+ if (put_user (s.shm_perm.uid, &(p->shm_perm.uid)) ||
+ __put_user (s.shm_perm.gid, &(p->shm_perm.gid)) ||
+ __put_user (s.shm_perm.cuid, &(p->shm_perm.cuid)) ||
+ __put_user (s.shm_perm.cgid, &(p->shm_perm.cgid)) ||
+ __put_user (s.shm_perm.mode, &(p->shm_perm.mode)) ||
+ __put_user (s.shm_perm.seq, &(p->shm_perm.seq)) ||
+ __put_user (s.shm_perm.key, &(p->shm_perm.key)) ||
+ __put_user (s.shm_segsz, &(p->shm_segsz)) ||
+ __put_user (s.shm_lpid, &(p->shm_lpid)) ||
+ __put_user (s.shm_cpid, &(p->shm_cpid)) ||
+ __put_user (s.shm_nattch, &(p->shm_nattch)) ||
+ __put_user (s.shm_atime, &(p->shm_atime)) ||
+ __put_user (s.shm_dtime, &(p->shm_dtime)) ||
+ __put_user (s.shm_ctime, &(p->shm_ctime)))
return -EFAULT;
return ret;
}
default: return -EINVAL;
}
case 2: /* shmdt */
- return sys_ipc(SHMDT, 0, 0, 0, (void *)A(arg1), 0);
+ return sys_ipc(SHMDT, 0, 0, 0, A(arg1), 0);
case 3: /* shmget */
return sys_ipc(SHMGET, arg1, arg2, arg3, NULL, 0);
}
u32 offlo;
if (regs->u_regs[UREG_G1]) {
- if (get_user (offlo, (u32 *)(long)((u32)regs->u_regs[UREG_I6] + 0x5c)))
+ if (get_user (offlo, (u32 __user *)(long)((u32)regs->u_regs[UREG_I6] + 0x5c)))
return -EFAULT;
} else {
- if (get_user (offlo, (u32 *)(long)((u32)regs->u_regs[UREG_I6] + 0x60)))
+ if (get_user (offlo, (u32 __user *)(long)((u32)regs->u_regs[UREG_I6] + 0x60)))
return -EFAULT;
}
return do_solaris_mmap((u32)regs->u_regs[UREG_I0], len, prot, flags, fd, (((u64)offhi)<<32)|offlo);
for (p=from,i=0; *p && *p != '.' && --len; p++,i++); \
else \
i = len - 1; \
- if (__put_user('\0', (char *)(to+i))) \
+ if (__put_user('\0', (char __user *)((to)+i))) \
return -EFAULT; \
}
asmlinkage int solaris_utssys(u32 buf, u32 flags, int which, u32 buf2)
{
+ struct sol_uname __user *v = A(buf);
switch (which) {
case 0: /* old uname */
/* Let's cheat */
- set_utsfield(((struct sol_uname *)A(buf))->sysname,
- "SunOS", 1, 0);
+ set_utsfield(v->sysname, "SunOS", 1, 0);
down_read(&uts_sem);
- set_utsfield(((struct sol_uname *)A(buf))->nodename,
- system_utsname.nodename, 1, 1);
+ set_utsfield(v->nodename, system_utsname.nodename, 1, 1);
up_read(&uts_sem);
- set_utsfield(((struct sol_uname *)A(buf))->release,
- "2.6", 0, 0);
- set_utsfield(((struct sol_uname *)A(buf))->version,
- "Generic", 0, 0);
- set_utsfield(((struct sol_uname *)A(buf))->machine,
- machine(), 0, 0);
+ set_utsfield(v->release, "2.6", 0, 0);
+ set_utsfield(v->version, "Generic", 0, 0);
+ set_utsfield(v->machine, machine(), 0, 0);
return 0;
case 2: /* ustat */
return -ENOSYS;
asmlinkage int solaris_utsname(u32 buf)
{
+ struct sol_utsname __user *v = A(buf);
/* Why should we not lie a bit? */
down_read(&uts_sem);
- set_utsfield(((struct sol_utsname *)A(buf))->sysname,
- "SunOS", 0, 0);
- set_utsfield(((struct sol_utsname *)A(buf))->nodename,
- system_utsname.nodename, 1, 1);
- set_utsfield(((struct sol_utsname *)A(buf))->release,
- "5.6", 0, 0);
- set_utsfield(((struct sol_utsname *)A(buf))->version,
- "Generic", 0, 0);
- set_utsfield(((struct sol_utsname *)A(buf))->machine,
- machine(), 0, 0);
+ set_utsfield(v->sysname, "SunOS", 0, 0);
+ set_utsfield(v->nodename, system_utsname.nodename, 1, 1);
+ set_utsfield(v->release, "5.6", 0, 0);
+ set_utsfield(v->version, "Generic", 0, 0);
+ set_utsfield(v->machine, machine(), 0, 0);
up_read(&uts_sem);
return 0;
}
}
len = strlen(r) + 1;
if (count < len) {
- if (copy_to_user((char *)A(buf), r, count - 1) ||
- __put_user(0, (char *)A(buf) + count - 1))
+ if (copy_to_user(A(buf), r, count - 1) ||
+ __put_user(0, (char __user *)A(buf) + count - 1))
return -EFAULT;
} else {
- if (copy_to_user((char *)A(buf), r, len))
+ if (copy_to_user(A(buf), r, len))
return -EFAULT;
}
return len;
u32 rlim_max;
};
-asmlinkage int solaris_getrlimit(unsigned int resource, struct rlimit32 *rlim)
+asmlinkage int solaris_getrlimit(unsigned int resource, struct rlimit32 __user *rlim)
{
struct rlimit r;
int ret;
return ret;
}
-asmlinkage int solaris_setrlimit(unsigned int resource, struct rlimit32 *rlim)
+asmlinkage int solaris_setrlimit(unsigned int resource, struct rlimit32 __user *rlim)
{
struct rlimit r, rold;
int ret;
mm_segment_t old_fs = get_fs ();
- int (*sys_getrlimit)(unsigned int, struct rlimit *) =
- (int (*)(unsigned int, struct rlimit *))SYS(getrlimit);
- int (*sys_setrlimit)(unsigned int, struct rlimit *) =
- (int (*)(unsigned int, struct rlimit *))SYS(setrlimit);
+ int (*sys_getrlimit)(unsigned int, struct rlimit __user *) =
+ (int (*)(unsigned int, struct rlimit __user *))SYS(getrlimit);
+ int (*sys_setrlimit)(unsigned int, struct rlimit __user *) =
+ (int (*)(unsigned int, struct rlimit __user *))SYS(setrlimit);
if (resource > RLIMIT_SOL_VMEM)
return -EINVAL;
return ret;
}
-asmlinkage int solaris_getrlimit64(unsigned int resource, struct rlimit *rlim)
+asmlinkage int solaris_getrlimit64(unsigned int resource, struct rlimit __user *rlim)
{
struct rlimit r;
int ret;
mm_segment_t old_fs = get_fs ();
- int (*sys_getrlimit)(unsigned int, struct rlimit *) =
- (int (*)(unsigned int, struct rlimit *))SYS(getrlimit);
+ int (*sys_getrlimit)(unsigned int, struct rlimit __user *) =
+ (int (*)(unsigned int, struct rlimit __user *))SYS(getrlimit);
if (resource > RLIMIT_SOL_VMEM)
return -EINVAL;
return ret;
}
-asmlinkage int solaris_setrlimit64(unsigned int resource, struct rlimit *rlim)
+asmlinkage int solaris_setrlimit64(unsigned int resource, struct rlimit __user *rlim)
{
struct rlimit r, rold;
int ret;
mm_segment_t old_fs = get_fs ();
- int (*sys_getrlimit)(unsigned int, struct rlimit *) =
- (int (*)(unsigned int, struct rlimit *))SYS(getrlimit);
- int (*sys_setrlimit)(unsigned int, struct rlimit *) =
- (int (*)(unsigned int, struct rlimit *))SYS(setrlimit);
+ int (*sys_getrlimit)(unsigned int, struct rlimit __user *) =
+ (int (*)(unsigned int, struct rlimit __user *))SYS(getrlimit);
+ int (*sys_setrlimit)(unsigned int, struct rlimit __user *) =
+ (int (*)(unsigned int, struct rlimit __user *))SYS(setrlimit);
if (resource > RLIMIT_SOL_VMEM)
return -EINVAL;
s32 stbcnt;
};
-asmlinkage int solaris_ntp_gettime(struct sol_ntptimeval *ntp)
+asmlinkage int solaris_ntp_gettime(struct sol_ntptimeval __user *ntp)
{
- int (*sys_adjtimex)(struct timex *) =
- (int (*)(struct timex *))SYS(adjtimex);
+ int (*sys_adjtimex)(struct timex __user *) =
+ (int (*)(struct timex __user *))SYS(adjtimex);
struct timex t;
int ret;
mm_segment_t old_fs = get_fs();
return ret;
}
-asmlinkage int solaris_ntp_adjtime(struct sol_timex *txp)
+asmlinkage int solaris_ntp_adjtime(struct sol_timex __user *txp)
{
- int (*sys_adjtimex)(struct timex *) =
- (int (*)(struct timex *))SYS(adjtimex);
+ int (*sys_adjtimex)(struct timex __user *) =
+ (int (*)(struct timex __user *))SYS(adjtimex);
struct timex t;
int ret, err;
mm_segment_t old_fs = get_fs();
struct sigaction sa, old;
int ret;
mm_segment_t old_fs = get_fs();
- int (*sys_sigaction)(int,struct sigaction *,struct sigaction *) =
- (int (*)(int,struct sigaction *,struct sigaction *))SYS(sigaction);
+ int (*sys_sigaction)(int,struct sigaction __user *,struct sigaction __user *) =
+ (int (*)(int,struct sigaction __user *,struct sigaction __user *))SYS(sigaction);
sigemptyset(&sa.sa_mask);
sa.sa_restorer = NULL;
sa.sa_flags = 0;
if (one_shot) sa.sa_flags = SA_ONESHOT | SA_NOMASK;
set_fs (KERNEL_DS);
- ret = sys_sigaction(sig, &sa, &old);
+ ret = sys_sigaction(sig, (void __user *)&sa, (void __user *)&old);
set_fs (old_fs);
if (ret < 0) return ret;
- return (u32)(long)old.sa_handler;
+ return (u32)(unsigned long)old.sa_handler;
}
static inline long solaris_signal(int sig, u32 arg)
static inline long solaris_sigignore(int sig)
{
- return sig_handler (sig, (u32)SIG_IGN, 0);
+ return sig_handler(sig, (u32)(unsigned long)SIG_IGN, 0);
}
static inline long solaris_sigpause(int sig)
sigset_t in_s, *ins, out_s, *outs;
mm_segment_t old_fs = get_fs();
int ret;
- int (*sys_sigprocmask)(int,sigset_t *,sigset_t *) =
- (int (*)(int,sigset_t *,sigset_t *))SYS(sigprocmask);
+ int (*sys_sigprocmask)(int,sigset_t __user *,sigset_t __user *) =
+ (int (*)(int,sigset_t __user *,sigset_t __user *))SYS(sigprocmask);
ins = NULL; outs = NULL;
if (in) {
u32 tmp[2];
- if (copy_from_user (tmp, (sol_sigset_t *)A(in), 2*sizeof(u32)))
+ if (copy_from_user (tmp, (void __user *)A(in), 2*sizeof(u32)))
return -EFAULT;
ins = &in_s;
if (mapin (tmp, ins)) return -EINVAL;
}
if (out) outs = &out_s;
set_fs (KERNEL_DS);
- ret = sys_sigprocmask((how == 3) ? SIG_SETMASK : how, ins, outs);
+ ret = sys_sigprocmask((how == 3) ? SIG_SETMASK : how,
+ (void __user *)ins, (void __user *)outs);
set_fs (old_fs);
if (ret) return ret;
if (out) {
tmp[2] = 0; tmp[3] = 0;
if (mapout (outs, tmp)) return -EINVAL;
- if (copy_to_user((sol_sigset_t *)A(out), tmp, 4*sizeof(u32)))
+ if (copy_to_user((void __user *)A(out), tmp, 4*sizeof(u32)))
return -EFAULT;
}
return 0;
sigset_t s;
u32 tmp[2];
- if (copy_from_user (tmp, (sol_sigset_t *)A(mask), 2*sizeof(u32)))
+ if (copy_from_user (tmp, (sol_sigset_t __user *)A(mask), 2*sizeof(u32)))
return -EFAULT;
if (mapin (tmp, &s)) return -EINVAL;
return (long)s.sig[0];
struct sigaction s, s2;
int ret;
mm_segment_t old_fs = get_fs();
- int (*sys_sigaction)(int,struct sigaction *,struct sigaction *) =
- (int (*)(int,struct sigaction *,struct sigaction *))SYS(sigaction);
+ struct sol_sigaction __user *p = (void __user *)A(old);
+ int (*sys_sigaction)(int,struct sigaction __user *,struct sigaction __user *) =
+ (int (*)(int,struct sigaction __user *,struct sigaction __user *))SYS(sigaction);
sig = mapsig(sig);
if (sig < 0) {
/* We cheat a little bit for Solaris only signals */
- if (old && clear_user((struct sol_sigaction *)A(old), sizeof(struct sol_sigaction)))
+ if (old && clear_user(p, sizeof(struct sol_sigaction)))
return -EFAULT;
return 0;
}
if (act) {
- if (get_user (tmp, &((struct sol_sigaction *)A(act))->sa_flags))
+ if (get_user (tmp, &p->sa_flags))
return -EFAULT;
s.sa_flags = 0;
if (tmp & SOLARIS_SA_ONSTACK) s.sa_flags |= SA_STACK;
if (tmp & SOLARIS_SA_NODEFER) s.sa_flags |= SA_NOMASK;
if (tmp & SOLARIS_SA_RESETHAND) s.sa_flags |= SA_ONESHOT;
if (tmp & SOLARIS_SA_NOCLDSTOP) s.sa_flags |= SA_NOCLDSTOP;
- if (get_user (tmp, &((struct sol_sigaction *)A(act))->sa_handler) ||
- copy_from_user (tmp2, &((struct sol_sigaction *)A(act))->sa_mask, 2*sizeof(u32)))
+ if (get_user (tmp, &p->sa_handler) ||
+ copy_from_user (tmp2, &p->sa_mask, 2*sizeof(u32)))
return -EFAULT;
s.sa_handler = (__sighandler_t)A(tmp);
if (mapin (tmp2, &s.sa_mask)) return -EINVAL;
- s.sa_restorer = 0;
+ s.sa_restorer = NULL;
}
set_fs(KERNEL_DS);
- ret = sys_sigaction(sig, act ? &s : NULL, old ? &s2 : NULL);
+ ret = sys_sigaction(sig, act ? (void __user *)&s : NULL,
+ old ? (void __user *)&s2 : NULL);
set_fs(old_fs);
if (ret) return ret;
if (old) {
if (s2.sa_flags & SA_NOMASK) tmp |= SOLARIS_SA_NODEFER;
if (s2.sa_flags & SA_ONESHOT) tmp |= SOLARIS_SA_RESETHAND;
if (s2.sa_flags & SA_NOCLDSTOP) tmp |= SOLARIS_SA_NOCLDSTOP;
- if (put_user (tmp, &((struct sol_sigaction *)A(old))->sa_flags) ||
- __put_user ((u32)(long)s2.sa_handler, &((struct sol_sigaction *)A(old))->sa_handler) ||
- copy_to_user (&((struct sol_sigaction *)A(old))->sa_mask, tmp2, 4*sizeof(u32)))
+ if (put_user (tmp, &p->sa_flags) ||
+ __put_user ((u32)(unsigned long)s2.sa_handler, &p->sa_handler) ||
+ copy_to_user (&p->sa_mask, tmp2, 4*sizeof(u32)))
return -EFAULT;
}
return 0;
}
if (mapout (&s, tmp)) return -EINVAL;
tmp[2] = 0; tmp[3] = 0;
- if (copy_to_user ((u32 *)A(set), tmp, sizeof(tmp)))
+ if (copy_to_user ((u32 __user *)A(set), tmp, sizeof(tmp)))
return -EFAULT;
return 0;
}
asmlinkage int solaris_wait(u32 stat_loc)
{
- int (*sys_wait4)(pid_t,unsigned int *, int, struct rusage *) =
- (int (*)(pid_t,unsigned int *, int, struct rusage *))SYS(wait4);
+ unsigned __user *p = (unsigned __user *)A(stat_loc);
+ int (*sys_wait4)(pid_t,unsigned __user *, int, struct rusage __user *) =
+ (int (*)(pid_t,unsigned __user *, int, struct rusage __user *))SYS(wait4);
int ret, status;
- ret = sys_wait4(-1, (unsigned int *)A(stat_loc), WUNTRACED, NULL);
+ ret = sys_wait4(-1, p, WUNTRACED, NULL);
if (ret >= 0 && stat_loc) {
- if (get_user (status, (unsigned int *)A(stat_loc)))
+ if (get_user (status, p))
return -EFAULT;
if (((status - 1) & 0xffff) < 0xff)
status = linux_to_solaris_signals[status & 0x7f] & 0x7f;
else if ((status & 0xff) == 0x7f)
status = (linux_to_solaris_signals[(status >> 8) & 0xff] << 8) | 0x7f;
- if (__put_user (status, (unsigned int *)A(stat_loc)))
+ if (__put_user (status, p))
return -EFAULT;
}
return ret;
asmlinkage int solaris_waitid(int idtype, s32 pid, u32 info, int options)
{
- int (*sys_wait4)(pid_t,unsigned int *, int, struct rusage *) =
- (int (*)(pid_t,unsigned int *, int, struct rusage *))SYS(wait4);
+ int (*sys_wait4)(pid_t,unsigned __user *, int, struct rusage __user *) =
+ (int (*)(pid_t,unsigned __user *, int, struct rusage __user *))SYS(wait4);
int opts, status, ret;
switch (idtype) {
if (options & SOLARIS_WUNTRACED) opts |= WUNTRACED;
if (options & SOLARIS_WNOHANG) opts |= WNOHANG;
current->state = TASK_RUNNING;
- ret = sys_wait4(pid, (unsigned int *)A(info), opts, NULL);
+ ret = sys_wait4(pid, (unsigned int __user *)A(info), opts, NULL);
if (ret < 0) return ret;
if (info) {
- struct sol_siginfo *s = (struct sol_siginfo *)A(info);
+ struct sol_siginfo __user *s = (void __user *)A(info);
- if (get_user (status, (unsigned int *)A(info)))
+ if (get_user (status, (unsigned int __user *)A(info)))
return -EFAULT;
if (__put_user (SOLARIS_SIGCLD, &s->si_signo) ||
return sunos_getsockopt(fd, level, optname, optval, optlen);
}
-asmlinkage int solaris_connect(int fd, struct sockaddr *addr, int addrlen)
+asmlinkage int solaris_connect(int fd, struct sockaddr __user *addr, int addrlen)
{
- int (*sys_connect)(int, struct sockaddr *, int) =
- (int (*)(int, struct sockaddr *, int))SYS(connect);
+ int (*sys_connect)(int, struct sockaddr __user *, int) =
+ (int (*)(int, struct sockaddr __user *, int))SYS(connect);
return sys_connect(fd, addr, addrlen);
}
-asmlinkage int solaris_accept(int fd, struct sockaddr *addr, int *addrlen)
+asmlinkage int solaris_accept(int fd, struct sockaddr __user *addr, int __user *addrlen)
{
- int (*sys_accept)(int, struct sockaddr *, int *) =
- (int (*)(int, struct sockaddr *, int *))SYS(accept);
+ int (*sys_accept)(int, struct sockaddr __user *, int __user *) =
+ (int (*)(int, struct sockaddr __user *, int __user *))SYS(accept);
return sys_accept(fd, addr, addrlen);
}
return fl;
}
-asmlinkage int solaris_recvfrom(int s, char *buf, int len, int flags, u32 from, u32 fromlen)
+asmlinkage int solaris_recvfrom(int s, char __user *buf, int len, int flags, u32 from, u32 fromlen)
{
- int (*sys_recvfrom)(int, void *, size_t, unsigned, struct sockaddr *, int *) =
- (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(recvfrom);
+ int (*sys_recvfrom)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *) =
+ (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(recvfrom);
- return sys_recvfrom(s, buf, len, solaris_to_linux_msgflags(flags), (struct sockaddr *)A(from), (int *)A(fromlen));
+ return sys_recvfrom(s, buf, len, solaris_to_linux_msgflags(flags), A(from), A(fromlen));
}
-asmlinkage int solaris_recv(int s, char *buf, int len, int flags)
+asmlinkage int solaris_recv(int s, char __user *buf, int len, int flags)
{
- int (*sys_recvfrom)(int, void *, size_t, unsigned, struct sockaddr *, int *) =
- (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(recvfrom);
+ int (*sys_recvfrom)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *) =
+ (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(recvfrom);
return sys_recvfrom(s, buf, len, solaris_to_linux_msgflags(flags), NULL, NULL);
}
-asmlinkage int solaris_sendto(int s, char *buf, int len, int flags, u32 to, u32 tolen)
+asmlinkage int solaris_sendto(int s, char __user *buf, int len, int flags, u32 to, u32 tolen)
{
- int (*sys_sendto)(int, void *, size_t, unsigned, struct sockaddr *, int *) =
- (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(sendto);
+ int (*sys_sendto)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *) =
+ (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(sendto);
- return sys_sendto(s, buf, len, solaris_to_linux_msgflags(flags), (struct sockaddr *)A(to), (int *)A(tolen));
+ return sys_sendto(s, buf, len, solaris_to_linux_msgflags(flags), A(to), A(tolen));
}
asmlinkage int solaris_send(int s, char *buf, int len, int flags)
};
static inline int msghdr_from_user32_to_kern(struct msghdr *kmsg,
- struct sol_nmsghdr *umsg)
+ struct sol_nmsghdr __user *umsg)
{
u32 tmp1, tmp2, tmp3;
int err;
if (err)
return -EFAULT;
- kmsg->msg_name = (void *)A(tmp1);
- kmsg->msg_iov = (struct iovec *)A(tmp2);
- kmsg->msg_control = (void *)A(tmp3);
+ kmsg->msg_name = A(tmp1);
+ kmsg->msg_iov = A(tmp2);
+ kmsg->msg_control = A(tmp3);
err = get_user(kmsg->msg_namelen, &umsg->msg_namelen);
err |= get_user(kmsg->msg_controllen, &umsg->msg_controllen);
return err;
}
-asmlinkage int solaris_sendmsg(int fd, struct sol_nmsghdr *user_msg, unsigned user_flags)
+asmlinkage int solaris_sendmsg(int fd, struct sol_nmsghdr __user *user_msg, unsigned user_flags)
{
struct socket *sock;
char address[MAX_SOCK_ADDR];
total_len = err;
if(kern_msg.msg_controllen) {
- struct sol_cmsghdr *ucmsg = (struct sol_cmsghdr *)kern_msg.msg_control;
+ struct sol_cmsghdr __user *ucmsg = kern_msg.msg_control;
unsigned long *kcmsg;
compat_size_t cmlen;
return err;
}
-asmlinkage int solaris_recvmsg(int fd, struct sol_nmsghdr *user_msg, unsigned int user_flags)
+asmlinkage int solaris_recvmsg(int fd, struct sol_nmsghdr __user *user_msg, unsigned int user_flags)
{
struct iovec iovstack[UIO_FASTIOV];
struct msghdr kern_msg;
char addr[MAX_SOCK_ADDR];
struct socket *sock;
struct iovec *iov = iovstack;
- struct sockaddr *uaddr;
- int *uaddr_len;
+ struct sockaddr __user *uaddr;
+ int __user *uaddr_len;
unsigned long cmsg_ptr;
int err, total_len, len = 0;
SOLD("done");
}
-static int timod_optmgmt(unsigned int fd, int flag, char *opt_buf, int opt_len, int do_ret)
+static int timod_optmgmt(unsigned int fd, int flag, char __user *opt_buf, int opt_len, int do_ret)
{
int error, failed;
int ret_space, ret_len;
return 0;
}
-int timod_putmsg(unsigned int fd, char *ctl_buf, int ctl_len,
- char *data_buf, int data_len, int flags)
+int timod_putmsg(unsigned int fd, char __user *ctl_buf, int ctl_len,
+ char __user *data_buf, int data_len, int flags)
{
int ret, error, terror;
char *buf;
struct sol_socket_struct *sock;
mm_segment_t old_fs = get_fs();
long args[6];
- int (*sys_socketcall)(int, unsigned long *) =
- (int (*)(int, unsigned long *))SYS(socketcall);
- int (*sys_sendto)(int, void *, size_t, unsigned, struct sockaddr *, int) =
- (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int))SYS(sendto);
+ int (*sys_socketcall)(int, unsigned long __user *) =
+ (int (*)(int, unsigned long __user *))SYS(socketcall);
+ int (*sys_sendto)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int) =
+ (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int))SYS(sendto);
filp = current->files->fd[fd];
ino = filp->f_dentry->d_inode;
sock = (struct sol_socket_struct *)filp->private_data;
SOLD("entry");
- if (get_user(ret, (int *)A(ctl_buf)))
+ if (get_user(ret, (int __user *)A(ctl_buf)))
return -EFAULT;
switch (ret) {
case T_BIND_REQ:
printk("\n");
}
#endif
- err = sys_sendto(fd, data_buf, data_len, 0, req.DEST_length > 0 ? (struct sockaddr*)(ctl_buf+req.DEST_offset) : NULL, req.DEST_length);
+ err = sys_sendto(fd, data_buf, data_len, 0, req.DEST_length > 0 ? (struct sockaddr __user *)(ctl_buf+req.DEST_offset) : NULL, req.DEST_length);
if (err == data_len)
return 0;
if(err >= 0) {
return -EINVAL;
}
-int timod_getmsg(unsigned int fd, char *ctl_buf, int ctl_maxlen, s32 *ctl_len,
- char *data_buf, int data_maxlen, s32 *data_len, int *flags_p)
+int timod_getmsg(unsigned int fd, char __user *ctl_buf, int ctl_maxlen, s32 __user *ctl_len,
+ char __user *data_buf, int data_maxlen, s32 __user *data_len, int *flags_p)
{
int error;
int oldflags;
struct T_unitdata_ind udi;
mm_segment_t old_fs = get_fs();
long args[6];
- char *tmpbuf;
+ char __user *tmpbuf;
int tmplen;
- int (*sys_socketcall)(int, unsigned long *) =
- (int (*)(int, unsigned long *))SYS(socketcall);
- int (*sys_recvfrom)(int, void *, size_t, unsigned, struct sockaddr *, int *);
+ int (*sys_socketcall)(int, unsigned long __user *) =
+ (int (*)(int, unsigned long __user *))SYS(socketcall);
+ int (*sys_recvfrom)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *);
SOLD("entry");
SOLDD(("%u %p %d %p %p %d %p %d\n", fd, ctl_buf, ctl_maxlen, ctl_len, data_buf, data_maxlen, data_len, *flags_p));
oldflags = filp->f_flags;
filp->f_flags |= O_NONBLOCK;
SOLD("calling recvfrom");
- sys_recvfrom = (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int *))SYS(recvfrom);
- error = sys_recvfrom(fd, data_buf, data_maxlen, 0, (struct sockaddr*)tmpbuf, ctl_len);
+ sys_recvfrom = (int (*)(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *))SYS(recvfrom);
+ error = sys_recvfrom(fd, data_buf, data_maxlen, 0, (struct sockaddr __user *)tmpbuf, ctl_len);
filp->f_flags = oldflags;
if (error < 0)
return error;
{
struct file *filp;
struct inode *ino;
- struct strbuf *ctlptr, *datptr;
+ struct strbuf __user *ctlptr;
+ struct strbuf __user *datptr;
struct strbuf ctl, dat;
- int *flgptr;
+ int __user *flgptr;
int flags;
int error = -EBADF;
if (!ino->i_sock)
goto out;
- ctlptr = (struct strbuf *)A(arg1);
- datptr = (struct strbuf *)A(arg2);
- flgptr = (int *)A(arg3);
+ ctlptr = (struct strbuf __user *)A(arg1);
+ datptr = (struct strbuf __user *)A(arg2);
+ flgptr = (int __user *)A(arg3);
error = -EFAULT;
goto out;
}
- error = timod_getmsg(fd,(char*)A(ctl.buf),ctl.maxlen,&ctlptr->len,
- (char*)A(dat.buf),dat.maxlen,&datptr->len,&flags);
+ error = timod_getmsg(fd,A(ctl.buf),ctl.maxlen,&ctlptr->len,
+ A(dat.buf),dat.maxlen,&datptr->len,&flags);
if (!error && put_user(flags,flgptr))
error = -EFAULT;
{
struct file *filp;
struct inode *ino;
- struct strbuf *ctlptr, *datptr;
+ struct strbuf __user *ctlptr;
+ struct strbuf __user *datptr;
struct strbuf ctl, dat;
int flags = (int) arg3;
int error = -EBADF;
(imajor(ino) != 30 || iminor(ino) != 1))
goto out;
- ctlptr = (struct strbuf *)A(arg1);
- datptr = (struct strbuf *)A(arg2);
+ ctlptr = A(arg1);
+ datptr = A(arg2);
error = -EFAULT;
dat.buf = 0;
}
- error = timod_putmsg(fd,(char*)A(ctl.buf),ctl.len,
- (char*)A(dat.buf),dat.len,flags);
+ error = timod_putmsg(fd,A(ctl.buf),ctl.len,
+ A(dat.buf),dat.len,flags);
out:
unlock_kernel();
SOLD("done");
timer_alive = 1;
unlock_kernel();
- return 0;
+ return nonseekable_open(inode, file);
}
extern void stop_watchdog(int in_fd, int out_fd);
static ssize_t harddog_write(struct file *file, const char *data, size_t len,
loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
/*
* Refresh the timer.
*/
oldvalp = (void *) A(a32.oldval);
newvalp = (void *) A(a32.newval);
- if ((oldvalp && get_user(oldlen, (int *) A(a32.oldlenp)))
+ if ((oldvalp && get_user(oldlen, (int __user *)compat_ptr(a32.oldlenp)))
|| !access_ok(VERIFY_WRITE, namep, 0)
|| !access_ok(VERIFY_WRITE, oldvalp, 0)
|| !access_ok(VERIFY_WRITE, newvalp, 0))
unlock_kernel();
set_fs(old_fs);
- if (oldvalp && put_user (oldlen, (int *) A(a32.oldlenp)))
+ if (oldvalp && put_user (oldlen, (int __user *)compat_ptr(a32.oldlenp)))
return -EFAULT;
return ret;
long
sys32_timer_create(u32 clock, struct sigevent32 __user *se32, timer_t __user *timer_id)
{
- struct sigevent se;
- mm_segment_t oldfs;
- long err;
-
+ struct sigevent __user *p = NULL;
if (se32) {
+ struct sigevent se;
+ p = compat_alloc_user_space(sizeof(struct sigevent));
memset(&se, 0, sizeof(struct sigevent));
if (get_user(se.sigev_value.sival_int, &se32->sigev_value) ||
__get_user(se.sigev_signo, &se32->sigev_signo) ||
__get_user(se.sigev_notify, &se32->sigev_notify) ||
__copy_from_user(&se._sigev_un._pad, &se32->payload,
- sizeof(se32->payload)))
+ sizeof(se32->payload)) ||
+ copy_to_user(p, &se, sizeof(se)))
return -EFAULT;
}
- if (!access_ok(VERIFY_WRITE,timer_id,sizeof(timer_t)))
- return -EFAULT;
-
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_timer_create(clock, se32 ? &se : NULL, timer_id);
- set_fs(oldfs);
-
- return err;
+ return sys_timer_create(clock, p, timer_id);
}
long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
-#ifdef CONFIG_PCI_USE_VECTOR
+#ifdef CONFIG_PCI_MSI
BUILD_14_IRQS(0xe)
#endif
IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
IRQLIST_16(0xc), IRQLIST_16(0xd)
-#ifdef CONFIG_PCI_USE_VECTOR
+#ifdef CONFIG_PCI_MSI
, IRQLIST_14(0xe)
#endif
} irq_2_pin[PIN_MAP_SIZE];
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
-#ifdef CONFIG_PCI_USE_VECTOR
+#ifdef CONFIG_PCI_MSI
#define vector_to_irq(vector) \
(platform_legacy_irq(vector) ? vector : vector_irq[vector])
#else
/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
-#ifdef CONFIG_PCI_USE_VECTOR
+#ifdef CONFIG_PCI_MSI
int assign_irq_vector(int irq)
#else
int __init assign_irq_vector(int irq)
spin_unlock_irqrestore(&ioapic_lock, flags);
}
-#ifdef CONFIG_PCI_USE_VECTOR
+#ifdef CONFIG_PCI_MSI
static unsigned int startup_edge_ioapic_vector(unsigned int vector)
{
int irq = vector_to_irq(vector);
return;
}
printk(" failed :(.\n");
- panic("IO-APIC + timer doesn't work! pester mingo@redhat.com");
+ panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
}
/*
int apic_version [MAX_APICS];
unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-cpumask_t mp_bus_to_cpumask [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = CPU_MASK_ALL };
+cpumask_t pci_bus_to_cpumask [256] = { [0 ... 255] = CPU_MASK_ALL };
int mp_current_pci_id = 0;
/* I/O APIC entries */
static spinlock_t iommu_bitmap_lock = SPIN_LOCK_UNLOCKED;
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
+static u32 gart_unmapped_entry;
+
#define GPTE_VALID 1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
static void flush_gart(struct pci_dev *dev)
{
unsigned long flags;
- int bus = dev ? dev->bus->number : -1;
- cpumask_t bus_cpumask = pcibus_to_cpumask(bus);
int flushed = 0;
int i;
u32 w;
if (!northbridges[i])
continue;
- if (bus >= 0 && !(cpu_isset(i, bus_cpumask)))
- continue;
pci_write_config_dword(northbridges[i], 0x9c,
northbridge_flush_word[i] | 1);
/* Make sure the hardware actually executed the flush. */
flushed++;
}
if (!flushed)
- printk("nothing to flush? %d\n", bus);
+ printk("nothing to flush?\n");
need_flush = 0;
}
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
unsigned long pages = 0;
int need = 0, nextneed;
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb)
+ return swiotlb_map_sg(&dev->dev,sg,nents,dir);
+#endif
+
BUG_ON(dir == PCI_DMA_NONE);
if (nents == 0)
return 0;
iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
npages = to_pages(dma_addr, size);
for (i = 0; i < npages; i++) {
- iommu_gatt_base[iommu_page + i] = 0;
+ iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
CLEAR_LEAK(iommu_page + i);
}
free_iommu(iommu_page, npages);
unsigned long aper_size;
unsigned long iommu_start;
struct pci_dev *dev;
-
+ unsigned long scratch;
+ long i;
#ifndef CONFIG_AGP_AMD64
no_agp = 1;
return -1;
}
}
-
+
aper_size = info.aper_size * 1024 * 1024;
iommu_size = check_iommu_size(info.aper_base, aper_size);
iommu_pages = iommu_size >> PAGE_SHIFT;
*/
clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);
+ /*
+ * Try to work around a bug (thanks to BenH)
+ * Set unmapped entries to a scratch page instead of 0.
+ * Any prefetches that hit unmapped entries won't get a bus abort
+ * then.
+ */
+ scratch = get_zeroed_page(GFP_KERNEL);
+ if (!scratch)
+ panic("Cannot allocate iommu scratch page");
+ gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
+ for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
+ iommu_gatt_base[i] = gart_unmapped_entry;
+
for_all_nb(dev) {
u32 flag;
int cpu = PCI_SLOT(dev->devfn) - 24;
if (me->used_math) {
fp = get_stack(ka, regs, sizeof(struct _fpstate));
- frame = (void __user *)round_down((u64)fp - sizeof(struct rt_sigframe), 16) - 8;
+ frame = (void __user *)round_down((unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) {
goto give_sigsegv;
Dprintk("CPU has booted.\n");
} else {
boot_error = 1;
- if (*((volatile unsigned char *)phys_to_virt(8192))
+ if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
== 0xA5)
/* trampoline started but...? */
printk("Stuck ??\n");
clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
cpucount--;
}
-
- /* mark "stuck" area as not stuck */
- *((volatile unsigned *)phys_to_virt(8192)) = 0;
}
cycles_t cacheflush_time;
# Makefile for the linux x86_64-specific parts of the memory manager.
#
-obj-y := init.o fault.o ioremap.o extable.o pageattr.o
+obj-y := init.o fault.o ioremap.o extable.o pageattr.o mmap.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_DISCONTIGMEM) += numa.o
obj-$(CONFIG_K8_NUMA) += k8topology.o
__flush_tlb_all();
}
-int page_is_ram (unsigned long pagenr)
+static inline int page_is_ram (unsigned long pagenr)
{
int i;
return 0;
}
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain address is
+ * valid. The argument is a physical page number.
+ *
+ *
+ * On x86-64, access has to be given to the first megabyte of ram because that area
+ * contains bios code and data regions used by X and dosemu and similar apps.
+ * Access has to be given to non-kernel-ram areas as well, these contain the PCI
+ * mmio resources as well as potential bios/acpi data regions.
+ */
+int devmem_is_allowed(unsigned long pagenr)
+{
+ if (pagenr <= 256)
+ return 1;
+ if (!page_is_ram(pagenr))
+ return 1;
+ return 0;
+}
+
+
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
kcore_vsyscall;
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
-CONFIG_STANDALONE=y
#
# General setup
CONFIG_PCI_BIOS=y
CONFIG_PCI_DIRECT=y
CONFIG_PCI_MMCONFIG=y
-# CONFIG_PCI_USE_VECTOR is not set
+CONFIG_PCI_MSI=y
CONFIG_PCI_LEGACY_PROC=y
# CONFIG_PCI_NAMES is not set
CONFIG_ISA=y
#
# Generic Driver Options
#
+CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
CONFIG_MTD_ABSENT=m
-# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=y
-# CONFIG_BLK_DEV_IDESCSI is not set
+CONFIG_BLK_DEV_IDESCSI=m
# CONFIG_IDE_TASK_IOCTL is not set
# CONFIG_IDE_TASKFILE_IO is not set
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CLK_JIFFIES=y
+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
+# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
+CONFIG_TUX=m
+
+#
+# TUX options
+#
+CONFIG_TUX_EXTCGI=y
+# CONFIG_TUX_EXTENDED_LOG is not set
+# CONFIG_TUX_DEBUG is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_BONDING=m
CONFIG_FB_HGA_ACCEL=y
CONFIG_FB_RIVA=m
# CONFIG_FB_RIVA_I2C is not set
+# CONFIG_FB_RIVA_DEBUG is not set
CONFIG_FB_I810=m
CONFIG_FB_I810_GTF=y
CONFIG_FB_MATROX=m
CONFIG_JFFS2_ZLIB=y
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
-# CONFIG_JFFS2_PROC is not set
CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
# CONFIG_HPFS_FS is not set
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
+CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
CONFIG_EARLY_PRINTK=y
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_DEBUG_STACK_USAGE is not set
-CONFIG_DEBUG_SLAB=y
+# CONFIG_DEBUG_SLAB is not set
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_PAGEALLOC is not set
-CONFIG_DEBUG_HIGHMEM=y
+# CONFIG_DEBUG_HIGHMEM is not set
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FRAME_POINTER is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MD5=m
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_AES_586=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
-CONFIG_STANDALONE=y
CONFIG_BROKEN_ON_SMP=y
#
#
# Generic Driver Options
#
+CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
CONFIG_MTD_ABSENT=m
-# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=y
-# CONFIG_BLK_DEV_IDESCSI is not set
+CONFIG_BLK_DEV_IDESCSI=m
# CONFIG_IDE_TASK_IOCTL is not set
# CONFIG_IDE_TASKFILE_IO is not set
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CLK_JIFFIES=y
+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
+# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
+CONFIG_TUX=m
+
+#
+# TUX options
+#
+CONFIG_TUX_EXTCGI=y
+# CONFIG_TUX_EXTENDED_LOG is not set
+# CONFIG_TUX_DEBUG is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_BONDING=m
CONFIG_FB_HGA_ACCEL=y
CONFIG_FB_RIVA=m
# CONFIG_FB_RIVA_I2C is not set
+# CONFIG_FB_RIVA_DEBUG is not set
CONFIG_FB_I810=m
CONFIG_FB_I810_GTF=y
CONFIG_FB_MATROX=m
CONFIG_JFFS2_ZLIB=y
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
-# CONFIG_JFFS2_PROC is not set
CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
# CONFIG_HPFS_FS is not set
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
+CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
CONFIG_EARLY_PRINTK=y
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_DEBUG_STACK_USAGE is not set
-CONFIG_DEBUG_SLAB=y
+# CONFIG_DEBUG_SLAB is not set
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_PAGEALLOC is not set
-CONFIG_DEBUG_HIGHMEM=y
+# CONFIG_DEBUG_HIGHMEM is not set
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FRAME_POINTER is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MD5=m
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_AES_586=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
-CONFIG_STANDALONE=y
#
# General setup
CONFIG_PCI_BIOS=y
CONFIG_PCI_DIRECT=y
CONFIG_PCI_MMCONFIG=y
-# CONFIG_PCI_USE_VECTOR is not set
+CONFIG_PCI_MSI=y
CONFIG_PCI_LEGACY_PROC=y
# CONFIG_PCI_NAMES is not set
CONFIG_ISA=y
#
# Generic Driver Options
#
+CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
CONFIG_MTD_ABSENT=m
-# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=y
-# CONFIG_BLK_DEV_IDESCSI is not set
+CONFIG_BLK_DEV_IDESCSI=m
# CONFIG_IDE_TASK_IOCTL is not set
# CONFIG_IDE_TASKFILE_IO is not set
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CLK_JIFFIES=y
+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
+# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
+CONFIG_TUX=m
+
+#
+# TUX options
+#
+CONFIG_TUX_EXTCGI=y
+# CONFIG_TUX_EXTENDED_LOG is not set
+# CONFIG_TUX_DEBUG is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_BONDING=m
CONFIG_FB_HGA_ACCEL=y
CONFIG_FB_RIVA=m
# CONFIG_FB_RIVA_I2C is not set
+# CONFIG_FB_RIVA_DEBUG is not set
CONFIG_FB_I810=m
CONFIG_FB_I810_GTF=y
CONFIG_FB_MATROX=m
CONFIG_JFFS2_ZLIB=y
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
-# CONFIG_JFFS2_PROC is not set
CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
# CONFIG_HPFS_FS is not set
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
+CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
CONFIG_EARLY_PRINTK=y
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_DEBUG_STACK_USAGE is not set
-CONFIG_DEBUG_SLAB=y
+# CONFIG_DEBUG_SLAB is not set
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_PAGEALLOC is not set
-CONFIG_DEBUG_HIGHMEM=y
+# CONFIG_DEBUG_HIGHMEM is not set
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FRAME_POINTER is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MD5=m
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_AES_586=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
-CONFIG_STANDALONE=y
CONFIG_BROKEN_ON_SMP=y
#
#
# Generic Driver Options
#
+CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
CONFIG_MTD_ABSENT=m
-# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDETAPE=m
CONFIG_BLK_DEV_IDEFLOPPY=y
-# CONFIG_BLK_DEV_IDESCSI is not set
+CONFIG_BLK_DEV_IDESCSI=m
# CONFIG_IDE_TASK_IOCTL is not set
# CONFIG_IDE_TASKFILE_IO is not set
# QoS and/or fair queueing
#
CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CLK_JIFFIES=y
+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
+# CONFIG_NET_SCH_CLK_CPU is not set
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
+CONFIG_TUX=m
+
+#
+# TUX options
+#
+CONFIG_TUX_EXTCGI=y
+# CONFIG_TUX_EXTENDED_LOG is not set
+# CONFIG_TUX_DEBUG is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_BONDING=m
CONFIG_FB_HGA_ACCEL=y
CONFIG_FB_RIVA=m
# CONFIG_FB_RIVA_I2C is not set
+# CONFIG_FB_RIVA_DEBUG is not set
CONFIG_FB_I810=m
CONFIG_FB_I810_GTF=y
CONFIG_FB_MATROX=m
CONFIG_JFFS2_ZLIB=y
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
-# CONFIG_JFFS2_PROC is not set
CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
# CONFIG_HPFS_FS is not set
# CONFIG_SMB_NLS_DEFAULT is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
+CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
CONFIG_EARLY_PRINTK=y
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_DEBUG_STACK_USAGE is not set
-CONFIG_DEBUG_SLAB=y
+# CONFIG_DEBUG_SLAB is not set
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_PAGEALLOC is not set
-CONFIG_DEBUG_HIGHMEM=y
+# CONFIG_DEBUG_HIGHMEM is not set
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_FRAME_POINTER is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MD5=m
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_AES_586=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_TEA=m
config CRYPTO_AES
tristate "AES cipher algorithms"
- depends on CRYPTO
+ depends on CRYPTO && !(X86 && !X86_64)
help
AES cipher algorithms (FIPS-197). AES uses the Rijndael
algorithm.
See http://csrc.nist.gov/CryptoToolkit/aes/ for more information.
+config CRYPTO_AES_586
+ tristate "AES cipher algorithms (i586)"
+ depends on CRYPTO && (X86 && !X86_64)
+ help
+ AES cipher algorithms (FIPS-197). AES uses the Rijndael
+ algorithm.
+
+ Rijndael appears to be consistently a very good performer in
+ both hardware and software across a wide range of computing
+ environments regardless of its use in feedback or non-feedback
+ modes. Its key setup time is excellent, and its key agility is
+ good. Rijndael's very low memory requirements make it very well
+ suited for restricted-space environments, in which it also
+ demonstrates excellent performance. Rijndael's operations are
+ among the easiest to defend against power and timing attacks.
+
+ The AES specifies three key sizes: 128, 192 and 256 bits
+
+ See http://csrc.nist.gov/encryption/aes/ for more information.
+
config CRYPTO_CAST5
tristate "CAST5 (CAST-128) cipher algorithm"
depends on CRYPTO
{
struct scatter_walk walk_in, walk_out;
const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
- u8 tmp_src[nbytes > src->length ? bsize : 0];
- u8 tmp_dst[nbytes > dst->length ? bsize : 0];
+ u8 tmp_src[bsize];
+ u8 tmp_dst[bsize];
if (!nbytes)
return 0;
{
printk("- Added public key %X%X\n", pk->keyid[0], pk->keyid[1]);
- if (pk->expiredate && pk->expiredate < xtime.tv_sec)
- printk(" - public key has expired\n");
+// if (pk->expiredate && pk->expiredate < xtime.tv_sec)
+// printk(" - public key has expired\n");
if (pk->timestamp > xtime.tv_sec )
printk(" - key was been created %lu seconds in future\n",
struct ksign_public_key *pk;
uint8_t sha1[SHA1_DIGEST_SIZE];
MPI result = NULL;
- int rc = 0, i;
+ int rc = 0;
pk = ksign_get_public_key(sig->keyid);
if (!pk) {
acpi_set_register(ACPI_BITREG_RT_CLOCK_ENABLE, 1, ACPI_MTX_LOCK);
- file->f_pos += count;
+ *ppos += count;
result = 0;
end:
{
acpi_status status = AE_OK;
struct acpi_buffer dsdt = {ACPI_ALLOCATE_BUFFER, NULL};
- void *data = NULL;
- size_t size = 0;
+ ssize_t res;
ACPI_FUNCTION_TRACE("acpi_system_read_dsdt");
if (ACPI_FAILURE(status))
return_VALUE(-ENODEV);
- if (*ppos < dsdt.length) {
- data = dsdt.pointer + file->f_pos;
- size = dsdt.length - file->f_pos;
- if (size > count)
- size = count;
- if (copy_to_user(buffer, data, size)) {
- acpi_os_free(dsdt.pointer);
- return_VALUE(-EFAULT);
- }
- }
-
+ res = simple_read_from_buffer(buffer, count, ppos,
+ dsdt.pointer, dsdt.length);
acpi_os_free(dsdt.pointer);
- *ppos += size;
-
- return_VALUE(size);
+ return_VALUE(res);
}
{
acpi_status status = AE_OK;
struct acpi_buffer fadt = {ACPI_ALLOCATE_BUFFER, NULL};
- void *data = NULL;
- size_t size = 0;
+ ssize_t res;
ACPI_FUNCTION_TRACE("acpi_system_read_fadt");
if (ACPI_FAILURE(status))
return_VALUE(-ENODEV);
- if (*ppos < fadt.length) {
- data = fadt.pointer + file->f_pos;
- size = fadt.length - file->f_pos;
- if (size > count)
- size = count;
- if (copy_to_user(buffer, data, size)) {
- acpi_os_free(fadt.pointer);
- return_VALUE(-EFAULT);
- }
- }
-
+ res = simple_read_from_buffer(buffer, count, ppos,
+ fadt.pointer, fadt.length);
acpi_os_free(fadt.pointer);
- *ppos += size;
-
- return_VALUE(size);
+ return_VALUE(res);
}
if (alignment <= 0x10) {
t = kmalloc (size, flags);
- if ((unsigned int)t & (alignment-1)) {
+ if ((unsigned long)t & (alignment-1)) {
printk ("Kmalloc doesn't align things correctly! %p\n", t);
kfree (t);
return aligned_kmalloc (size, flags, alignment * 4);
#endif
-extern const struct atmdev_ops fore200e_ops;
-extern const struct fore200e_bus fore200e_bus[];
+static const struct atmdev_ops fore200e_ops;
+static const struct fore200e_bus fore200e_bus[];
static struct fore200e* fore200e_boards = NULL;
#define IF_IADBG_SUNI_STAT 0x02000000 // suni statistics
#define IF_IADBG_RESET 0x04000000
-extern unsigned int IADebugFlag;
-
#define IF_IADBG(f) if (IADebugFlag & (f))
#ifdef CONFIG_ATM_IA_DEBUG /* Debug build */
menu "Generic Driver Options"
+config STANDALONE
+ bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL
+ default y
+ help
+ Select this option if you don't have magic firmware for drivers that
+ need it.
+
+ If unsure, say Y.
+
config PREVENT_FIRMWARE_BUILD
bool "Prevent firmware from being built"
default y
Say Y here to support the SWIM (Super Woz Integrated Machine) IOP
floppy controller on the Macintosh IIfx and Quadra 900/950.
+config MAC_FLOPPY
+ tristate "Support for PowerMac floppy"
+ depends on PPC_PMAC && !PPC_PMAC64
+ help
+ If you have a SWIM-3 (Super Woz Integrated Machine 3; from Apple)
+ floppy controller, say Y here. Most commonly found in PowerMacs.
+
config BLK_DEV_PS2
tristate "PS/2 ESDI hard disk support"
depends on MCA && MCA_LEGACY
static void fd_deselect( void );
static void fd_motor_off_timer( unsigned long dummy );
static void check_change( unsigned long dummy );
-static __inline__ void set_head_settle_flag( void );
-static __inline__ int get_head_settle_flag( void );
static irqreturn_t floppy_irq (int irq, void *dummy, struct pt_regs *fp);
static void fd_error( void );
static int do_format(int drive, int type, struct atari_format_descr *desc);
static void fd_times_out( unsigned long dummy );
static void finish_fdc( void );
static void finish_fdc_done( int dummy );
-static __inline__ void copy_buffer( void *from, void *to);
static void setup_req_params( int drive );
static void redo_fd_request( void);
static int fd_ioctl( struct inode *inode, struct file *filp, unsigned int
static struct timer_list fd_timer =
TIMER_INITIALIZER(check_change, 0, 0);
-static inline void
-start_motor_off_timer(void)
+static inline void start_motor_off_timer(void)
{
mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY);
MotorOffTrys = 0;
}
-static inline void
-start_check_change_timer( void )
+static inline void start_check_change_timer( void )
{
mod_timer(&fd_timer, jiffies + CHECK_CHANGE_DELAY);
}
-static inline void
-start_timeout(void)
+static inline void start_timeout(void)
{
mod_timer(&timeout_timer, jiffies + FLOPPY_TIMEOUT);
}
-static inline void
-stop_timeout(void)
+static inline void stop_timeout(void)
{
del_timer(&timeout_timer);
}
* seek operation, because we don't use seeks with verify.
*/
-static __inline__ void set_head_settle_flag( void )
+static inline void set_head_settle_flag(void)
{
HeadSettleFlag = FDCCMDADD_E;
}
-static __inline__ int get_head_settle_flag( void )
+static inline int get_head_settle_flag(void)
{
int tmp = HeadSettleFlag;
HeadSettleFlag = 0;
return( tmp );
}
+static inline void copy_buffer(void *from, void *to)
+{
+ ulong *p1 = (ulong *)from, *p2 = (ulong *)to;
+ int cnt;
+
+ for (cnt = 512/4; cnt; cnt--)
+ *p2++ = *p1++;
+}
+
return 0;
}
-static __inline__ void copy_buffer(void *from, void *to)
-{
- ulong *p1 = (ulong *)from, *p2 = (ulong *)to;
- int cnt;
-
- for( cnt = 512/4; cnt; cnt-- )
- *p2++ = *p1++;
-}
-
/* This sets up the global variables describing the current request. */
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/spinlock.h>
+#include <linux/compat.h>
#include <asm/uaccess.h>
#include <asm/io.h>
int cciss_ioctl32_passthru(unsigned int fd, unsigned cmd, unsigned long arg,
struct file *file)
{
- IOCTL32_Command_struct *arg32 =
- (IOCTL32_Command_struct *) arg;
+ IOCTL32_Command_struct __user *arg32 =
+ (IOCTL32_Command_struct __user *) arg;
IOCTL_Command_struct arg64;
- mm_segment_t old_fs;
+ IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
int err;
- unsigned long cp;
+ u32 cp;
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
err |= get_user(arg64.buf_size, &arg32->buf_size);
err |= get_user(cp, &arg32->buf);
- arg64.buf = (BYTE *)cp;
+ arg64.buf = compat_ptr(cp);
+ err |= copy_to_user(p, &arg64, sizeof(arg64));
if (err)
return -EFAULT;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_ioctl(fd, CCISS_PASSTHRU, (unsigned long) &arg64);
- set_fs(old_fs);
+ err = sys_ioctl(fd, CCISS_PASSTHRU, (unsigned long) p);
if (err)
return err;
- err |= copy_to_user(&arg32->error_info, &arg64.error_info, sizeof(&arg32->error_info));
+ err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(&arg32->error_info));
if (err)
return -EFAULT;
return err;
}
+
int cciss_ioctl32_big_passthru(unsigned int fd, unsigned cmd, unsigned long arg,
struct file *file)
{
- BIG_IOCTL32_Command_struct *arg32 =
- (BIG_IOCTL32_Command_struct *) arg;
+ BIG_IOCTL32_Command_struct __user *arg32 =
+ (BIG_IOCTL32_Command_struct __user *) arg;
BIG_IOCTL_Command_struct arg64;
- mm_segment_t old_fs;
+ BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
int err;
- unsigned long cp;
+ u32 cp;
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
err |= get_user(arg64.buf_size, &arg32->buf_size);
err |= get_user(arg64.malloc_size, &arg32->malloc_size);
err |= get_user(cp, &arg32->buf);
- arg64.buf = (BYTE *)cp;
+ arg64.buf = compat_ptr(cp);
+ err |= copy_to_user(p, &arg64, sizeof(arg64));
if (err)
return -EFAULT;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_ioctl(fd, CCISS_BIG_PASSTHRU, (unsigned long) &arg64);
- set_fs(old_fs);
+ err = sys_ioctl(fd, CCISS_BIG_PASSTHRU, (unsigned long) p);
if (err)
return err;
- err |= copy_to_user(&arg32->error_info, &arg64.error_info, sizeof(&arg32->error_info));
+ err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(&arg32->error_info));
if (err)
return -EFAULT;
return err;
printk("\n");
} else
DPRINT("botched floppy option\n");
- DPRINT("Read linux/Documentation/floppy.txt\n");
+ DPRINT("Read Documentation/floppy.txt\n");
return 0;
}
return 0;
}
+EXPORT_SYMBOL(blk_queue_resize_tags);
+
/**
* blk_queue_end_tag - end tag operations for a request
* @q: the request queue for the device
*
* A matching blk_rq_unmap_user() must be issued at the end of io, while
* still in process context.
+ *
+ * Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ * before being submitted to the device, as pages mapped may be out of
+ reach. It's the caller's responsibility to make sure this happens. The
+ * original bio must be passed back in to blk_rq_unmap_user() for proper
+ * unmapping.
*/
struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
unsigned int len)
{
- struct request *rq = NULL;
- char *buf = NULL;
+ unsigned long uaddr;
+ struct request *rq;
struct bio *bio;
- int ret;
+
+ if (len > (q->max_sectors << 9))
+ return ERR_PTR(-EINVAL);
+ if ((!len && ubuf) || (len && !ubuf))
+ return ERR_PTR(-EINVAL);
rq = blk_get_request(q, rw, __GFP_WAIT);
if (!rq)
return ERR_PTR(-ENOMEM);
- bio = bio_map_user(q, NULL, (unsigned long) ubuf, len, rw == READ);
- if (!bio) {
- int bytes = (len + 511) & ~511;
-
- buf = kmalloc(bytes, q->bounce_gfp | GFP_USER);
- if (!buf) {
- ret = -ENOMEM;
- goto fault;
- }
-
- if (rw == WRITE) {
- if (copy_from_user(buf, ubuf, len)) {
- ret = -EFAULT;
- goto fault;
- }
- } else
- memset(buf, 0, len);
- }
+ /*
+ * if alignment requirement is satisfied, map in user pages for
+ * direct dma. else, set up kernel bounce buffers
+ */
+ uaddr = (unsigned long) ubuf;
+ if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+ bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
+ else
+ bio = bio_copy_user(q, uaddr, len, rw == READ);
- rq->bio = rq->biotail = bio;
- if (rq->bio)
+ if (!IS_ERR(bio)) {
+ rq->bio = rq->biotail = bio;
blk_rq_bio_prep(q, rq, bio);
- rq->buffer = rq->data = buf;
- rq->data_len = len;
- return rq;
-fault:
- if (buf)
- kfree(buf);
- if (bio)
- bio_unmap_user(bio, 1);
- if (rq)
- blk_put_request(rq);
+ rq->buffer = rq->data = NULL;
+ rq->data_len = len;
+ return rq;
+ }
- return ERR_PTR(ret);
+ /*
+ * bio is the err-ptr
+ */
+ blk_put_request(rq);
+ return (struct request *) bio;
}
EXPORT_SYMBOL(blk_rq_map_user);
* Description:
* Unmap a request previously mapped by blk_rq_map_user().
*/
-int blk_rq_unmap_user(struct request *rq, void __user *ubuf, struct bio *bio,
- unsigned int ulen)
+int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
{
- const int read = rq_data_dir(rq) == READ;
int ret = 0;
- if (bio)
- bio_unmap_user(bio, read);
- if (rq->buffer) {
- if (read && copy_to_user(ubuf, rq->buffer, ulen))
- ret = -EFAULT;
- kfree(rq->buffer);
+ if (bio) {
+ if (bio_flagged(bio, BIO_USER_MAPPED))
+ bio_unmap_user(bio);
+ else
+ ret = bio_uncopy_user(bio);
}
blk_put_request(rq);
static int sock_xmit(struct socket *sock, int send, void *buf, int size,
int msg_flags)
{
- mm_segment_t oldfs;
int result;
struct msghdr msg;
- struct iovec iov;
+ struct kvec iov;
unsigned long flags;
sigset_t oldset;
- oldfs = get_fs();
- set_fs(get_ds());
/* Allow interception of SIGKILL only
* Don't allow other signals to interrupt the transmission */
spin_lock_irqsave(¤t->sighand->siglock, flags);
iov.iov_len = size;
msg.msg_name = NULL;
msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
if (send)
- result = sock_sendmsg(sock, &msg, size);
+ result = kernel_sendmsg(sock, &msg, &iov, 1, size);
else
- result = sock_recvmsg(sock, &msg, size, 0);
+ result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);
if (signal_pending(current)) {
siginfo_t info;
recalc_sigpending();
spin_unlock_irqrestore(¤t->sighand->siglock, flags);
- set_fs(oldfs);
return result;
}
-#define PPCSTRUCT(pi) ((PPC *)(pi->private))
+#define PPCSTRUCT(pi) ((Interface *)(pi->private))
/****************************************************************/
/*
static int bpck6_init_proto(PIA *pi)
{
- PPC *p = kmalloc(sizeof(PPC), GFP_KERNEL);
+ Interface *p = kmalloc(sizeof(Interface), GFP_KERNEL);
if (p) {
- memset(p, 0, sizeof(PPC));
+ memset(p, 0, sizeof(Interface));
pi->private = (unsigned long)p;
return 0;
}
u8 org_data; // original LPT data port contents
u8 org_ctrl; // original LPT control port contents
u8 cur_ctrl; // current control port contents
-} PPC;
+} Interface;
//***************************************************************************
//***************************************************************************
-static int ppc6_select(PPC *ppc);
-static void ppc6_deselect(PPC *ppc);
-static void ppc6_send_cmd(PPC *ppc, u8 cmd);
-static void ppc6_wr_data_byte(PPC *ppc, u8 data);
-static u8 ppc6_rd_data_byte(PPC *ppc);
-static u8 ppc6_rd_port(PPC *ppc, u8 port);
-static void ppc6_wr_port(PPC *ppc, u8 port, u8 data);
-static void ppc6_rd_data_blk(PPC *ppc, u8 *data, long count);
-static void ppc6_wait_for_fifo(PPC *ppc);
-static void ppc6_wr_data_blk(PPC *ppc, u8 *data, long count);
-static void ppc6_rd_port16_blk(PPC *ppc, u8 port, u8 *data, long length);
-static void ppc6_wr_port16_blk(PPC *ppc, u8 port, u8 *data, long length);
-static void ppc6_wr_extout(PPC *ppc, u8 regdata);
-static int ppc6_open(PPC *ppc);
-static void ppc6_close(PPC *ppc);
+static int ppc6_select(Interface *ppc);
+static void ppc6_deselect(Interface *ppc);
+static void ppc6_send_cmd(Interface *ppc, u8 cmd);
+static void ppc6_wr_data_byte(Interface *ppc, u8 data);
+static u8 ppc6_rd_data_byte(Interface *ppc);
+static u8 ppc6_rd_port(Interface *ppc, u8 port);
+static void ppc6_wr_port(Interface *ppc, u8 port, u8 data);
+static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count);
+static void ppc6_wait_for_fifo(Interface *ppc);
+static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count);
+static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
+static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
+static void ppc6_wr_extout(Interface *ppc, u8 regdata);
+static int ppc6_open(Interface *ppc);
+static void ppc6_close(Interface *ppc);
//***************************************************************************
-static int ppc6_select(PPC *ppc)
+static int ppc6_select(Interface *ppc)
{
u8 i, j, k;
//***************************************************************************
-static void ppc6_deselect(PPC *ppc)
+static void ppc6_deselect(Interface *ppc)
{
if (ppc->mode & 4) // EPP
ppc->cur_ctrl |= port_init;
//***************************************************************************
-static void ppc6_send_cmd(PPC *ppc, u8 cmd)
+static void ppc6_send_cmd(Interface *ppc, u8 cmd)
{
switch(ppc->mode)
{
//***************************************************************************
-static void ppc6_wr_data_byte(PPC *ppc, u8 data)
+static void ppc6_wr_data_byte(Interface *ppc, u8 data)
{
switch(ppc->mode)
{
//***************************************************************************
-static u8 ppc6_rd_data_byte(PPC *ppc)
+static u8 ppc6_rd_data_byte(Interface *ppc)
{
u8 data = 0;
//***************************************************************************
-static u8 ppc6_rd_port(PPC *ppc, u8 port)
+static u8 ppc6_rd_port(Interface *ppc, u8 port)
{
ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_READ));
//***************************************************************************
-static void ppc6_wr_port(PPC *ppc, u8 port, u8 data)
+static void ppc6_wr_port(Interface *ppc, u8 port, u8 data)
{
ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_WRITE));
//***************************************************************************
-static void ppc6_rd_data_blk(PPC *ppc, u8 *data, long count)
+static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count)
{
switch(ppc->mode)
{
//***************************************************************************
-static void ppc6_wait_for_fifo(PPC *ppc)
+static void ppc6_wait_for_fifo(Interface *ppc)
{
int i;
//***************************************************************************
-static void ppc6_wr_data_blk(PPC *ppc, u8 *data, long count)
+static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count)
{
switch(ppc->mode)
{
//***************************************************************************
-static void ppc6_rd_port16_blk(PPC *ppc, u8 port, u8 *data, long length)
+static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
{
length = length << 1;
//***************************************************************************
-static void ppc6_wr_port16_blk(PPC *ppc, u8 port, u8 *data, long length)
+static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
{
length = length << 1;
//***************************************************************************
-static void ppc6_wr_extout(PPC *ppc, u8 regdata)
+static void ppc6_wr_extout(Interface *ppc, u8 regdata)
{
ppc6_send_cmd(ppc,(REG_VERSION | ACCESS_REG | ACCESS_WRITE));
//***************************************************************************
-static int ppc6_open(PPC *ppc)
+static int ppc6_open(Interface *ppc)
{
int ret;
//***************************************************************************
-static void ppc6_close(PPC *ppc)
+static void ppc6_close(Interface *ppc)
{
ppc6_deselect(ppc);
}
if (size < 0)
return -EINVAL;
if (size > (q->max_sectors << 9))
- return -EINVAL;
+ size = q->max_sectors << 9;
q->sg_reserved_size = size;
return 0;
rq->flags |= REQ_BLOCK_PC;
bio = rq->bio;
+ /*
+ * bounce this after holding a reference to the original bio, it's
+ * needed for proper unmapping
+ */
+ if (rq->bio)
+ blk_queue_bounce(q, &rq->bio);
+
rq->timeout = (hdr->timeout * HZ) / 1000;
if (!rq->timeout)
rq->timeout = q->sg_timeout;
hdr->sb_len_wr = len;
}
- if (blk_rq_unmap_user(rq, hdr->dxferp, bio, hdr->dxfer_len))
+ if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len))
return -EFAULT;
/* may not have succeeded, but output values written to control
disk->fops = &floppy_fops;
disk->private_data = &floppy_states[i];
disk->queue = swim3_queue;
+ disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", i);
sprintf(disk->devfs_name, "floppy/%d", i);
set_capacity(disk, 2880);
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
+#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
outb(0x80, iobase + 0x30);
/* Wait some time */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ / 100);
+ msleep(10);
/* Turn FPGA on */
outb(0x00, iobase + 0x30);
outb((0x0f << RTS_LEVEL_SHIFT_BITS) | 1, iobase + REG_RX_CONTROL);
/* Timeout before it is safe to send the first HCI packet */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout((HZ * 5) / 4); // or set it to 3/2
+ msleep(1250);
/* Register HCI device */
if (hci_register_dev(hdev) < 0) {
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/delay.h>
#include <linux/types.h>
#include <linux/sched.h>
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
}
/* Timeout before it is safe to send the first HCI packet */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
+ msleep(1000);
/* Register HCI device */
err = hci_register_dev(hdev);
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
btuart_change_speed(info, DEFAULT_BAUD_RATE);
/* Timeout before it is safe to send the first HCI packet */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
+ msleep(1000);
/* Register HCI device */
if (hci_register_dev(hdev) < 0) {
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
spin_unlock_irqrestore(&(info->lock), flags);
/* Timeout before it is safe to send the first HCI packet */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ * 2);
+ msleep(2000);
/* Register HCI device */
if (hci_register_dev(hdev) < 0) {
struct sk_buff *skb;
unsigned long flags;
- BT_ERR("Timeout, retransmitting %u pkts", bcsp->unack.qlen);
+ BT_DBG("hu %p retransmitting %u pkts", hu, bcsp->unack.qlen);
+
spin_lock_irqsave(&bcsp->unack.lock, flags);
while ((skb = __skb_dequeue_tail(&bcsp->unack)) != NULL) {
#define URB_ZERO_PACKET 0
#endif
-#define VERSION "2.6"
+#define VERSION "2.7"
static struct usb_driver hci_usb_driver;
/* AVM BlueFRITZ! USB v2.0 */
{ USB_DEVICE(0x057c, 0x3800) },
- /* Ericsson with non-standard id */
- { USB_DEVICE(0x0bdb, 0x1002) },
+ /* Bluetooth Ultraport Module from IBM */
+ { USB_DEVICE(0x04bf, 0x030a) },
- /* ALPS Module with non-standard id */
+ /* ALPS Modules with non-standard id */
+ { USB_DEVICE(0x044e, 0x3001) },
{ USB_DEVICE(0x044e, 0x3002) },
- /* Bluetooth Ultraport Module from IBM */
- { USB_DEVICE(0x04bf, 0x030a) },
+ /* Ericsson with non-standard id */
+ { USB_DEVICE(0x0bdb, 0x1002) },
{ } /* Terminating entry */
};
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET },
+ /* ISSC Bluetooth Adapter v3.1 */
+ { USB_DEVICE(0x1131, 0x1001), .driver_info = HCI_RESET },
+
/* Digianswer device */
{ USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER },
BT_DBG("%s", hdev->name);
- for (i=0; i < 4; i++)
+ for (i = 0; i < 4; i++)
skb_queue_purge(&husb->transmit_q[i]);
return 0;
}
-static inline void hci_usb_wait_for_urb(struct urb *urb)
-{
- while (atomic_read(&urb->kref.refcount) > 1) {
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout((5 * HZ + 999) / 1000);
- }
-}
-
static void hci_usb_unlink_urbs(struct hci_usb *husb)
{
int i;
BT_DBG("%s", husb->hdev->name);
- for (i=0; i < 4; i++) {
+ for (i = 0; i < 4; i++) {
struct _urb *_urb;
struct urb *urb;
urb = &_urb->urb;
BT_DBG("%s unlinking _urb %p type %d urb %p",
husb->hdev->name, _urb, _urb->type, urb);
- usb_unlink_urb(urb);
- hci_usb_wait_for_urb(urb);
+ usb_kill_urb(urb);
_urb_queue_tail(__completed_q(husb, _urb->type), _urb);
}
}
file->private_data = hci_vhci;
- return 0;
+ return nonseekable_open(inode, file);
}
static int hci_vhci_chr_close(struct inode *inode, struct file *file)
Werner Zimmermann, August 8, 1995
V1.70 Multisession support now is completed, but there is still not
enough testing done. If you can test it, please contact me. For
- details please read /usr/src/linux/Documentation/cdrom/aztcd
+ details please read Documentation/cdrom/aztcd
Werner Zimmermann, August 19, 1995
V1.80 Modification to suit the new kernel boot procedure introduced
with kernel 1.3.33. Will definitely not work with older kernels.
goto err;
if (fp->f_mode & FMODE_WRITE) {
ret = -EROFS;
- if (!CDROM_CAN(CDC_RAM))
- goto err;
if (cdrom_open_write(cdi))
goto err;
+ if (!CDROM_CAN(CDC_RAM))
+ goto err;
ret = 0;
}
}
struct packet_command cgc;
int nr, ret;
+ cdi->last_sense = 0;
+
memset(&cgc, 0, sizeof(cgc));
/*
if (!q)
return -ENXIO;
+ cdi->last_sense = 0;
+
while (nframes) {
nr = nframes;
if (cdi->cdda_method == CDDA_BPC_SINGLE)
rq->timeout = 60 * HZ;
bio = rq->bio;
+ if (rq->bio)
+ blk_queue_bounce(q, &rq->bio);
+
if (blk_execute_rq(q, cdi->disk, rq)) {
struct request_sense *s = rq->sense;
ret = -EIO;
cdi->last_sense = s->sense_key;
}
- if (blk_rq_unmap_user(rq, ubuf, bio, len))
+ if (blk_rq_unmap_user(rq, bio, len))
ret = -EFAULT;
if (ret)
nframes -= nr;
lba += nr;
+ ubuf += len;
}
return ret;
} cdrom_sysctl_settings;
int cdrom_sysctl_info(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int pos;
struct cdrom_device_info *cdi;
char *info = cdrom_sysctl_settings.info;
- if (!*lenp || (filp->f_pos && !write)) {
+ if (!*lenp || (*ppos && !write)) {
*lenp = 0;
return 0;
}
strcpy(info+pos,"\n\n");
- return proc_dostring(ctl, write, filp, buffer, lenp);
+ return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
}
/* Unfortunately, per device settings are not implemented through
}
static int cdrom_sysctl_handler(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
int val = *valp;
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp);
+ ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
if (write && *valp != val) {
#endif /* MULTISESSION */
if (disk_info.multi)
printk(KERN_WARNING "optcd: Multisession support experimental, "
- "see linux/Documentation/cdrom/optcd\n");
+ "see Documentation/cdrom/optcd\n");
DEBUG((DEBUG_TOC, "exiting update_toc"));
/*==========================================================================*/
-#if FUTURE
+#ifdef FUTURE
static DECLARE_WAIT_QUEUE_HEAD(sbp_waitq);
#endif /* FUTURE */
u_char TocEnt_number;
u_char TocEnt_format; /* em */
u_int TocEnt_address;
-#if SAFE_MIXED
+#ifdef SAFE_MIXED
char has_data;
#endif /* SAFE_MIXED */
u_char ored_ctl_adr; /* to detect if CDROM contains data tracks */
return (0);
}
/*==========================================================================*/
-#if FUTURE
+#ifdef FUTURE
static int cc_SubChanInfo(int frame, int count, u_char *buffer)
/* "frame" is a RED BOOK (msf-bin) address */
{
return (0);
}
/*==========================================================================*/
-#if FUTURE
+#ifdef FUTURE
/*
* obtain if requested service disturbs current audio state
*/
/*==========================================================================*/
-#if FUTURE
+#ifdef FUTURE
/*
* called always if driver gets entered
* returns 0 or ERROR2 or ERROR15
case CDROMREADMODE1:
msg(DBG_IOC,"ioctl: CDROMREADMODE1 requested.\n");
-#if SAFE_MIXED
+#ifdef SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
cc_ModeSelect(CD_FRAMESIZE);
case CDROMREADMODE2: /* not usable at the moment */
msg(DBG_IOC,"ioctl: CDROMREADMODE2 requested.\n");
-#if SAFE_MIXED
+#ifdef SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
cc_ModeSelect(CD_FRAMESIZE_RAW1);
if (famL_drive) RETURN_UP(-EINVAL);
if (famV_drive) RETURN_UP(-EINVAL);
if (famT_drive) RETURN_UP(-EINVAL);
-#if SAFE_MIXED
+#ifdef SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
if (current_drive->aud_buf==NULL) RETURN_UP(-EINVAL);
- if (copy_from_user(&read_audio, (void *)arg,
+ if (copy_from_user(&read_audio, (void __user *)arg,
sizeof(struct cdrom_read_audio)))
RETURN_UP(-EFAULT);
if (read_audio.nframes < 0 || read_audio.nframes>current_drive->sbp_audsiz) RETURN_UP(-EINVAL);
msg(DBG_AUD,"read_audio: cc_ReadError was necessary after read: %02X\n",i);
continue;
}
- if (copy_to_user((u_char *)read_audio.buf,
- (u_char *) current_drive->aud_buf,
+ if (copy_to_user(read_audio.buf,
+ current_drive->aud_buf,
read_audio.nframes * CD_FRAMESIZE_RAW))
RETURN_UP(-EFAULT);
msg(DBG_AUD,"read_audio: copy_to_user done.\n");
case CDROMPLAYMSF:
msg(DBG_IOC,"ioctl: CDROMPLAYMSF entered.\n");
-#if SAFE_MIXED
+#ifdef SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
if (current_drive->audio_state==audio_playing)
case CDROMPLAYTRKIND: /* Play a track. This currently ignores index. */
msg(DBG_IOC,"ioctl: CDROMPLAYTRKIND entered.\n");
-#if SAFE_MIXED
+#ifdef SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
if (current_drive->audio_state==audio_playing)
case CDROMSTOP: /* Spin down the drive */
msg(DBG_IOC,"ioctl: CDROMSTOP entered.\n");
-#if SAFE_MIXED
+#ifdef SAFE_MIXED
if (current_drive->has_data>1) RETURN_UP(-EBUSY);
#endif /* SAFE_MIXED */
i=cc_Pause_Resume(1);
goto request_loop;
}
-#if FUTURE
+#ifdef FUTURE
i=prepare(0,0); /* at moment not really a hassle check, but ... */
if (i!=0)
msg(DBG_INF,"\"prepare\" tells error %d -- ignored\n", i);
sbp_sleep(0);
if (sbp_data(req) != 0)
{
-#if SAFE_MIXED
+#ifdef SAFE_MIXED
current_drive->has_data=2; /* is really a data disk */
#endif /* SAFE_MIXED */
#ifdef DEBUG_GTL
if ((current_drive->ored_ctl_adr&0x40)==0)
{
msg(DBG_INF,"CD contains no data tracks.\n");
-#if SAFE_MIXED
+#ifdef SAFE_MIXED
current_drive->has_data=0;
#endif /* SAFE_MIXED */
}
-#if SAFE_MIXED
+#ifdef SAFE_MIXED
else if (current_drive->has_data<1) current_drive->has_data=1;
#endif /* SAFE_MIXED */
}
if (p->f_eject) cc_SpinDown();
p->diskstate_flags &= ~cd_size_bit;
p->open_count=0;
-#if SAFE_MIXED
+#ifdef SAFE_MIXED
p->has_data=0;
#endif /* SAFE_MIXED */
}
if (port_index>0)
{
- msg(DBG_INF, "You should read linux/Documentation/cdrom/sbpcd\n");
+ msg(DBG_INF, "You should read Documentation/cdrom/sbpcd\n");
msg(DBG_INF, "and then configure sbpcd.h for your hardware.\n");
}
check_datarate();
if (p->drv_id==-1) continue;
switch_drive(p);
-#if SAFE_MIXED
+#ifdef SAFE_MIXED
p->has_data=0;
#endif /* SAFE_MIXED */
/*
current_drive->diskstate_flags &= ~toc_bit;
/* we *don't* need invalidate here, it's done by caller */
current_drive->diskstate_flags &= ~cd_size_bit;
-#if SAFE_MIXED
+#ifdef SAFE_MIXED
current_drive->has_data=0;
#endif /* SAFE_MIXED */
/*
* Attention! This file contains user-serviceable parts!
* I recommend to make use of it...
- * If you feel helpless, look into linux/Documentation/cdrom/sbpcd
+ * If you feel helpless, look into Documentation/cdrom/sbpcd
* (good idea anyway, at least before mailing me).
*
* The definitions for the first controller can get overridden by
config ISI
tristate "Multi-Tech multiport card support (EXPERIMENTAL)"
- depends on SERIAL_NONSTANDARD && EXPERIMENTAL && BROKEN_ON_SMP && m
+ depends on SERIAL_NONSTANDARD && PCI && EXPERIMENTAL && BROKEN_ON_SMP && m
help
This is a driver for the Multi-Tech cards which provide several
serial ports. The driver is experimental and can currently only be
config SYNCLINK
tristate "Microgate SyncLink card support"
- depends on SERIAL_NONSTANDARD
+ depends on SERIAL_NONSTANDARD && PCI
help
Provides support for the SyncLink ISA and PCI multiprotocol serial
adapters. These adapters support asynchronous and HDLC bit
If you have an Alchemy AU1000 processor (MIPS based) and you want
to use serial ports, say Y. Otherwise, say N.
-config SGI_L1_SERIAL
- bool "SGI Altix L1 serial support"
- depends on SERIAL_NONSTANDARD && IA64 && DISCONTIGMEM
- help
- If you have an SGI Altix and you want to use the serial port
- connected to the system controller (you want this!), say Y.
- Otherwise, say N.
-
-config SGI_L1_SERIAL_CONSOLE
- bool "SGI Altix L1 serial console support"
- depends on SGI_L1_SERIAL
- help
- If you have an SGI Altix and you would like to use the system
- controller serial port as your console (you want this!),
- say Y. Otherwise, say N.
-
config AU1000_SERIAL_CONSOLE
bool "Enable Au1000 serial console"
depends on AU1000_UART
console. This driver allows each pSeries partition to have a console
which is accessed via the HMC.
+config HVCS
+ tristate "IBM Hypervisor Virtual Console Server support"
+ depends on PPC_PSERIES
+ help
+ Partitionable IBM Power5 ppc64 machines allow hosting of
+ firmware virtual consoles from one Linux partition by
+ another Linux partition. This driver allows console data
+ from Linux partitions to be accessed through TTY device
+ interfaces in the device tree of a Linux partition running
+ this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hvcs.ko. Additionally, this module
+ will depend on arch specific APIs exported from hvcserver.ko
+ which will also be compiled when this driver is built as a
+ module.
+
config QIC02_TAPE
tristate "QIC-02 tape support"
help
config APPLICOM
tristate "Applicom intelligent fieldbus card support"
+ depends on PCI
---help---
This driver provides the kernel-side support for the intelligent
fieldbus cards made by Applicom International. More information
config FTAPE
tristate "Ftape (QIC-80/Travan) support"
- depends on BROKEN_ON_SMP
+ depends on BROKEN_ON_SMP && (ALPHA || X86)
---help---
If you have a tape drive that is connected to your floppy
controller, say Y here.
is assumed the platform called hpet_alloc with the RTC IRQ values for
the HPET timers.
-config HPET_NOMMAP
- bool "HPET - Control mmap capability."
- default n
+config HPET_MMAP
+ bool "Allow mmap of HPET"
+ default y
depends on HPET
help
- If you say Y here, then the mmap interface for the HPET driver returns ENOSYS.
- Some hardware implementations might not want all the memory in the page the
- HPET control registers reside to be exposed.
+ If you say Y here, user applications will be able to mmap
+ the HPET registers.
+
+ In some hardware implementations, the page containing HPET
+ registers may also contain other things that shouldn't be
+ exposed to the user. If this applies to your hardware,
+ say N here.
config MAX_RAW_DEVS
int "Maximum number of RAW devices to support (1-8192)"
obj-$(CONFIG_RIO) += rio/ generic_serial.o
obj-$(CONFIG_HVC_CONSOLE) += hvc_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
-obj-$(CONFIG_SGI_L1_SERIAL) += sn_serial.o
obj-$(CONFIG_VIOCONS) += viocons.o
obj-$(CONFIG_VIOTAPE) += viotape.o
+obj-$(CONFIG_HVCS) += hvcs.o
obj-$(CONFIG_PRINTER) += lp.o
obj-$(CONFIG_TIPAR) += tipar.o
../net/scc.c
A subset of the documentation is in
- ../../Documentation/networking/z8530drv.txt
+ Documentation/networking/z8530drv.txt
This option gives you AGP support for the GLX component of XFree86 4.x
on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
E7205 and E7505 chipsets and full support for the 810, 815, 830M, 845G,
- 852GM, 855GM and 865G integrated graphics chipsets.
+ 852GM, 855GM, 865G and I915 integrated graphics chipsets.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI, or if you have any Intel integrated graphics
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
+ /* VIA K8T890 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_VIA,
- .device = PCI_DEVICE_ID_VIA_8380_0,
+ .device = PCI_DEVICE_ID_VIA_3238_0,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
+ /* VIA K8T800/K8M800/K8N800 */
+ {
+ .class = (PCI_CLASS_BRIDGE_HOST << 8),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = PCI_DEVICE_ID_VIA_838X_1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+
/* NForce3 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
/*
- * HP AGPGART routines.
- * Copyright (C) 2002-2003 Hewlett-Packard Co
- * Bjorn Helgaas <bjorn_helgaas@hp.com>
+ * HP zx1 AGPGART routines.
+ *
+ * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
/*
* Intel(R) 855GM/852GM and 865G support added by David Dawes
* <dawes@tungstengraphics.com>.
+ *
+ * Intel(R) 915G support added by Alan Hourihane
+ * <alanh@tungstengraphics.com>.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include "agp.h"
#define INTEL_I850_MCHCFG 0x50
#define INTEL_I850_ERRSTS 0xc8
+/* intel 915G registers */
+#define I915_GMADDR 0x18
+#define I915_MMADDR 0x10
+#define I915_PTEADDR 0x1C
+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
+
+
/* Intel 7505 registers */
#define INTEL_I7505_APSIZE 0x74
#define INTEL_I7505_NCAPID 0x60
return;
}
+/* Exists to support ARGB cursors: allocate four physically contiguous
+ * pages (one order-2 block), make their kernel mapping uncached, and
+ * return the kernel virtual address.  Returns NULL on failure. */
+static void *i8xx_alloc_pages(void)
+{
+	struct page * page;
+
+	page = alloc_pages(GFP_KERNEL, 2);
+	if (page == NULL) {
+		return NULL;
+	}
+	if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) {
+		/* Free the whole order-2 block; __free_page() would leak 3 of
+		 * the 4 pages. */
+		__free_pages(page, 2);
+		return NULL;
+	}
+	get_page(page);
+	SetPageLocked(page);
+	atomic_inc(&agp_bridge->current_memory_agp);
+	return page_address(page);
+}
+
+/* Tear down a 4-page block from i8xx_alloc_pages(): restore the kernel
+ * mapping's cacheability, drop the reference and page lock taken at
+ * allocation time, free the pages, and update the AGP page accounting. */
+static void i8xx_destroy_pages(void *addr)
+{
+	struct page *page;
+
+	if (addr == NULL)
+		return;
+
+	page = virt_to_page(addr);
+	/* Undo the PAGE_KERNEL_NOCACHE attribute set at allocation. */
+	change_page_attr(page, 4, PAGE_KERNEL);
+	put_page(page);
+	unlock_page(page);
+	free_pages((unsigned long)addr, 2);
+	atomic_dec(&agp_bridge->current_memory_agp);
+}
+
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
struct agp_memory *new;
void *addr;
- if (pg_count != 1)
+ if (pg_count != 1 && pg_count != 4)
return NULL;
- addr = agp_bridge->driver->agp_alloc_page();
+ switch (pg_count) {
+ case 1: addr = agp_bridge->driver->agp_alloc_page();
+ break;
+ case 4:
+ /* kludge to get 4 physical pages for ARGB cursor */
+ addr = i8xx_alloc_pages();
+ break;
+ default:
+ return NULL;
+ }
+
if (addr == NULL)
return NULL;
- new = agp_create_memory(1);
+ new = agp_create_memory(pg_count);
if (new == NULL)
return NULL;
- new->memory[0] = agp_bridge->driver->mask_memory(virt_to_phys(addr), type);
- new->page_count = 1;
- new->num_scratch_pages = 1;
+ new->memory[0] = virt_to_phys(addr);
+ if (pg_count == 4) {
+ /* kludge to get 4 physical pages for ARGB cursor */
+ new->memory[1] = new->memory[0] + PAGE_SIZE;
+ new->memory[2] = new->memory[1] + PAGE_SIZE;
+ new->memory[3] = new->memory[2] + PAGE_SIZE;
+ }
+ new->page_count = pg_count;
+ new->num_scratch_pages = pg_count;
new->type = AGP_PHYS_MEMORY;
new->physical = new->memory[0];
return new;
{
agp_free_key(curr->key);
if(curr->type == AGP_PHYS_MEMORY) {
- agp_bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[0]));
+ if (curr->page_count == 4)
+ i8xx_destroy_pages(phys_to_virt(curr->memory[0]));
+ else
+ agp_bridge->driver->agp_destroy_page(
+ phys_to_virt(curr->memory[0]));
vfree(curr->memory);
}
kfree(curr);
{
{128, 32768, 5},
/* The 64M mode still requires a 128k gatt */
- {64, 16384, 5}
+ {64, 16384, 5},
+ {256, 65536, 6},
};
static struct _intel_i830_private {
struct pci_dev *i830_dev; /* device one */
volatile u8 *registers;
+ volatile u32 *gtt; /* I915G */
int gtt_entries;
} intel_i830_private;
u8 rdct;
int local = 0;
static const int ddt[4] = { 0, 16, 32, 64 };
+ int size;
pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+ /* We obtain the size of the GTT, which is also stored (for some
+ * reason) at the top of stolen memory. Then we add 4KB to that
+ * for the video BIOS popup, which is also stored in there. */
+ size = agp_bridge->driver->fetch_size() + 4;
+
if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
case I830_GMCH_GMS_STOLEN_512:
- gtt_entries = KB(512) - KB(132);
+ gtt_entries = KB(512) - KB(size);
break;
case I830_GMCH_GMS_STOLEN_1024:
- gtt_entries = MB(1) - KB(132);
+ gtt_entries = MB(1) - KB(size);
break;
case I830_GMCH_GMS_STOLEN_8192:
- gtt_entries = MB(8) - KB(132);
+ gtt_entries = MB(8) - KB(size);
break;
case I830_GMCH_GMS_LOCAL:
rdct = INREG8(intel_i830_private.registers,
} else {
switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
- gtt_entries = MB(1) - KB(132);
+ gtt_entries = MB(1) - KB(size);
break;
case I855_GMCH_GMS_STOLEN_4M:
- gtt_entries = MB(4) - KB(132);
+ gtt_entries = MB(4) - KB(size);
break;
case I855_GMCH_GMS_STOLEN_8M:
- gtt_entries = MB(8) - KB(132);
+ gtt_entries = MB(8) - KB(size);
break;
case I855_GMCH_GMS_STOLEN_16M:
- gtt_entries = MB(16) - KB(132);
+ gtt_entries = MB(16) - KB(size);
break;
case I855_GMCH_GMS_STOLEN_32M:
- gtt_entries = MB(32) - KB(132);
+ gtt_entries = MB(32) - KB(size);
+ break;
+ case I915_GMCH_GMS_STOLEN_48M:
+ /* Check it's really I915G */
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB)
+ gtt_entries = MB(48) - KB(size);
+ else
+ gtt_entries = 0;
break;
+	case I915_GMCH_GMS_STOLEN_64M:
+		/* Check it's really I915G */
+		if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB)
+			gtt_entries = MB(64) - KB(size);
+		else
+			gtt_entries = 0;
+		/* Missing break fell through to default and zeroed gtt_entries,
+		 * making the 64M stolen-memory case a no-op. */
+		break;
 	default:
 		gtt_entries = 0;
 		break;
agp_bridge->aperture_size_idx = 0;
return(values[0].size);
} else {
- agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
+ agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
agp_bridge->aperture_size_idx = 1;
return(values[1].size);
}
return(NULL);
}
+/* Bring the 915G bridge into service: record the aperture bus address
+ * from the GMADR BAR, set the GMCH enable bit, enable the page table,
+ * and point every GTT entry beyond the stolen-memory region at the
+ * scratch page.  Always returns 0. */
+static int intel_i915_configure(void)
+{
+	struct aper_size_info_fixed *current_size;
+	u32 temp;
+	u16 gmch_ctrl;
+	int i;
+
+	current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+	/* Aperture base lives in the i915-specific GMADR BAR. */
+	pci_read_config_dword(intel_i830_private.i830_dev, I915_GMADDR, &temp);
+
+	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+	gmch_ctrl |= I830_GMCH_ENABLED;
+	pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
+
+	/* Enable the (POST-initialized) page table. */
+	OUTREG32(intel_i830_private.registers,I810_PGETBL_CTL,agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED);
+	global_cache_flush();
+
+	if (agp_bridge->driver->needs_scratch_page) {
+		/* Entries below gtt_entries belong to stolen memory; leave them. */
+		for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++)
+			OUTREG32(intel_i830_private.gtt, i, agp_bridge->scratch_page);
+	}
+
+	return (0);
+}
+
+/* Unmap the GTT and MMIO register mappings created by
+ * intel_i915_create_gatt_table(); casts drop volatile for iounmap(). */
+static void intel_i915_cleanup(void)
+{
+	iounmap((void *) intel_i830_private.gtt);
+	iounmap((void *) intel_i830_private.registers);
+}
+
+/* Map mem->page_count pages into the i915 GTT starting at entry
+ * pg_start.  Entries below gtt_entries cover stolen memory and are
+ * refused, as are ranges past the aperture and unknown memory types.
+ * Returns 0 on success, -EINVAL on any rejected request. */
+static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
+				int type)
+{
+	int i,j,num_entries;
+	void *temp;
+
+	temp = agp_bridge->current_size;
+	num_entries = A_SIZE_FIX(temp)->num_entries;
+
+	if (pg_start < intel_i830_private.gtt_entries) {
+		printk (KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_i830_private.gtt_entries == 0x%.8x\n",
+				pg_start,intel_i830_private.gtt_entries);
+
+		printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
+		return (-EINVAL);
+	}
+
+	if ((pg_start + mem->page_count) > num_entries)
+		return (-EINVAL);
+
+	/* The i830 can't check the GTT for entries since its read only,
+	 * depend on the caller to make the correct offset decisions.
+	 */
+
+	if ((type != 0 && type != AGP_PHYS_MEMORY) ||
+		(mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
+		return (-EINVAL);
+
+	global_cache_flush();
+
+	/* Write one masked physical address per GTT slot. */
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
+		OUTREG32(intel_i830_private.gtt, j, agp_bridge->driver->mask_memory(mem->memory[i], mem->type));
+
+	global_cache_flush();
+
+	agp_bridge->driver->tlb_flush(mem);
+
+	return(0);
+}
+
+/* Unmap mem->page_count GTT entries starting at pg_start by pointing
+ * them back at the scratch page.  The stolen-memory region (entries
+ * below gtt_entries) is refused with -EINVAL. */
+static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
+				int type)
+{
+	int i;
+
+	global_cache_flush();
+
+	if (pg_start < intel_i830_private.gtt_entries) {
+		printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
+		return (-EINVAL);
+	}
+
+	for (i = pg_start; i < (mem->page_count + pg_start); i++)
+		OUTREG32(intel_i830_private.gtt, i, agp_bridge->scratch_page);
+
+	global_cache_flush();
+
+	agp_bridge->driver->tlb_flush(mem);
+
+	return (0);
+}
+
+/* Determine the aperture size by probing bit 27 of the GMADR BAR:
+ * set -> 128MB aperture (aperture_sizes[0]), clear -> 256MB aperture
+ * (aperture_sizes[2]).  Also records the choice as the bridge's
+ * current/previous size and returns the size in MB. */
+static int intel_i915_fetch_size(void)
+{
+	struct aper_size_info_fixed *values;
+	u32 temp, offset = 0;
+
+#define I915_256MB_ADDRESS_MASK (1<<27)
+
+	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+	pci_read_config_dword(intel_i830_private.i830_dev, I915_GMADDR, &temp);
+	if (temp & I915_256MB_ADDRESS_MASK)
+		offset = 0;	/* 128MB aperture */
+	else
+		offset = 2;	/* 256MB aperture */
+	agp_bridge->previous_size = agp_bridge->current_size = (void *)(values + offset);
+	return(values[offset].size);
+}
+
+/* The intel i915 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for it in the GTT.  Maps the GTT
+ * (from the PTEADDR BAR) and the MMIO registers (from MMADDR), then
+ * computes the stolen-memory entry count.  Returns 0 or -ENOMEM.
+ */
+static int intel_i915_create_gatt_table(void)
+{
+	int page_order;
+	struct aper_size_info_fixed *size;
+	int num_entries;
+	u32 temp, temp2;
+
+	size = agp_bridge->current_size;
+	page_order = size->page_order;
+	num_entries = size->num_entries;
+	agp_bridge->gatt_table_real = NULL;
+
+	pci_read_config_dword(intel_i830_private.i830_dev, I915_MMADDR, &temp);
+	pci_read_config_dword(intel_i830_private.i830_dev, I915_PTEADDR,&temp2);
+
+	intel_i830_private.gtt = (volatile u32 *) ioremap(temp2, 256 * 1024);
+	if (!intel_i830_private.gtt)
+		return (-ENOMEM);
+
+	temp &= 0xfff80000;
+
+	intel_i830_private.registers = (volatile u8 *) ioremap(temp,128 * 4096);
+	if (!intel_i830_private.registers) {
+		/* Don't leak the GTT mapping on the error path. */
+		iounmap((void *) intel_i830_private.gtt);
+		return (-ENOMEM);
+	}
+
+	temp = INREG32(intel_i830_private.registers,I810_PGETBL_CTL) & 0xfffff000;
+	global_cache_flush();
+
+	/* we have to call this as early as possible after the MMIO base address is known */
+	intel_i830_init_gtt_entries();
+
+	agp_bridge->gatt_table = NULL;
+
+	agp_bridge->gatt_bus_addr = temp;
+
+	return(0);
+}
+
static int intel_fetch_size(void)
{
int i;
.owner = THIS_MODULE,
.aperture_sizes = intel_i830_sizes,
.size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 2,
+ .num_aperture_sizes = 3,
.needs_scratch_page = TRUE,
.configure = intel_i830_configure,
.fetch_size = intel_i830_fetch_size,
.agp_destroy_page = agp_generic_destroy_page,
};
+/* agp_bridge_driver ops for the Intel 915G: reuses the i810/i830
+ * helpers for masking, TLB flush, allocation and GATT teardown, but
+ * supplies i915-specific configure/fetch_size/cleanup/insert/remove
+ * that go through the separately mapped GTT. */
+static struct agp_bridge_driver intel_915_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_i830_sizes,
+	.size_type		= FIXED_APER_SIZE,
+	.num_aperture_sizes	= 3,
+	.needs_scratch_page	= TRUE,
+	.configure		= intel_i915_configure,
+	.fetch_size		= intel_i915_fetch_size,
+	.cleanup		= intel_i915_cleanup,
+	.tlb_flush		= intel_i810_tlbflush,
+	.mask_memory		= intel_i810_mask_memory,
+	.masks			= intel_i810_masks,
+	.agp_enable		= intel_i810_agp_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= intel_i915_create_gatt_table,
+	.free_gatt_table	= intel_i830_free_gatt_table,
+	.insert_memory		= intel_i915_insert_entries,
+	.remove_memory		= intel_i915_remove_entries,
+	.alloc_by_type		= intel_i830_alloc_by_type,
+	.free_by_type		= intel_i810_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_destroy_page	= agp_generic_destroy_page,
+};
+
+
static struct agp_bridge_driver intel_7505_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
bridge->driver = &intel_845_driver;
name = "i875";
break;
+ case PCI_DEVICE_ID_INTEL_82915G_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82915G_IG)) {
+ bridge->driver = &intel_915_driver;
+ } else {
+ bridge->driver = &intel_845_driver;
+ }
+ name = "915G";
+ break;
case PCI_DEVICE_ID_INTEL_7505_0:
bridge->driver = &intel_7505_driver;
- name = "E7505";
+ name = "E7505";
break;
case PCI_DEVICE_ID_INTEL_7205_0:
bridge->driver = &intel_7505_driver;
intel_845_configure();
else if (bridge->driver == &intel_830mp_driver)
intel_830mp_configure();
+ else if (bridge->driver == &intel_915_driver)
+ intel_i915_configure();
return 0;
}
static struct agp_device_ids sis_agp_device_ids[] __devinitdata =
{
+ {
+ .device_id = PCI_DEVICE_ID_SI_5591_AGP,
+ .chipset_name = "5591",
+ },
{
.device_id = PCI_DEVICE_ID_SI_530,
.chipset_name = "530",
.device_id = PCI_DEVICE_ID_VIA_PX8X0_0,
.chipset_name = "PM800/PN800/PM880/PN880",
},
+ /* KT880 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_3269_0,
+ .chipset_name = "KT880",
+ },
+ /* KTxxx/Px8xx */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_83_87XX_1,
+ .chipset_name = "VT83xx/VT87xx/KTxxx/Px8xx",
+ },
+ /* P4M800 */
+ {
+ .device_id = PCI_DEVICE_ID_VIA_3296_0,
+ .chipset_name = "P4M800",
+ },
{ }, /* dummy final entry, always present */
};
ID(PCI_DEVICE_ID_VIA_8378_0),
ID(PCI_DEVICE_ID_VIA_PT880),
ID(PCI_DEVICE_ID_VIA_8783_0),
- ID(PCI_DEVICE_ID_VIA_PX8X0_0),
+ ID(PCI_DEVICE_ID_VIA_PX8X0_0),
+ ID(PCI_DEVICE_ID_VIA_3269_0),
+ ID(PCI_DEVICE_ID_VIA_83_87XX_1),
+ ID(PCI_DEVICE_ID_VIA_3296_0),
{ }
};
int version_minor; /**< Minor version */
int version_patchlevel;/**< Patch level */
size_t name_len; /**< Length of name buffer */
- char *name; /**< Name of driver */
+ char __user *name; /**< Name of driver */
size_t date_len; /**< Length of date buffer */
- char *date; /**< User-space buffer to hold date */
+ char __user *date; /**< User-space buffer to hold date */
size_t desc_len; /**< Length of desc buffer */
- char *desc; /**< User-space buffer to hold desc */
+ char __user *desc; /**< User-space buffer to hold desc */
} drm_version_t;
*/
typedef struct drm_unique {
size_t unique_len; /**< Length of unique */
- char *unique; /**< Unique name for driver instantiation */
+ char __user *unique; /**< Unique name for driver instantiation */
} drm_unique_t;
typedef struct drm_list {
int count; /**< Length of user-space structures */
- drm_version_t *version;
+ drm_version_t __user *version;
} drm_list_t;
*/
typedef struct drm_buf_info {
int count; /**< Entries in list */
- drm_buf_desc_t *list;
+ drm_buf_desc_t __user *list;
} drm_buf_info_t;
*/
typedef struct drm_buf_free {
int count;
- int *list;
+ int __user *list;
} drm_buf_free_t;
int idx; /**< Index into the master buffer list */
int total; /**< Buffer size */
int used; /**< Amount of buffer in use (for DMA) */
- void *address; /**< Address of buffer */
+ void __user *address; /**< Address of buffer */
} drm_buf_pub_t;
*/
typedef struct drm_buf_map {
int count; /**< Length of the buffer list */
- void *virtual; /**< Mmap'd area in user-virtual */
- drm_buf_pub_t *list; /**< Buffer information */
+ void __user *virtual; /**< Mmap'd area in user-virtual */
+ drm_buf_pub_t __user *list; /**< Buffer information */
} drm_buf_map_t;
typedef struct drm_dma {
int context; /**< Context handle */
int send_count; /**< Number of buffers to send */
- int *send_indices; /**< List of handles to buffers */
- int *send_sizes; /**< Lengths of data to send */
+ int __user *send_indices; /**< List of handles to buffers */
+ int __user *send_sizes; /**< Lengths of data to send */
drm_dma_flags_t flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
- int *request_indices; /**< Buffer information */
- int *request_sizes;
+ int __user *request_indices; /**< Buffer information */
+ int __user *request_sizes;
int granted_count; /**< Number of buffers granted */
} drm_dma_t;
*/
typedef struct drm_ctx_res {
int count;
- drm_ctx_t *contexts;
+ drm_ctx_t __user *contexts;
} drm_ctx_res_t;
drm_file_t *tag; /**< associated fd private data */
} drm_ctx_list_t;
-#if __HAVE_VBL_IRQ
+#ifdef __HAVE_VBL_IRQ
typedef struct drm_vbl_sig {
struct list_head head;
struct work_struct work;
/** \name VBLANK IRQ support */
/*@{*/
-#if __HAVE_VBL_IRQ
+#ifdef __HAVE_VBL_IRQ
wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
atomic_t vbl_received;
spinlock_t vbl_lock;
struct vm_area_struct *vma);
extern int DRM(mmap)(struct file *filp, struct vm_area_struct *vma);
extern unsigned int DRM(poll)(struct file *filp, struct poll_table_struct *wait);
-extern ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off);
+extern ssize_t DRM(read)(struct file *filp, char __user *buf, size_t count, loff_t *off);
/* Memory management support (drm_memory.h) */
extern void DRM(mem_init)(void);
extern void DRM(driver_irq_preinstall)( drm_device_t *dev );
extern void DRM(driver_irq_postinstall)( drm_device_t *dev );
extern void DRM(driver_irq_uninstall)( drm_device_t *dev );
-#if __HAVE_VBL_IRQ
+#ifdef __HAVE_VBL_IRQ
extern int DRM(wait_vblank)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq);
extern void DRM(vbl_send_signals)( drm_device_t *dev );
#endif
-#if __HAVE_IRQ_BH
+#ifdef __HAVE_IRQ_BH
extern void DRM(irq_immediate_bh)( void *dev );
#endif
#endif
struct proc_dir_entry *root,
struct proc_dir_entry *dev_root);
-#if __HAVE_SG
+#ifdef __HAVE_SG
/* Scatter Gather Support (drm_scatter.h) */
extern void DRM(sg_cleanup)(drm_sg_mem_t *entry);
extern int DRM(sg_alloc)(struct inode *inode, struct file *filp,
info.id_vendor = kern->device->vendor;
info.id_device = kern->device->device;
- if (copy_to_user((drm_agp_info_t *)arg, &info, sizeof(info)))
+ if (copy_to_user((drm_agp_info_t __user *)arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
if (!dev->agp || !dev->agp->acquired || !drm_agp->enable)
return -EINVAL;
- if (copy_from_user(&mode, (drm_agp_mode_t *)arg, sizeof(mode)))
+ if (copy_from_user(&mode, (drm_agp_mode_t __user *)arg, sizeof(mode)))
return -EFAULT;
dev->agp->mode = mode.mode;
DRM_AGP_MEM *memory;
unsigned long pages;
u32 type;
+ drm_agp_buffer_t __user *argp = (void __user *)arg;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (copy_from_user(&request, (drm_agp_buffer_t *)arg, sizeof(request)))
+ if (copy_from_user(&request, argp, sizeof(request)))
return -EFAULT;
if (!(entry = DRM(alloc)(sizeof(*entry), DRM_MEM_AGPLISTS)))
return -ENOMEM;
request.handle = entry->handle;
request.physical = memory->physical;
- if (copy_to_user((drm_agp_buffer_t *)arg, &request, sizeof(request))) {
+ if (copy_to_user(argp, &request, sizeof(request))) {
dev->agp->memory = entry->next;
dev->agp->memory->prev = NULL;
DRM(free_agp)(memory, pages);
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (copy_from_user(&request, (drm_agp_binding_t *)arg, sizeof(request)))
+ if (copy_from_user(&request, (drm_agp_binding_t __user *)arg, sizeof(request)))
return -EFAULT;
if (!(entry = DRM(agp_lookup_entry)(dev, request.handle)))
return -EINVAL;
if (!dev->agp || !dev->agp->acquired || !drm_agp->bind_memory)
return -EINVAL;
- if (copy_from_user(&request, (drm_agp_binding_t *)arg, sizeof(request)))
+ if (copy_from_user(&request, (drm_agp_binding_t __user *)arg, sizeof(request)))
return -EFAULT;
if (!(entry = DRM(agp_lookup_entry)(dev, request.handle)))
return -EINVAL;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (copy_from_user(&request, (drm_agp_buffer_t *)arg, sizeof(request)))
+ if (copy_from_user(&request, (drm_agp_buffer_t __user *)arg, sizeof(request)))
return -EFAULT;
if (!(entry = DRM(agp_lookup_entry)(dev, request.handle)))
return -EINVAL;
}
DRM_DEBUG("%u\n", auth.magic);
- if (copy_to_user((drm_auth_t *)arg, &auth, sizeof(auth)))
+ if (copy_to_user((drm_auth_t __user *)arg, &auth, sizeof(auth)))
return -EFAULT;
return 0;
}
drm_auth_t auth;
drm_file_t *file;
- if (copy_from_user(&auth, (drm_auth_t *)arg, sizeof(auth)))
+ if (copy_from_user(&auth, (drm_auth_t __user *)arg, sizeof(auth)))
return -EFAULT;
DRM_DEBUG("%u\n", auth.magic);
if ((file = DRM(find_file)(dev, auth.magic))) {
int order;
unsigned long tmp;
- for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );
+ for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
+ ;
- if ( size & ~(1 << order) )
+ if (size & (size - 1))
++order;
return order;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_map_t *map;
+ drm_map_t __user *argp = (void __user *)arg;
drm_map_list_t *list;
if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
if ( !map )
return -ENOMEM;
- if ( copy_from_user( map, (drm_map_t *)arg, sizeof(*map) ) ) {
+ if ( copy_from_user( map, argp, sizeof(*map) ) ) {
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
return -EFAULT;
}
list_add(&list->head, &dev->maplist->head);
up(&dev->struct_sem);
- if ( copy_to_user( (drm_map_t *)arg, map, sizeof(*map) ) )
+ if ( copy_to_user( argp, map, sizeof(*map) ) )
return -EFAULT;
if ( map->type != _DRM_SHM ) {
- if ( copy_to_user( &((drm_map_t *)arg)->handle,
+ if ( copy_to_user( &argp->handle,
&map->offset,
sizeof(map->offset) ) )
return -EFAULT;
drm_map_t request;
int found_maps = 0;
- if (copy_from_user(&request, (drm_map_t *)arg,
+ if (copy_from_user(&request, (drm_map_t __user *)arg,
sizeof(request))) {
return -EFAULT;
}
int byte_count;
int i;
drm_buf_t **temp_buflist;
+ drm_buf_desc_t __user *argp = (void __user *)arg;
if ( !dma ) return -EINVAL;
- if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
+ if ( copy_from_user( &request, argp,
sizeof(request) ) )
return -EFAULT;
request.count = entry->buf_count;
request.size = size;
- if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
+ if ( copy_to_user( argp, &request, sizeof(request) ) )
return -EFAULT;
dma->flags = _DRM_DMA_USE_AGP;
int page_count;
unsigned long *temp_pagelist;
drm_buf_t **temp_buflist;
+ drm_buf_desc_t __user *argp = (void __user *)arg;
if ( !dma ) return -EINVAL;
- if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
- sizeof(request) ) )
+ if ( copy_from_user( &request, argp, sizeof(request) ) )
return -EFAULT;
count = request.count;
request.count = entry->buf_count;
request.size = size;
- if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
+ if ( copy_to_user( argp, &request, sizeof(request) ) )
return -EFAULT;
atomic_dec( &dev->buf_alloc );
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t __user *argp = (void __user *)arg;
drm_buf_desc_t request;
drm_buf_entry_t *entry;
drm_buf_t *buf;
if ( !dma ) return -EINVAL;
- if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
- sizeof(request) ) )
+ if ( copy_from_user( &request, argp, sizeof(request) ) )
return -EFAULT;
count = request.count;
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head( &buf->dma_wait );
- buf->filp = 0;
+ buf->filp = NULL;
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
request.count = entry->buf_count;
request.size = size;
- if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
+ if ( copy_to_user( argp, &request, sizeof(request) ) )
return -EFAULT;
dma->flags = _DRM_DMA_USE_SG;
{
drm_buf_desc_t request;
- if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
+ if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
sizeof(request) ) )
return -EFAULT;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_info_t request;
+ drm_buf_info_t __user *argp = (void __user *)arg;
int i;
int count;
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock( &dev->count_lock );
- if ( copy_from_user( &request,
- (drm_buf_info_t *)arg,
- sizeof(request) ) )
+ if ( copy_from_user( &request, argp, sizeof(request) ) )
return -EFAULT;
for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
if ( request.count >= count ) {
for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
if ( dma->bufs[i].buf_count ) {
- drm_buf_desc_t *to = &request.list[count];
+ drm_buf_desc_t __user *to = &request.list[count];
drm_buf_entry_t *from = &dma->bufs[i];
drm_freelist_t *list = &dma->bufs[i].freelist;
if ( copy_to_user( &to->count,
}
request.count = count;
- if ( copy_to_user( (drm_buf_info_t *)arg,
- &request,
- sizeof(request) ) )
+ if ( copy_to_user( argp, &request, sizeof(request) ) )
return -EFAULT;
return 0;
if ( !dma ) return -EINVAL;
if ( copy_from_user( &request,
- (drm_buf_desc_t *)arg,
+ (drm_buf_desc_t __user *)arg,
sizeof(request) ) )
return -EFAULT;
if ( !dma ) return -EINVAL;
if ( copy_from_user( &request,
- (drm_buf_free_t *)arg,
+ (drm_buf_free_t __user *)arg,
sizeof(request) ) )
return -EFAULT;
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
+ drm_buf_map_t __user *argp = (void __user *)arg;
int retcode = 0;
const int zero = 0;
unsigned long virtual;
dev->buf_use++; /* Can't allocate more after this call */
spin_unlock( &dev->count_lock );
- if ( copy_from_user( &request, (drm_buf_map_t *)arg,
- sizeof(request) ) )
+ if ( copy_from_user( &request, argp, sizeof(request) ) )
return -EFAULT;
if ( request.count >= dma->buf_count ) {
retcode = (signed long)virtual;
goto done;
}
- request.virtual = (void *)virtual;
+ request.virtual = (void __user *)virtual;
for ( i = 0 ; i < dma->buf_count ; i++ ) {
if ( copy_to_user( &request.list[i].idx,
request.count = dma->buf_count;
DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
- if ( copy_to_user( (drm_buf_map_t *)arg, &request, sizeof(request) ) )
+ if ( copy_to_user( argp, &request, sizeof(request) ) )
return -EFAULT;
return retcode;
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
+ drm_ctx_priv_map_t __user *argp = (void __user *)arg;
drm_ctx_priv_map_t request;
drm_map_t *map;
- if (copy_from_user(&request,
- (drm_ctx_priv_map_t *)arg,
- sizeof(request)))
+ if (copy_from_user(&request, argp, sizeof(request)))
return -EFAULT;
down(&dev->struct_sem);
up(&dev->struct_sem);
request.handle = map->handle;
- if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
+ if (copy_to_user(argp, &request, sizeof(request)))
return -EFAULT;
return 0;
}
struct list_head *list;
if (copy_from_user(&request,
- (drm_ctx_priv_map_t *)arg,
+ (drm_ctx_priv_map_t __user *)arg,
sizeof(request)))
return -EFAULT;
unsigned int cmd, unsigned long arg )
{
drm_ctx_res_t res;
+ drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
int i;
- if ( copy_from_user( &res, (drm_ctx_res_t *)arg, sizeof(res) ) )
+ if ( copy_from_user( &res, argp, sizeof(res) ) )
return -EFAULT;
if ( res.count >= DRM_RESERVED_CONTEXTS ) {
}
res.count = DRM_RESERVED_CONTEXTS;
- if ( copy_to_user( (drm_ctx_res_t *)arg, &res, sizeof(res) ) )
+ if ( copy_to_user( argp, &res, sizeof(res) ) )
return -EFAULT;
return 0;
}
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_list_t * ctx_entry;
+ drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
- if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
+ if ( copy_from_user( &ctx, argp, sizeof(ctx) ) )
return -EFAULT;
ctx.handle = DRM(ctxbitmap_next)( dev );
++dev->ctx_count;
up( &dev->ctxlist_sem );
- if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) )
+ if ( copy_to_user( argp, &ctx, sizeof(ctx) ) )
return -EFAULT;
return 0;
}
int DRM(getctx)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
+ drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
- if ( copy_from_user( &ctx, (drm_ctx_t*)arg, sizeof(ctx) ) )
+ if ( copy_from_user( &ctx, argp, sizeof(ctx) ) )
return -EFAULT;
/* This is 0, because we don't handle any context flags */
ctx.flags = 0;
- if ( copy_to_user( (drm_ctx_t*)arg, &ctx, sizeof(ctx) ) )
+ if ( copy_to_user( argp, &ctx, sizeof(ctx) ) )
return -EFAULT;
return 0;
}
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
+ if ( copy_from_user( &ctx, (drm_ctx_t __user *)arg, sizeof(ctx) ) )
return -EFAULT;
DRM_DEBUG( "%d\n", ctx.handle );
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
+ if ( copy_from_user( &ctx, (drm_ctx_t __user *)arg, sizeof(ctx) ) )
return -EFAULT;
DRM_DEBUG( "%d\n", ctx.handle );
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
+ if ( copy_from_user( &ctx, (drm_ctx_t __user *)arg, sizeof(ctx) ) )
return -EFAULT;
DRM_DEBUG( "%d\n", ctx.handle );
{
drm_control_t ctl;
- if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
+ if ( copy_from_user( &ctl, (drm_control_t __user *)arg, sizeof(ctl) ) )
return -EFAULT;
switch ( ctl.func ) {
draw.handle = 0; /* NOOP */
DRM_DEBUG("%d\n", draw.handle);
- if (copy_to_user((drm_draw_t *)arg, &draw, sizeof(draw)))
+ if (copy_to_user((drm_draw_t __user *)arg, &draw, sizeof(draw)))
return -EFAULT;
return 0;
}
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { DRM(sg_free), 1, 1 },
#endif
-#if __HAVE_VBL_IRQ
+#ifdef __HAVE_VBL_IRQ
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { DRM(wait_vblank), 0, 0 },
#endif
int DRM(version)( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
+ drm_version_t __user *argp = (void __user *)arg;
drm_version_t version;
int len;
- if ( copy_from_user( &version,
- (drm_version_t *)arg,
- sizeof(version) ) )
+ if ( copy_from_user( &version, argp, sizeof(version) ) )
return -EFAULT;
#define DRM_COPY( name, value ) \
DRM_COPY( version.date, DRIVER_DATE );
DRM_COPY( version.desc, DRIVER_DESC );
- if ( copy_to_user( (drm_version_t *)arg,
- &version,
- sizeof(version) ) )
+ if ( copy_to_user( argp, &version, sizeof(version) ) )
return -EFAULT;
return 0;
}
++priv->lock_count;
- if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
+ if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
return -EFAULT;
if ( lock.context == DRM_KERNEL_CONTEXT ) {
drm_device_t *dev = priv->dev;
drm_lock_t lock;
- if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
+ if ( copy_from_user( &lock, (drm_lock_t __user *)arg, sizeof(lock) ) )
return -EFAULT;
if ( lock.context == DRM_KERNEL_CONTEXT ) {
* agent to request it then we should just be able to
* take it immediately and not eat the ioctl.
*/
- dev->lock.filp = 0;
+ dev->lock.filp = NULL;
{
__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
unsigned int old, new, prev, ctx;
#if !__HAVE_DRIVER_FOPS_READ
/** No-op. */
-ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off)
+ssize_t DRM(read)(struct file *filp, char __user *buf, size_t count, loff_t *off)
{
return 0;
}
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
+ drm_unique_t __user *argp = (void __user *)arg;
drm_unique_t u;
- if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u)))
+ if (copy_from_user(&u, argp, sizeof(u)))
return -EFAULT;
if (u.unique_len >= dev->unique_len) {
if (copy_to_user(u.unique, dev->unique, dev->unique_len))
return -EFAULT;
}
u.unique_len = dev->unique_len;
- if (copy_to_user((drm_unique_t *)arg, &u, sizeof(u)))
+ if (copy_to_user(argp, &u, sizeof(u)))
return -EFAULT;
return 0;
}
if (dev->unique_len || dev->unique) return -EBUSY;
- if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u))) return -EFAULT;
+ if (copy_from_user(&u, (drm_unique_t __user *)arg, sizeof(u)))
+ return -EFAULT;
if (!u.unique_len || u.unique_len > 1024) return -EINVAL;
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
+ drm_map_t __user *argp = (void __user *)arg;
drm_map_t map;
drm_map_list_t *r_list = NULL;
struct list_head *list;
int idx;
int i;
- if (copy_from_user(&map, (drm_map_t *)arg, sizeof(map)))
+ if (copy_from_user(&map, argp, sizeof(map)))
return -EFAULT;
idx = map.offset;
map.mtrr = r_list->map->mtrr;
up(&dev->struct_sem);
- if (copy_to_user((drm_map_t *)arg, &map, sizeof(map))) return -EFAULT;
+ if (copy_to_user(argp, &map, sizeof(map))) return -EFAULT;
return 0;
}
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
+ drm_client_t __user *argp = (void __user *)arg;
drm_client_t client;
drm_file_t *pt;
int idx;
int i;
- if (copy_from_user(&client, (drm_client_t *)arg, sizeof(client)))
+ if (copy_from_user(&client, argp, sizeof(client)))
return -EFAULT;
idx = client.idx;
down(&dev->struct_sem);
client.iocs = pt->ioctl_count;
up(&dev->struct_sem);
- if (copy_to_user((drm_client_t *)arg, &client, sizeof(client)))
+ if (copy_to_user((drm_client_t __user *)arg, &client, sizeof(client)))
return -EFAULT;
return 0;
}
up(&dev->struct_sem);
- if (copy_to_user((drm_stats_t *)arg, &stats, sizeof(stats)))
+ if (copy_to_user((drm_stats_t __user *)arg, &stats, sizeof(stats)))
return -EFAULT;
return 0;
}
drm_set_version_t sv;
drm_set_version_t retv;
int if_version;
+ drm_set_version_t __user *argp = (void __user *)data;
- DRM_COPY_FROM_USER_IOCTL(sv, (drm_set_version_t *)data, sizeof(sv));
+ DRM_COPY_FROM_USER_IOCTL(sv, argp, sizeof(sv));
retv.drm_di_major = DRM_IF_MAJOR;
retv.drm_di_minor = DRM_IF_MINOR;
retv.drm_dd_major = DRIVER_MAJOR;
retv.drm_dd_minor = DRIVER_MINOR;
- DRM_COPY_TO_USER_IOCTL((drm_set_version_t *)data, retv, sizeof(sv));
+ DRM_COPY_TO_USER_IOCTL(argp, retv, sizeof(sv));
if (sv.drm_di_major != -1) {
if (sv.drm_di_major != DRM_IF_MAJOR ||
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
+ drm_irq_busid_t __user *argp = (void __user *)arg;
drm_irq_busid_t p;
- if (copy_from_user(&p, (drm_irq_busid_t *)arg, sizeof(p)))
+ if (copy_from_user(&p, argp, sizeof(p)))
return -EFAULT;
if ((p.busnum >> 8) != dev->pci_domain ||
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
p.busnum, p.devnum, p.funcnum, p.irq);
- if (copy_to_user((drm_irq_busid_t *)arg, &p, sizeof(p)))
+ if (copy_to_user(argp, &p, sizeof(p)))
return -EFAULT;
return 0;
}
dev->dma->this_buffer = NULL;
#endif
-#if __HAVE_IRQ_BH
+#ifdef __HAVE_IRQ_BH
INIT_WORK(&dev->work, DRM(irq_immediate_bh), dev);
#endif
-#if __HAVE_VBL_IRQ
+#ifdef __HAVE_VBL_IRQ
init_waitqueue_head(&dev->vbl_queue);
spin_lock_init( &dev->vbl_lock );
drm_device_t *dev = priv->dev;
drm_control_t ctl;
- if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
+ if ( copy_from_user( &ctl, (drm_control_t __user *)arg, sizeof(ctl) ) )
return -EFAULT;
switch ( ctl.func ) {
}
}
-#if __HAVE_VBL_IRQ
+#ifdef __HAVE_VBL_IRQ
/**
* Wait for VBLANK.
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
+ drm_wait_vblank_t __user *argp = (void __user *)data;
drm_wait_vblank_t vblwait;
struct timeval now;
int ret = 0;
if (!dev->irq)
return -EINVAL;
- DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
- sizeof(vblwait) );
+ DRM_COPY_FROM_USER_IOCTL( vblwait, argp, sizeof(vblwait) );
switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) {
case _DRM_VBLANK_RELATIVE:
}
done:
- DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
- sizeof(vblwait) );
+ DRM_COPY_TO_USER_IOCTL( argp, vblwait, sizeof(vblwait) );
return ret;
}
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
+ drm_scatter_gather_t __user *argp = (void __user *)arg;
drm_scatter_gather_t request;
drm_sg_mem_t *entry;
unsigned long pages, i, j;
if ( dev->sg )
return -EINVAL;
- if ( copy_from_user( &request,
- (drm_scatter_gather_t *)arg,
- sizeof(request) ) )
+ if ( copy_from_user( &request, argp, sizeof(request) ) )
return -EFAULT;
entry = DRM(alloc)( sizeof(*entry), DRM_MEM_SGLISTS );
request.handle = entry->handle;
- if ( copy_to_user( (drm_scatter_gather_t *)arg,
- &request,
- sizeof(request) ) ) {
+ if ( copy_to_user( argp, &request, sizeof(request) ) ) {
DRM(sg_cleanup)( entry );
return -EFAULT;
}
drm_sg_mem_t *entry;
if ( copy_from_user( &request,
- (drm_scatter_gather_t *)arg,
+ (drm_scatter_gather_t __user *)arg,
sizeof(request) ) )
return -EFAULT;
struct drm_agp_mem *agpmem;
struct page *page;
-#if __alpha__
+#ifdef __alpha__
/*
* Adjust to a bus-relative address
*/
{
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
-#if DRM_DMA_HISTOGRAM
+#ifdef DRM_DMA_HISTOGRAM
dev->ctx_start = get_cycles();
#endif
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
- if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
+ if (copy_from_user(&res, (drm_ctx_res_t __user *)arg, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
}
}
res.count = DRM_RESERVED_CONTEXTS;
- if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
+ if (copy_to_user((drm_ctx_res_t __user *)arg, &res, sizeof(res)))
return -EFAULT;
return 0;
}
drm_ctx_t ctx;
int idx;
- if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
idx = DRM(alloc_queue)(dev, (ctx.flags & _DRM_CONTEXT_2DONLY));
if (idx < 0)
DRM_DEBUG("%d\n", ctx.handle);
ctx.handle = idx;
- if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
+ if (copy_to_user((drm_ctx_t __user *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
drm_ctx_t ctx;
int idx;
- if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
idx = ctx.handle;
drm_ctx_t ctx;
int idx;
- if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
idx = ctx.handle;
else
ctx.flags = 0;
- if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx)))
+ if (copy_to_user((drm_ctx_t __user *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return DRM(context_switch)(dev, dev->last_context, ctx.handle);
{
drm_ctx_t ctx;
- if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
int idx;
- if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
unsigned long addr = -ENOMEM;
if (!map)
- return get_unmapped_area(NULL, hint, len, pgoff, flags, 0);
+ return get_unmapped_area(NULL, hint, len, pgoff, flags);
if (map->type == _DRM_FRAME_BUFFER ||
map->type == _DRM_REGISTERS) {
#ifdef HAVE_ARCH_FB_UNMAPPED_AREA
addr = get_fb_unmapped_area(filp, hint, len, pgoff, flags);
#else
- addr = get_unmapped_area(NULL, hint, len, pgoff, flags, 0);
+ addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
#endif
} else if (map->type == _DRM_SHM && SHMLBA > PAGE_SIZE) {
unsigned long slack = SHMLBA - PAGE_SIZE;
- addr = get_unmapped_area(NULL, hint, len + slack, pgoff, flags, 0);
+ addr = get_unmapped_area(NULL, hint, len + slack, pgoff, flags);
if (!(addr & ~PAGE_MASK)) {
unsigned long kvirt = (unsigned long) map->handle;
}
}
} else {
- addr = get_unmapped_area(NULL, hint, len, pgoff, flags, 0);
+ addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
}
return addr;
the circular buffer), is based on Alessandro Rubini's LINUX DEVICE
DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
-ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off)
+ssize_t DRM(read)(struct file *filp, char __user *buf, size_t count, loff_t *off)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int DRM(resctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
+ drm_ctx_res_t __user *argp = (void __user *)arg;
drm_ctx_res_t res;
drm_ctx_t ctx;
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
- if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
+ if (copy_from_user(&res, argp, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
}
}
res.count = DRM_RESERVED_CONTEXTS;
- if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
+ if (copy_to_user(argp, &res, sizeof(res)))
return -EFAULT;
return 0;
}
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
+ drm_ctx_t __user *argp = (void __user *)arg;
- if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, argp, sizeof(ctx)))
return -EFAULT;
if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
/* Init kernel's context and get a new one. */
}
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
DRM_DEBUG("%d\n", ctx.handle);
- if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
+ if (copy_to_user(argp, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
drm_ctx_t ctx;
drm_queue_t *q;
- if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
+ drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
drm_queue_t *q;
- if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, argp, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
ctx.flags = q->flags;
atomic_dec(&q->use_count);
- if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
+ if (copy_to_user(argp, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return DRM(context_switch)(dev, dev->last_context, ctx.handle);
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
- if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
DRM(context_switch_complete)(dev, ctx.handle);
drm_queue_t *q;
drm_buf_t *buf;
- if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
+ drm_dma_t __user *argp = (void __user *)arg;
drm_dma_t d;
- if (copy_from_user(&d, (drm_dma_t *)arg, sizeof(d)))
+ if (copy_from_user(&d, argp, sizeof(d)))
return -EFAULT;
if (d.send_count < 0 || d.send_count > dma->buf_count) {
DRM_DEBUG("%d returning, granted = %d\n",
current->pid, d.granted_count);
- if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
+ if (copy_to_user(argp, &d, sizeof(d)))
return -EFAULT;
return retcode;
LOCK_TEST_WITH_RETURN( dev, filp );
- if ( copy_from_user( &init, (drm_gamma_init_t *)arg, sizeof(init) ) )
+ if ( copy_from_user( &init, (drm_gamma_init_t __user *)arg, sizeof(init) ) )
return -EFAULT;
switch ( init.func ) {
drm_device_t *dev = priv->dev;
drm_gamma_copy_t copy;
- if ( copy_from_user( &copy, (drm_gamma_copy_t *)arg, sizeof(copy) ) )
+ if ( copy_from_user( &copy, (drm_gamma_copy_t __user *)arg, sizeof(copy) ) )
 return -EFAULT;
 return gamma_do_copy_dma( dev, &copy );
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
+ drm_ctx_priv_map_t __user *argp = (void __user *)arg;
drm_ctx_priv_map_t request;
drm_map_t *map;
- if (copy_from_user(&request,
- (drm_ctx_priv_map_t *)arg,
- sizeof(request)))
+ if (copy_from_user(&request, argp, sizeof(request)))
return -EFAULT;
down(&dev->struct_sem);
up(&dev->struct_sem);
request.handle = map->handle;
- if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
+ if (copy_to_user(argp, &request, sizeof(request)))
return -EFAULT;
return 0;
}
struct list_head *list;
if (copy_from_user(&request,
- (drm_ctx_priv_map_t *)arg,
+ (drm_ctx_priv_map_t __user *)arg,
sizeof(request)))
return -EFAULT;
DRM_DEBUG("\n");
- if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
+ if (copy_from_user(&lock, (drm_lock_t __user *)arg, sizeof(lock)))
return -EFAULT;
ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags);
DRM(flush_unblock)(dev, lock.context, lock.flags);
int idx;
int while_locked = 0;
drm_device_dma_t *dma = dev->dma;
+ int *ind;
+ int err;
DECLARE_WAITQUEUE(entry, current);
DRM_DEBUG("%d\n", d->send_count);
remove_wait_queue(&q->write_queue, &entry);
}
+ ind = DRM(alloc)(d->send_count * sizeof(int), DRM_MEM_DRIVER);
+ if (!ind)
+ return -ENOMEM;
+
+ if (copy_from_user(ind, d->send_indices, d->send_count * sizeof(int))) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ err = -EINVAL;
for (i = 0; i < d->send_count; i++) {
- idx = d->send_indices[i];
+ idx = ind[i];
if (idx < 0 || idx >= dma->buf_count) {
- atomic_dec(&q->use_count);
DRM_ERROR("Index %d (of %d max)\n",
- d->send_indices[i], dma->buf_count - 1);
- return -EINVAL;
+ ind[i], dma->buf_count - 1);
+ goto out;
}
buf = dma->buflist[ idx ];
if (buf->filp != filp) {
- atomic_dec(&q->use_count);
DRM_ERROR("Process %d using buffer not owned\n",
current->pid);
- return -EINVAL;
+ goto out;
}
if (buf->list != DRM_LIST_NONE) {
- atomic_dec(&q->use_count);
DRM_ERROR("Process %d using buffer %d on list %d\n",
current->pid, buf->idx, buf->list);
+ goto out;
}
- buf->used = d->send_sizes[i];
+ buf->used = ind[i];
buf->while_locked = while_locked;
buf->context = d->context;
if (!buf->used) {
DRM_ERROR("Queueing 0 length buffer\n");
}
if (buf->pending) {
- atomic_dec(&q->use_count);
DRM_ERROR("Queueing pending buffer:"
" buffer %d, offset %d\n",
- d->send_indices[i], i);
- return -EINVAL;
+ ind[i], i);
+ goto out;
}
if (buf->waiting) {
- atomic_dec(&q->use_count);
DRM_ERROR("Queueing waiting buffer:"
" buffer %d, offset %d\n",
- d->send_indices[i], i);
- return -EINVAL;
+ ind[i], i);
+ goto out;
}
buf->waiting = 1;
if (atomic_read(&q->use_count) == 1
atomic_dec(&q->use_count);
return 0;
+
+out:
+ DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER);
+ atomic_dec(&q->use_count);
+ return err;
}
static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
/* Real error */
DRM_ERROR("mmap error\n");
retcode = (signed int)buf_priv->virtual;
- buf_priv->virtual = 0;
+ buf_priv->virtual = NULL;
}
 up_write( &current->mm->mmap_sem );
 up_write(&current->mm->mmap_sem);
buf_priv->currently_mapped = I810_BUF_UNMAPPED;
- buf_priv->virtual = 0;
+ buf_priv->virtual = NULL;
return retcode;
}
{
/* Get v1.1 init data */
- if (copy_from_user(init, (drm_i810_pre12_init_t *)arg,
+ if (copy_from_user(init, (drm_i810_pre12_init_t __user *)arg,
sizeof(drm_i810_pre12_init_t))) {
return -EFAULT;
}
/* This is a v1.2 client, just get the v1.2 init data */
DRM_INFO("Using POST v1.2 init.\n");
- if (copy_from_user(init, (drm_i810_init_t *)arg,
+ if (copy_from_user(init, (drm_i810_init_t __user *)arg,
sizeof(drm_i810_init_t))) {
return -EFAULT;
}
int retcode = 0;
/* Get only the init func */
- if (copy_from_user(&init, (void *)arg, sizeof(drm_i810_init_func_t)))
+ if (copy_from_user(&init, (void __user *)arg, sizeof(drm_i810_init_func_t)))
return -EFAULT;
switch(init.func) {
default:
case I810_INIT_DMA_1_4:
DRM_INFO("Using v1.4 init.\n");
- if (copy_from_user(&init, (drm_i810_init_t *)arg,
+ if (copy_from_user(&init, (drm_i810_init_t __user *)arg,
sizeof(drm_i810_init_t))) {
return -EFAULT;
}
dev_priv->sarea_priv;
drm_i810_vertex_t vertex;
- if (copy_from_user(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex)))
+ if (copy_from_user(&vertex, (drm_i810_vertex_t __user *)arg, sizeof(vertex)))
return -EFAULT;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
drm_device_t *dev = priv->dev;
drm_i810_clear_t clear;
- if (copy_from_user(&clear, (drm_i810_clear_t *)arg, sizeof(clear)))
+ if (copy_from_user(&clear, (drm_i810_clear_t __user *)arg, sizeof(clear)))
return -EFAULT;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
- if (copy_from_user(&d, (drm_i810_dma_t *)arg, sizeof(d)))
+ if (copy_from_user(&d, (drm_i810_dma_t __user *)arg, sizeof(d)))
return -EFAULT;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
current->pid, retcode, d.granted);
- if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
+ if (copy_to_user((drm_dma_t __user *)arg, &d, sizeof(d)))
return -EFAULT;
sarea_priv->last_dispatch = (int) hw_status[5];
dev_priv->sarea_priv;
drm_i810_mc_t mc;
- if (copy_from_user(&mc, (drm_i810_mc_t *)arg, sizeof(mc)))
+ if (copy_from_user(&mc, (drm_i810_mc_t __user *)arg, sizeof(mc)))
return -EFAULT;
data.offset = dev_priv->overlay_offset;
data.physical = dev_priv->overlay_physical;
- if (copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data)))
+ if (copy_to_user((drm_i810_overlay_t __user *)arg,&data,sizeof(data)))
return -EFAULT;
return 0;
}
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
drm_i830_private_t *dev_priv = dev->dev_private;
struct file_operations *old_fops;
+ unsigned long virtual;
int retcode = 0;
if(buf_priv->currently_mapped == I830_BUF_MAPPED) return -EINVAL;
old_fops = filp->f_op;
filp->f_op = &i830_buffer_fops;
dev_priv->mmap_buffer = buf;
- buf_priv->virtual = (void __user *)do_mmap(filp, 0, buf->total,
- PROT_READ|PROT_WRITE,
- MAP_SHARED,
- buf->bus_address);
+ virtual = do_mmap(filp, 0, buf->total, PROT_READ|PROT_WRITE,
+ MAP_SHARED, buf->bus_address);
dev_priv->mmap_buffer = NULL;
filp->f_op = old_fops;
- if (IS_ERR(buf_priv->virtual)) {
+ if (IS_ERR((void *)virtual)) { /* ugh */
/* Real error */
DRM_ERROR("mmap error\n");
- retcode = PTR_ERR(buf_priv->virtual);
+ retcode = virtual;
buf_priv->virtual = NULL;
+ } else {
+ buf_priv->virtual = (void __user *)virtual;
}
 up_write( &current->mm->mmap_sem );
}
int i830_dma_init(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long __user arg)
+ unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_flush_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long __user arg)
+ unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_dma_vertex(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long __user arg)
+ unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_clear_bufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long __user arg)
+ unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_swap_bufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long __user arg)
+ unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_flip_bufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long __user arg)
+ unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long __user arg)
+ unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
}
int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long __user arg)
+ unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int i830_copybuf(struct inode *inode,
struct file *filp,
unsigned int cmd,
- unsigned long __user arg)
+ unsigned long arg)
{
/* Never copy - 2.4.x doesn't need it */
return 0;
int i830_getparam( struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long __user arg )
+ unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int i830_setparam( struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long __user arg )
+ unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
/* Needs the lock as it touches the ring.
*/
int i830_irq_emit( struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long __user arg )
+ unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
return -EINVAL;
}
- if (copy_from_user( &emit, (drm_i830_irq_emit_t *)arg, sizeof(emit) ))
+ if (copy_from_user( &emit, (drm_i830_irq_emit_t __user *)arg, sizeof(emit) ))
return -EFAULT;
result = i830_emit_irq( dev );
return -EINVAL;
}
- if (copy_from_user( &irqwait, (drm_i830_irq_wait_t *)arg,
+ if (copy_from_user( &irqwait, (drm_i830_irq_wait_t __user *)arg,
sizeof(irqwait) ))
return -EFAULT;
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( init, (drm_mga_init_t *)data, sizeof(init) );
+ DRM_COPY_FROM_USER_IOCTL( init, (drm_mga_init_t __user *)data, sizeof(init) );
switch ( init.func ) {
case MGA_INIT_DMA:
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t *)data, sizeof(lock) );
+ DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t __user *)data, sizeof(lock) );
DRM_DEBUG( "%s%s%s\n",
(lock.flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ drm_dma_t __user *argp = (void __user *)data;
drm_dma_t d;
int ret = 0;
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *)data, sizeof(d) );
+ DRM_COPY_FROM_USER_IOCTL( d, argp, sizeof(d) );
/* Please don't send us buffers.
*/
ret = mga_dma_get_buffers( filp, dev, &d );
}
- DRM_COPY_TO_USER_IOCTL( (drm_dma_t *)data, d, sizeof(d) );
+ DRM_COPY_TO_USER_IOCTL( argp, d, sizeof(d) );
return ret;
}
typedef struct drm_mga_getparam {
int param;
- void *value;
+ void __user *value;
} drm_mga_getparam_t;
#endif
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( clear, (drm_mga_clear_t *)data, sizeof(clear) );
+ DRM_COPY_FROM_USER_IOCTL( clear, (drm_mga_clear_t __user *)data, sizeof(clear) );
if ( sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS )
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( vertex,
- (drm_mga_vertex_t *)data,
+ (drm_mga_vertex_t __user *)data,
sizeof(vertex) );
if(vertex.idx < 0 || vertex.idx > dma->buf_count) return DRM_ERR(EINVAL);
LOCK_TEST_WITH_RETURN( dev, filp );
DRM_COPY_FROM_USER_IOCTL( indices,
- (drm_mga_indices_t *)data,
+ (drm_mga_indices_t __user *)data,
sizeof(indices) );
if(indices.idx < 0 || indices.idx > dma->buf_count) return DRM_ERR(EINVAL);
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( iload, (drm_mga_iload_t *)data, sizeof(iload) );
+ DRM_COPY_FROM_USER_IOCTL( iload, (drm_mga_iload_t __user *)data, sizeof(iload) );
#if 0
if ( mga_do_wait_for_idle( dev_priv ) < 0 ) {
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( blit, (drm_mga_blit_t *)data, sizeof(blit) );
+ DRM_COPY_FROM_USER_IOCTL( blit, (drm_mga_blit_t __user *)data, sizeof(blit) );
if ( sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS )
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( param, (drm_mga_getparam_t *)data,
+ DRM_COPY_FROM_USER_IOCTL( param, (drm_mga_getparam_t __user *)data,
sizeof(param) );
DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( init, (drm_r128_init_t *)data, sizeof(init) );
+ DRM_COPY_FROM_USER_IOCTL( init, (drm_r128_init_t __user *)data, sizeof(init) );
switch ( init.func ) {
case R128_INIT_CCE:
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t *)data, sizeof(stop) );
+ DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t __user *)data, sizeof(stop) );
/* Flush any pending CCE commands. This ensures any outstanding
* commands are exectuted by the engine before we turn it off.
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
int ret = 0;
+ drm_dma_t __user *argp = (void __user *)data;
drm_dma_t d;
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *) data, sizeof(d) );
+ DRM_COPY_FROM_USER_IOCTL( d, argp, sizeof(d) );
/* Please don't send us buffers.
*/
ret = r128_cce_get_buffers( filp, dev, &d );
}
- DRM_COPY_TO_USER_IOCTL((drm_dma_t *) data, d, sizeof(d) );
+ DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d) );
return ret;
}
R128_READ_PIXELS = 0x04
} func;
int n;
- int *x;
- int *y;
- unsigned int *buffer;
- unsigned char *mask;
+ int __user *x;
+ int __user *y;
+ unsigned int __user *buffer;
+ unsigned char __user *mask;
} drm_r128_depth_t;
typedef struct drm_r128_stipple {
- unsigned int *mask;
+ unsigned int __user *mask;
} drm_r128_stipple_t;
typedef struct drm_r128_indirect {
typedef struct drm_r128_getparam {
int param;
- void *value;
+ void __user *value;
} drm_r128_getparam_t;
#endif
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( clear, (drm_r128_clear_t *) data,
+ DRM_COPY_FROM_USER_IOCTL( clear, (drm_r128_clear_t __user *) data,
sizeof(clear) );
RING_SPACE_TEST_WITH_RETURN( dev_priv );
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( vertex, (drm_r128_vertex_t *) data,
+ DRM_COPY_FROM_USER_IOCTL( vertex, (drm_r128_vertex_t __user *) data,
sizeof(vertex) );
DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n",
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( elts, (drm_r128_indices_t *) data,
+ DRM_COPY_FROM_USER_IOCTL( elts, (drm_r128_indices_t __user *) data,
sizeof(elts) );
DRM_DEBUG( "pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( blit, (drm_r128_blit_t *) data,
+ DRM_COPY_FROM_USER_IOCTL( blit, (drm_r128_blit_t __user *) data,
sizeof(blit) );
DRM_DEBUG( "pid=%d index=%d\n", DRM_CURRENTPID, blit.idx );
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( depth, (drm_r128_depth_t *) data,
+ DRM_COPY_FROM_USER_IOCTL( depth, (drm_r128_depth_t __user *) data,
sizeof(depth) );
RING_SPACE_TEST_WITH_RETURN( dev_priv );
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( stipple, (drm_r128_stipple_t *) data,
+ DRM_COPY_FROM_USER_IOCTL( stipple, (drm_r128_stipple_t __user *) data,
sizeof(stipple) );
if ( DRM_COPY_FROM_USER( &mask, stipple.mask,
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( indirect, (drm_r128_indirect_t *) data,
+ DRM_COPY_FROM_USER_IOCTL( indirect, (drm_r128_indirect_t __user *) data,
sizeof(indirect) );
DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( param, (drm_r128_getparam_t *)data,
+ DRM_COPY_FROM_USER_IOCTL( param, (drm_r128_getparam_t __user *)data,
sizeof(param) );
DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t *)data, sizeof(init) );
+ DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) );
switch ( init.func ) {
case RADEON_INIT_CP:
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( stop, (drm_radeon_cp_stop_t *)data, sizeof(stop) );
+ DRM_COPY_FROM_USER_IOCTL( stop, (drm_radeon_cp_stop_t __user *)data, sizeof(stop) );
if (!dev_priv->cp_running)
return 0;
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
int ret = 0;
+ drm_dma_t __user *argp = (void __user *)data;
drm_dma_t d;
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *)data, sizeof(d) );
+ DRM_COPY_FROM_USER_IOCTL( d, argp, sizeof(d) );
/* Please don't send us buffers.
*/
ret = radeon_cp_get_buffers( filp, dev, &d );
}
- DRM_COPY_TO_USER_IOCTL( (drm_dma_t *)data, d, sizeof(d) );
+ DRM_COPY_TO_USER_IOCTL( argp, d, sizeof(d) );
return ret;
}
unsigned int clear_depth;
unsigned int color_mask;
unsigned int depth_mask; /* misnamed field: should be stencil */
- drm_radeon_clear_rect_t *depth_boxes;
+ drm_radeon_clear_rect_t __user *depth_boxes;
} drm_radeon_clear_t;
typedef struct drm_radeon_vertex {
int idx; /* Index of vertex buffer */
int discard; /* Client finished with buffer? */
int nr_states;
- drm_radeon_state_t *state;
+ drm_radeon_state_t __user *state;
int nr_prims;
- drm_radeon_prim_t *prim;
+ drm_radeon_prim_t __user *prim;
} drm_radeon_vertex2_t;
/* v1.3 - obsoletes drm_radeon_vertex2
*/
typedef struct drm_radeon_cmd_buffer {
int bufsz;
- char *buf;
+ char __user *buf;
int nbox;
- drm_clip_rect_t *boxes;
+ drm_clip_rect_t __user *boxes;
} drm_radeon_cmd_buffer_t;
typedef struct drm_radeon_tex_image {
unsigned int x, y; /* Blit coordinates */
unsigned int width, height;
- const void *data;
+ const void __user *data;
} drm_radeon_tex_image_t;
typedef struct drm_radeon_texture {
int format;
int width; /* Texture image coordinates */
int height;
- drm_radeon_tex_image_t *image;
+ drm_radeon_tex_image_t __user *image;
} drm_radeon_texture_t;
typedef struct drm_radeon_stipple {
- unsigned int *mask;
+ unsigned int __user *mask;
} drm_radeon_stipple_t;
typedef struct drm_radeon_indirect {
typedef struct drm_radeon_getparam {
int param;
- void *value;
+ void __user *value;
} drm_radeon_getparam_t;
/* 1.6: Set up a memory manager for regions of shared memory:
int region;
int alignment;
int size;
- int *region_offset; /* offset from start of fb or GART */
+ int __user *region_offset; /* offset from start of fb or GART */
} drm_radeon_mem_alloc_t;
typedef struct drm_radeon_mem_free {
/* 1.6: Userspace can request & wait on irq's:
*/
typedef struct drm_radeon_irq_emit {
- int *irq_seq;
+ int __user *irq_seq;
} drm_radeon_irq_emit_t;
typedef struct drm_radeon_irq_wait {
#define OUT_RING_USER_TABLE( tab, sz ) do { \
int _size = (sz); \
- int *_tab = (tab); \
+ int __user *_tab = (tab); \
\
if (write + _size > mask) { \
int i = (mask+1) - write; \
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( emit, (drm_radeon_irq_emit_t *)data,
+ DRM_COPY_FROM_USER_IOCTL( emit, (drm_radeon_irq_emit_t __user *)data,
sizeof(emit) );
result = radeon_emit_irq( dev );
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( irqwait, (drm_radeon_irq_wait_t *)data,
+ DRM_COPY_FROM_USER_IOCTL( irqwait, (drm_radeon_irq_wait_t __user*)data,
sizeof(irqwait) );
return radeon_wait_irq( dev, irqwait.irq_seq );
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_mem_alloc_t *)data,
+ DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_mem_alloc_t __user *)data,
sizeof(alloc) );
heap = get_heap( dev_priv, alloc.region );
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t *)data,
+ DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t __user *)data,
sizeof(memfree) );
heap = get_heap( dev_priv, memfree.region );
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t *)data,
+ DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t __user *)data,
sizeof(initheap) );
heap = get_heap( dev_priv, initheap.region );
static __inline__ int radeon_check_and_fixup_offset_user( drm_radeon_private_t *dev_priv,
drm_file_t *filp_priv,
- u32 *offset ) {
+ u32 __user *offset ) {
u32 off;
DRM_GET_USER_UNCHECKED( off, offset );
static __inline__ int radeon_check_and_fixup_packets( drm_radeon_private_t *dev_priv,
drm_file_t *filp_priv,
int id,
- u32 *data ) {
+ u32 __user *data ) {
switch ( id ) {
case RADEON_EMIT_PP_MISC:
drm_file_t *filp_priv,
drm_radeon_cmd_buffer_t *cmdbuf,
unsigned int *cmdsz ) {
- u32 tmp[4], *cmd = ( u32* )cmdbuf->buf;
+ u32 tmp[4];
+ u32 __user *cmd = (u32 __user *)cmdbuf->buf;
if ( DRM_COPY_FROM_USER_UNCHECKED( tmp, cmd, sizeof( tmp ) ) ) {
DRM_ERROR( "Failed to copy data from user space\n" );
drm_buf_t *buf;
u32 format;
u32 *buffer;
- const u8 *data;
+ const u8 __user *data;
int size, dwords, tex_width, blit_width;
u32 height;
int i;
* update them for a multi-pass texture blit.
*/
height = image->height;
- data = (const u8 *)image->data;
+ data = (const u8 __user *)image->data;
size = height * blit_width;
/* Update the input parameters for next time */
image->y += height;
image->height -= height;
- image->data = (const u8 *)image->data + size;
+ image->data = (const u8 __user *)image->data + size;
} while (image->height > 0);
/* Flush the pixel cache after the blit completes. This ensures
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( clear, (drm_radeon_clear_t *)data,
+ DRM_COPY_FROM_USER_IOCTL( clear, (drm_radeon_clear_t __user *)data,
sizeof(clear) );
RING_SPACE_TEST_WITH_RETURN( dev_priv );
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
- DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex_t *)data,
+ DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex_t __user *)data,
sizeof(vertex) );
DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n",
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
- DRM_COPY_FROM_USER_IOCTL( elts, (drm_radeon_indices_t *)data,
+ DRM_COPY_FROM_USER_IOCTL( elts, (drm_radeon_indices_t __user *)data,
sizeof(elts) );
DRM_DEBUG( "pid=%d index=%d start=%d end=%d discard=%d\n",
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t *)data, sizeof(tex) );
+ DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t __user *)data, sizeof(tex) );
if ( tex.image == NULL ) {
DRM_ERROR( "null texture image!\n" );
}
if ( DRM_COPY_FROM_USER( &image,
- (drm_radeon_tex_image_t *)tex.image,
+ (drm_radeon_tex_image_t __user *)tex.image,
sizeof(image) ) )
return DRM_ERR(EFAULT);
LOCK_TEST_WITH_RETURN( dev, filp );
- DRM_COPY_FROM_USER_IOCTL( stipple, (drm_radeon_stipple_t *)data,
+ DRM_COPY_FROM_USER_IOCTL( stipple, (drm_radeon_stipple_t __user *)data,
sizeof(stipple) );
if ( DRM_COPY_FROM_USER( &mask, stipple.mask, 32 * sizeof(u32) ) )
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( indirect, (drm_radeon_indirect_t *)data,
+ DRM_COPY_FROM_USER_IOCTL( indirect, (drm_radeon_indirect_t __user *)data,
sizeof(indirect) );
DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
- DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex2_t *)data,
+ DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex2_t __user *)data,
sizeof(vertex) );
DRM_DEBUG( "pid=%d index=%d discard=%d\n",
{
int id = (int)header.packet.packet_id;
int sz, reg;
- int *data = (int *)cmdbuf->buf;
+ int __user *data = (int __user *)cmdbuf->buf;
RING_LOCALS;
if (id >= RADEON_MAX_STATE_PACKETS)
drm_radeon_cmd_buffer_t *cmdbuf )
{
int sz = header.scalars.count;
- int *data = (int *)cmdbuf->buf;
+ int __user *data = (int __user *)cmdbuf->buf;
int start = header.scalars.offset;
int stride = header.scalars.stride;
RING_LOCALS;
drm_radeon_cmd_buffer_t *cmdbuf )
{
int sz = header.scalars.count;
- int *data = (int *)cmdbuf->buf;
+ int __user *data = (int __user *)cmdbuf->buf;
int start = ((unsigned int)header.scalars.offset) + 0x100;
int stride = header.scalars.stride;
RING_LOCALS;
drm_radeon_cmd_buffer_t *cmdbuf )
{
int sz = header.vectors.count;
- int *data = (int *)cmdbuf->buf;
+ int __user *data = (int __user *)cmdbuf->buf;
int start = header.vectors.offset;
int stride = header.vectors.stride;
RING_LOCALS;
{
drm_radeon_private_t *dev_priv = dev->dev_private;
unsigned int cmdsz;
- int *cmd = (int *)cmdbuf->buf, ret;
+ int __user *cmd = (int __user *)cmdbuf->buf;
+ int ret;
RING_LOCALS;
DRM_DEBUG("\n");
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_clip_rect_t box;
unsigned int cmdsz;
- int *cmd = (int *)cmdbuf->buf, ret;
- drm_clip_rect_t *boxes = cmdbuf->boxes;
+ int __user *cmd = (int __user *)cmdbuf->buf;
+ int ret;
+ drm_clip_rect_t __user *boxes = cmdbuf->boxes;
int i = 0;
RING_LOCALS;
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
- DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t *)data,
+ DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t __user *)data,
sizeof(cmdbuf) );
RING_SPACE_TEST_WITH_RETURN( dev_priv );
while ( cmdbuf.bufsz >= sizeof(header) ) {
- if (DRM_GET_USER_UNCHECKED( header.i, (int *)cmdbuf.buf )) {
+ if (DRM_GET_USER_UNCHECKED( header.i, (int __user *)cmdbuf.buf )) {
DRM_ERROR("__get_user %p\n", cmdbuf.buf);
return DRM_ERR(EFAULT);
}
return DRM_ERR(EINVAL);
}
- DRM_COPY_FROM_USER_IOCTL( param, (drm_radeon_getparam_t *)data,
+ DRM_COPY_FROM_USER_IOCTL( param, (drm_radeon_getparam_t __user *)data,
sizeof(param) );
DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );
DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
- DRM_COPY_FROM_USER_IOCTL( sp, ( drm_radeon_setparam_t* )data,
+ DRM_COPY_FROM_USER_IOCTL( sp, ( drm_radeon_setparam_t __user * )data,
sizeof( sp ) );
switch( sp.param ) {
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_fb_t fb;
- DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t *)data, sizeof(fb));
+ DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *)data, sizeof(fb));
if (dev_priv == NULL) {
dev->dev_private = DRM(calloc)(1, sizeof(drm_sis_private_t),
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
+ drm_sis_mem_t __user *argp = (void __user *)data;
drm_sis_mem_t fb;
PMemBlock block;
int retval = 0;
if (dev_priv == NULL || dev_priv->FBHeap == NULL)
return DRM_ERR(EINVAL);
- DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t *)data, sizeof(fb));
+ DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb));
block = mmAllocMem(dev_priv->FBHeap, fb.size, 0, 0);
if (block) {
fb.free = 0;
}
- DRM_COPY_TO_USER_IOCTL((drm_sis_mem_t *)data, fb, sizeof(fb));
+ DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb));
DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, fb.offset);
if (dev_priv == NULL || dev_priv->FBHeap == NULL)
return DRM_ERR(EINVAL);
- DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t *)data, sizeof(fb));
+ DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *)data, sizeof(fb));
if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock)fb.free))
return DRM_ERR(EINVAL);
if (dev_priv->AGPHeap != NULL)
return DRM_ERR(EINVAL);
- DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t *)data, sizeof(agp));
+ DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *)data, sizeof(agp));
dev_priv->AGPHeap = mmInit(agp.offset, agp.size);
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
+ drm_sis_mem_t __user *argp = (void __user *)data;
drm_sis_mem_t agp;
PMemBlock block;
int retval = 0;
if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
return DRM_ERR(EINVAL);
- DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t *)data, sizeof(agp));
+ DRM_COPY_FROM_USER_IOCTL(agp, argp, sizeof(agp));
block = mmAllocMem(dev_priv->AGPHeap, agp.size, 0, 0);
if (block) {
agp.free = 0;
}
- DRM_COPY_TO_USER_IOCTL((drm_sis_mem_t *)data, agp, sizeof(agp));
+ DRM_COPY_TO_USER_IOCTL(argp, agp, sizeof(agp));
DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size, agp.offset);
if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
return DRM_ERR(EINVAL);
- DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t *)data, sizeof(agp));
+ DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t __user *)data, sizeof(agp));
if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock)agp.free))
return DRM_ERR(EINVAL);
}
static ssize_t
-ds1620_read(struct file *file, char *buf, size_t count, loff_t *ptr)
+ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr)
{
signed int cur_temp;
signed char cur_temp_degF;
- /* Can't seek (pread) on this device */
- if (ptr != &file->f_pos)
- return -ESPIPE;
-
cur_temp = cvt_9_to_int(ds1620_in(THERM_READ_TEMP, 9)) >> 1;
/* convert to Fahrenheit, as per wdt.c */
ds1620_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
struct therm therm;
+ union {
+ struct therm __user *therm;
+ int __user *i;
+ } uarg;
int i;
+ uarg.i = (int __user *)arg;
+
switch(cmd) {
case CMD_SET_THERMOSTATE:
case CMD_SET_THERMOSTATE2:
return -EPERM;
if (cmd == CMD_SET_THERMOSTATE) {
- if (get_user(therm.hi, (int *)arg))
+ if (get_user(therm.hi, uarg.i))
return -EFAULT;
therm.lo = therm.hi - 3;
} else {
- if (copy_from_user(&therm, (void *)arg, sizeof(therm)))
+ if (copy_from_user(&therm, uarg.therm, sizeof(therm)))
return -EFAULT;
}
therm.hi >>= 1;
if (cmd == CMD_GET_THERMOSTATE) {
- if (put_user(therm.hi, (int *)arg))
+ if (put_user(therm.hi, uarg.i))
return -EFAULT;
} else {
- if (copy_to_user((void *)arg, &therm, sizeof(therm)))
+ if (copy_to_user(uarg.therm, &therm, sizeof(therm)))
return -EFAULT;
}
break;
if (cmd == CMD_GET_TEMPERATURE)
i >>= 1;
- return put_user(i, (int *)arg) ? -EFAULT : 0;
+ return put_user(i, uarg.i) ? -EFAULT : 0;
case CMD_GET_STATUS:
i = ds1620_in(THERM_READ_CONFIG, 8) & 0xe3;
- return put_user(i, (int *)arg) ? -EFAULT : 0;
+ return put_user(i, uarg.i) ? -EFAULT : 0;
case CMD_GET_FAN:
i = netwinder_get_fan();
- return put_user(i, (int *)arg) ? -EFAULT : 0;
+ return put_user(i, uarg.i) ? -EFAULT : 0;
case CMD_SET_FAN:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (get_user(i, (int *)arg))
+ if (get_user(i, uarg.i))
return -EFAULT;
netwinder_set_fan(i);
static struct file_operations ds1620_fops = {
.owner = THIS_MODULE,
+ .open = nonseekable_open,
.read = ds1620_read,
.ioctl = ds1620_ioctl,
};
return ret;
#ifdef THERM_USE_PROC
- proc_therm_ds1620 = create_proc_entry("therm", 0, 0);
+ proc_therm_ds1620 = create_proc_entry("therm", 0, NULL);
if (proc_therm_ds1620)
proc_therm_ds1620->read_proc = proc_therm_ds1620_read;
else
}
case 2: /* 16 bit */
{
- short *data;
+ const short *data;
count /= 2;
- data = (short*) buf;
+ data = (const short *)buf;
handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
get_user(dsp56k_host_interface.data.w[1], data+n++));
return 2*n;
}
case 4: /* 32 bit */
{
- long *data;
+ const long *data;
count /= 4;
- data = (long*) buf;
+ data = (const long *)buf;
handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
get_user(dsp56k_host_interface.data.l, data+n++));
return 4*n;
char ch;
int i = 0, retries;
- /* Can't seek (pread) on the DoubleTalk. */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
TRACE_TEXT("(dtlk_read");
/* printk("DoubleTalk PC - dtlk_read()\n"); */
}
#endif
- /* Can't seek (pwrite) on the DoubleTalk. */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (iminor(file->f_dentry->d_inode) != DTLK_MINOR)
return -EINVAL;
{
TRACE_TEXT("(dtlk_open");
+ nonseekable_open(inode, file);
switch (iminor(inode)) {
case DTLK_MINOR:
if (dtlk_busy)
return -EBUSY;
- return 0;
+ return nonseekable_open(inode, file);
default:
return -ENXIO;
#define ENABLE_PCI
#endif /* CONFIG_PCI */
-#define putUser(arg1, arg2) put_user(arg1, (unsigned long *)arg2)
-#define getUser(arg1, arg2) get_user(arg1, (unsigned int *)arg2)
+#define putUser(arg1, arg2) put_user(arg1, (unsigned long __user *)arg2)
+#define getUser(arg1, arg2) get_user(arg1, (unsigned __user *)arg2)
#ifdef ENABLE_PCI
#include <linux/pci.h>
void epca_setup(char *, int *);
void console_print(const char *);
-static int get_termio(struct tty_struct *, struct termio *);
+static int get_termio(struct tty_struct *, struct termio __user *);
static int pc_write(struct tty_struct *, int, const unsigned char *, int);
int pc_init(void);
if (bytesAvailable)
{ /* Begin bytesAvailable */
+ /* ---------------------------------------------------------------
+ The below function reads data from user memory. This routine
+ can not be used in an interrupt routine. (Because it may
+ generate a page fault) It can only be called while we can the
+ user context is accessible.
+
+ The prototype is :
+ inline void copy_from_user(void * to, const void * from,
+ unsigned long count);
+
+ I also think (Check hackers guide) that optimization must
+ be turned ON. (Which sounds strange to me...)
+
+ Remember copy_from_user WILL generate a page fault if the
+ user memory being accessed has been swapped out. This can
+ cause this routine to temporarily sleep while this page
+ fault is occurring.
+
+ ----------------------------------------------------------------- */
- /* Can the user buffer be accessed at the moment ? */
- if (verify_area(VERIFY_READ, (char*)buf, bytesAvailable))
- bytesAvailable = 0; /* Can't do; try again later */
- else /* Evidently it can, began transmission */
- { /* Begin if area verified */
- /* ---------------------------------------------------------------
- The below function reads data from user memory. This routine
- can not be used in an interrupt routine. (Because it may
- generate a page fault) It can only be called while we can the
- user context is accessible.
-
- The prototype is :
- inline void copy_from_user(void * to, const void * from,
- unsigned long count);
-
- I also think (Check hackers guide) that optimization must
- be turned ON. (Which sounds strange to me...)
-
- Remember copy_from_user WILL generate a page fault if the
- user memory being accessed has been swapped out. This can
- cause this routine to temporarily sleep while this page
- fault is occurring.
-
- ----------------------------------------------------------------- */
-
- if (copy_from_user(ch->tmp_buf, buf,
- bytesAvailable))
- return -EFAULT;
-
- } /* End if area verified */
-
+ if (copy_from_user(ch->tmp_buf, buf,
+ bytesAvailable))
+ return -EFAULT;
} /* End bytesAvailable */
/* ------------------------------------------------------------------
ch->boardnum = crd;
ch->channelnum = i;
ch->magic = EPCA_MAGIC;
- ch->tty = 0;
+ ch->tty = NULL;
if (shrinkmem)
{
{ /* Begin receive_data */
unchar *rptr;
- struct termios *ts = 0;
+ struct termios *ts = NULL;
struct tty_struct *tty;
volatile struct board_chan *bc;
register int dataToRead, wrapgap, bytesAvailable;
static int info_ioctl(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg)
{
- int error;
-
switch (cmd)
{ /* Begin switch cmd */
struct digi_info di ;
int brd;
- getUser(brd, (unsigned int *)arg);
-
- if ((error = verify_area(VERIFY_WRITE, (char*)arg, sizeof(di))))
- {
- printk(KERN_ERR "DIGI_GETINFO : verify area size 0x%x failed\n",sizeof(di));
- return(error);
- }
+ getUser(brd, (unsigned int __user *)arg);
if ((brd < 0) || (brd >= num_cards) || (num_cards == 0))
return (-ENODEV);
di.port = boards[brd].port ;
di.membase = boards[brd].membase ;
- if (copy_to_user((char *)arg, &di, sizeof (di)))
+ if (copy_to_user((void __user *)arg, &di, sizeof (di)))
return -EFAULT;
break;
epcaparam(tty,ch);
memoff(ch);
restore_flags(flags);
+ return 0;
}
static int pc_ioctl(struct tty_struct *tty, struct file * file,
{ /* Begin pc_ioctl */
digiflow_t dflow;
- int retval, error;
+ int retval;
unsigned long flags;
unsigned int mflag, mstat;
unsigned char startc, stopc;
volatile struct board_chan *bc;
struct channel *ch = (struct channel *) tty->driver_data;
+ void __user *argp = (void __user *)arg;
if (ch)
bc = ch->brdchan;
{ /* Begin switch cmd */
case TCGETS:
- if (copy_to_user((struct termios *)arg,
+ if (copy_to_user(argp,
tty->termios, sizeof(struct termios)))
return -EFAULT;
return(0);
case TCGETA:
- return get_termio(tty, (struct termio *)arg);
+ return get_termio(tty, argp);
case TCSBRK: /* SVID version: non-zero arg --> no break */
return 0;
case TIOCGSOFTCAR:
-
- error = verify_area(VERIFY_WRITE, (void *) arg,sizeof(long));
- if (error)
- return error;
-
- putUser(C_CLOCAL(tty) ? 1 : 0,
- (unsigned long *) arg);
+ if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)arg))
+ return -EFAULT;
return 0;
case TIOCSSOFTCAR:
- /*RONNIE PUT VERIFY_READ (See above) check here */
{
unsigned int value;
- getUser(value, (unsigned int *)arg);
+ if (get_user(value, (unsigned __user *)argp))
+ return -EFAULT;
tty->termios->c_cflag =
((tty->termios->c_cflag & ~CLOCAL) |
(value ? CLOCAL : 0));
case TIOCMODG:
mflag = pc_tiocmget(tty, file);
- if (putUser(mflag, (unsigned int *) arg))
+ if (put_user(mflag, (unsigned long __user *)argp))
return -EFAULT;
break;
case TIOCMODS:
- if (getUser(mstat, (unsigned int *)arg))
+ if (get_user(mstat, (unsigned __user *)argp))
return -EFAULT;
return pc_tiocmset(tty, file, mstat, ~mstat);
break;
case DIGI_GETA:
- if (copy_to_user((char*)arg, &ch->digiext,
- sizeof(digi_t)))
+ if (copy_to_user(argp, &ch->digiext, sizeof(digi_t)))
return -EFAULT;
break;
/* Fall Thru */
case DIGI_SETA:
- if (copy_from_user(&ch->digiext, (char*)arg,
- sizeof(digi_t)))
+ if (copy_from_user(&ch->digiext, argp, sizeof(digi_t)))
return -EFAULT;
if (ch->digiext.digi_flags & DIGI_ALTPIN)
memoff(ch);
restore_flags(flags);
- if (copy_to_user((char*)arg, &dflow, sizeof(dflow)))
+ if (copy_to_user(argp, &dflow, sizeof(dflow)))
return -EFAULT;
break;
stopc = ch->stopca;
}
- if (copy_from_user(&dflow, (char*)arg, sizeof(dflow)))
+ if (copy_from_user(&dflow, argp, sizeof(dflow)))
return -EFAULT;
if (dflow.startc != startc || dflow.stopc != stopc)
/* --------------------- Begin get_termio ----------------------- */
-static int get_termio(struct tty_struct * tty, struct termio * termio)
+static int get_termio(struct tty_struct * tty, struct termio __user * termio)
{ /* Begin get_termio */
- int error;
-
- error = verify_area(VERIFY_WRITE, termio, sizeof (struct termio));
- if (error)
- return error;
-
- kernel_termios_to_user_termio(termio, tty->termios);
-
- return 0;
+ return kernel_termios_to_user_termio(termio, tty->termios);
} /* End get_termio */
/* ---------------------- Begin epca_setup -------------------------- */
void epca_setup(char *str, int *ints)
else if (request_dma(dma, "esp serial")) {
free_pages((unsigned long)dma_buffer,
get_order(DMA_BUFFER_SZ));
- dma_buffer = 0;
+ dma_buffer = NULL;
info->stat_flags |= ESP_STAT_USE_PIO;
}
free_dma(dma);
free_pages((unsigned long)dma_buffer,
get_order(DMA_BUFFER_SZ));
- dma_buffer = 0;
+ dma_buffer = NULL;
}
}
if (info->xmit_buf) {
free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = 0;
+ info->xmit_buf = NULL;
}
info->IER = 0;
*/
static int get_serial_info(struct esp_struct * info,
- struct serial_struct * retinfo)
+ struct serial_struct __user *retinfo)
{
struct serial_struct tmp;
- if (!retinfo)
- return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_16550A;
tmp.line = info->line;
}
static int get_esp_config(struct esp_struct * info,
- struct hayes_esp_config * retinfo)
+ struct hayes_esp_config __user *retinfo)
{
struct hayes_esp_config tmp;
}
static int set_serial_info(struct esp_struct * info,
- struct serial_struct * new_info)
+ struct serial_struct __user *new_info)
{
struct serial_struct new_serial;
struct esp_struct old_info;
}
static int set_esp_config(struct esp_struct * info,
- struct hayes_esp_config * new_info)
+ struct hayes_esp_config __user * new_info)
{
struct hayes_esp_config new_config;
unsigned int change_dma;
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
*/
-static int get_lsr_info(struct esp_struct * info, unsigned int *value)
+static int get_lsr_info(struct esp_struct * info, unsigned int __user *value)
{
unsigned char status;
unsigned int result;
{
struct esp_struct * info = (struct esp_struct *)tty->driver_data;
struct async_icount cprev, cnow; /* kernel counter temps */
- struct serial_icounter_struct *p_cuser; /* user space */
+ struct serial_icounter_struct __user *p_cuser; /* user space */
+ void __user *argp = (void __user *)arg;
if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
return -ENODEV;
switch (cmd) {
case TIOCGSERIAL:
- return get_serial_info(info,
- (struct serial_struct *) arg);
+ return get_serial_info(info, argp);
case TIOCSSERIAL:
- return set_serial_info(info,
- (struct serial_struct *) arg);
+ return set_serial_info(info, argp);
case TIOCSERCONFIG:
/* do not reconfigure after initial configuration */
return 0;
case TIOCSERGWILD:
- return put_user(0L, (unsigned long *) arg);
+ return put_user(0L, (unsigned long __user *)argp);
case TIOCSERGETLSR: /* Get line status register */
- return get_lsr_info(info, (unsigned int *) arg);
+ return get_lsr_info(info, argp);
case TIOCSERSWILD:
if (!capable(CAP_SYS_ADMIN))
cli();
cnow = info->icount;
sti();
- p_cuser = (struct serial_icounter_struct *) arg;
+ p_cuser = argp;
if (put_user(cnow.cts, &p_cuser->cts) ||
put_user(cnow.dsr, &p_cuser->dsr) ||
put_user(cnow.rng, &p_cuser->rng) ||
return 0;
case TIOCGHAYESESP:
- return (get_esp_config(info, (struct hayes_esp_config *)arg));
+ return get_esp_config(info, argp);
case TIOCSHAYESESP:
- return (set_esp_config(info, (struct hayes_esp_config *)arg));
+ return set_esp_config(info, argp);
default:
return -ENOIOCTLCMD;
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
info->event = 0;
- info->tty = 0;
+ info->tty = NULL;
if (info->blocked_open) {
if (info->close_delay) {
info->event = 0;
info->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
- info->tty = 0;
+ info->tty = NULL;
wake_up_interruptible(&info->open_wait);
}
int i, offset;
int region_start;
struct esp_struct * info;
- struct esp_struct *last_primary = 0;
+ struct esp_struct *last_primary = NULL;
int esp[] = {0x100,0x140,0x180,0x200,0x240,0x280,0x300,0x380};
esp_driver = alloc_tty_driver(NR_PORTS);
/* compress a block of memory, decompress a block of memory, or to identify */
/* itself. For more information, see the specification file "compress.h". */
-EXPORT void lzrw3_compress(action,wrk_mem,src_adr,src_len,dst_adr,p_dst_len)
-UWORD action; /* Action to be performed. */
-UBYTE *wrk_mem; /* Address of working memory we can use. */
-UBYTE *src_adr; /* Address of input data. */
-LONG src_len; /* Length of input data. */
-UBYTE *dst_adr; /* Address to put output data. */
-void *p_dst_len; /* Address of longword for length of output data. */
+EXPORT void lzrw3_compress(
+ UWORD action, /* Action to be performed. */
+ UBYTE *wrk_mem, /* Address of working memory we can use.*/
+ UBYTE *src_adr, /* Address of input data. */
+ LONG src_len, /* Length of input data. */
+ UBYTE *dst_adr, /* Address to put output data. */
+ void *p_dst_len /* Address of longword for length of output data.*/
+)
{
switch (action)
{
(((40543*(((*(PTR))<<8)^((*((PTR)+1))<<4)^(*((PTR)+2))))>>4) & 0xFFF)
/******************************************************************************/
-
-LOCAL void compress_compress
- (p_wrk_mem,p_src_first,src_len,p_dst_first,p_dst_len)
+
/* Input : Hand over the required amount of working memory in p_wrk_mem. */
/* Input : Specify input block using p_src_first and src_len. */
/* Input : Point p_dst_first to the start of the output zone (OZ). */
/* Output : Output block in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. May */
/* Output : write in OZ=Mem[p_dst_first..p_dst_first+src_len+MAX_CMP_GROUP-1].*/
/* Output : Upon completion guaranteed *p_dst_len<=src_len+FLAG_BYTES. */
-UBYTE *p_wrk_mem;
-UBYTE *p_src_first;
-ULONG src_len;
-UBYTE *p_dst_first;
-LONG *p_dst_len;
+LOCAL void compress_compress(UBYTE *p_wrk_mem,
+ UBYTE *p_src_first, ULONG src_len,
+ UBYTE *p_dst_first, LONG *p_dst_len)
{
/* p_src and p_dst step through the source and destination blocks. */
register UBYTE *p_src = p_src_first;
/* to the hash table entry corresponding to the second youngest literal. */
/* Note: p_h1=0=>p_h2=0 because zero values denote absence of a pending */
/* literal. The variables are initialized to zero meaning an empty "buffer". */
- UBYTE **p_h1=0;
- UBYTE **p_h2=0;
+ UBYTE **p_h1=NULL;
+ UBYTE **p_h2=NULL;
/* To start, we write the flag bytes. Being optimistic, we set the flag to */
/* FLAG_COMPRESS. The remaining flag bytes are zeroed so as to keep the */
/* upon the arrival of extra context bytes. */
if (p_h1!=0)
{
- if (p_h2!=0)
- {*p_h2=p_ziv-2; p_h2=0;}
- *p_h1=p_ziv-1; p_h1=0;
+ if (p_h2)
+ {*p_h2=p_ziv-2; p_h2=NULL;}
+ *p_h1=p_ziv-1; p_h1=NULL;
}
/* In any case, we can update the hash table based on the current */
/******************************************************************************/
-LOCAL void compress_decompress
- (p_wrk_mem,p_src_first,src_len,p_dst_first,p_dst_len)
/* Input : Hand over the required amount of working memory in p_wrk_mem. */
/* Input : Specify input block using p_src_first and src_len. */
/* Input : Point p_dst_first to the start of the output zone. */
/* Output : Length of output block written to *p_dst_len. */
/* Output : Output block in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. */
/* Output : Writes only in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. */
-UBYTE *p_wrk_mem;
-UBYTE *p_src_first;
-LONG src_len;
-UBYTE *p_dst_first;
-ULONG *p_dst_len;
+LOCAL void compress_decompress( UBYTE *p_wrk_mem,
+ UBYTE *p_src_first, LONG src_len,
+ UBYTE *p_dst_first, ULONG *p_dst_len)
{
/* Byte pointers p_src and p_dst scan through the input and output blocks. */
register UBYTE *p_src = p_src_first+FLAG_BYTES;
/* forward */
static int zftc_write(int *write_cnt,
__u8 *dst_buf, const int seg_sz,
- const __u8 *src_buf, const int req_len,
+ const __u8 __user *src_buf, const int req_len,
const zft_position *pos, const zft_volinfo *volume);
static int zftc_read(int *read_cnt,
- __u8 *dst_buf, const int to_do,
+ __u8 __user *dst_buf, const int to_do,
const __u8 *src_buf, const int seg_sz,
const zft_position *pos, const zft_volinfo *volume);
static int zftc_seek(unsigned int new_block_pos,
*/
static int zftc_write(int *write_cnt,
__u8 *dst_buf, const int seg_sz,
- const __u8 *src_buf, const int req_len,
+ const __u8 __user *src_buf, const int req_len,
const zft_position *pos, const zft_volinfo *volume)
{
int req_len_left = req_len;
* be set to 0
*/
static int zftc_read (int *read_cnt,
- __u8 *dst_buf, const int to_do,
+ __u8 __user *dst_buf, const int to_do,
const __u8 *src_buf, const int seg_sz,
const zft_position *pos, const zft_volinfo *volume)
{
TRACE(ft_t_info, "ftape_init @ 0x%p", ftape_init);
/* Allocate the DMA buffers. They are deallocated at cleanup() time.
*/
-#if TESTING
+#ifdef TESTING
#ifdef MODULE
while (ftape_set_nr_buffers(CONFIG_FT_NR_BUFFERS) < 0) {
ftape_sleep(FT_SECOND/20);
ptr += get_history_info(ptr);
len = strlen(page);
- *start = 0;
+ *start = NULL;
if (off+count >= len) {
*eof = 1;
} else {
/* IOCTL routine called by kernel-interface code
*/
-int _zft_ioctl(unsigned int command, void * arg)
+int _zft_ioctl(unsigned int command, void __user * arg)
{
int result;
union { struct mtop mtop;
*/
extern int _zft_open(unsigned int dev_minor, unsigned int access_mode);
extern int _zft_close(void);
-extern int _zft_ioctl(unsigned int command, void *arg);
+extern int _zft_ioctl(unsigned int command, void __user *arg);
#endif
static int zft_ioctl(struct inode *ino, struct file *filep,
unsigned int command, unsigned long arg);
static int zft_mmap(struct file *filep, struct vm_area_struct *vma);
-static ssize_t zft_read (struct file *fp, char *buff,
+static ssize_t zft_read (struct file *fp, char __user *buff,
size_t req_len, loff_t *ppos);
-static ssize_t zft_write(struct file *fp, const char *buff,
+static ssize_t zft_write(struct file *fp, const char __user *buff,
size_t req_len, loff_t *ppos);
static struct file_operations zft_cdev =
int result;
TRACE_FUN(ft_t_flow);
+ nonseekable_open(ino, filep);
TRACE(ft_t_flow, "called for minor %d", iminor(ino));
if ( test_and_set_bit(0,&busy_flag) ) {
TRACE_ABORT(-EBUSY, ft_t_warn, "failed: already busy");
old_sigmask = current->blocked; /* save mask */
sigfillset(¤t->blocked);
/* This will work as long as sizeof(void *) == sizeof(long) */
- result = _zft_ioctl(command, (void *) arg);
+ result = _zft_ioctl(command, (void __user *) arg);
current->blocked = old_sigmask; /* restore mask */
TRACE_EXIT result;
}
/* Read from floppy tape device
*/
-static ssize_t zft_read(struct file *fp, char *buff,
+static ssize_t zft_read(struct file *fp, char __user *buff,
size_t req_len, loff_t *ppos)
{
int result = -EIO;
/* Write to tape device
*/
-static ssize_t zft_write(struct file *fp, const char *buff,
+static ssize_t zft_write(struct file *fp, const char __user *buff,
size_t req_len, loff_t *ppos)
{
int result = -EIO;
struct zft_cmpr_ops {
int (*write)(int *write_cnt,
__u8 *dst_buf, const int seg_sz,
- const __u8 *src_buf, const int req_len,
+ const __u8 __user *src_buf, const int req_len,
const zft_position *pos, const zft_volinfo *volume);
int (*read)(int *read_cnt,
- __u8 *dst_buf, const int req_len,
+ __u8 __user *dst_buf, const int req_len,
const __u8 *src_buf, const int seg_sz,
const zft_position *pos, const zft_volinfo *volume);
int (*seek)(unsigned int new_block_pos,
* amount of data actually * copied to the user-buffer
*/
static int zft_simple_read (int *read_cnt,
- __u8 *dst_buf,
+ __u8 __user *dst_buf,
const int to_do,
const __u8 *src_buf,
const int seg_sz,
* req_len: how much data should be read at most.
* volume: contains information on current volume (blk_sz etc.)
*/
-static int empty_deblock_buf(__u8 *usr_buf, const int req_len,
+static int empty_deblock_buf(__u8 __user *usr_buf, const int req_len,
const __u8 *src_buf, const int seg_sz,
zft_position *pos,
const zft_volinfo *volume)
* use small block-sizes. The block-size may be 1kb (SECTOR_SIZE). In
* this case a MTFSR 28 maybe still inside the same segment.
*/
-int _zft_read(char* buff, int req_len)
+int _zft_read(char __user *buff, int req_len)
{
int req_clipped;
int result = 0;
0, FT_SEGMENT_SIZE)
/* hook for the VFS interface
*/
-extern int _zft_read(char* buff, int req_len);
+extern int _zft_read(char __user *buff, int req_len);
#endif /* _ZFTAPE_READ_H */
*/
static int zft_simple_write(int *cnt,
__u8 *dst_buf, const int seg_sz,
- const __u8 *src_buf, const int req_len,
+ const __u8 __user *src_buf, const int req_len,
const zft_position *pos,const zft_volinfo *volume)
{
int space_left;
static int fill_deblock_buf(__u8 *dst_buf, const int seg_sz,
zft_position *pos, const zft_volinfo *volume,
- const char *usr_buf, const int req_len)
+ const char __user *usr_buf, const int req_len)
{
int cnt = 0;
int result = 0;
/* called by the kernel-interface routine "zft_write()"
*/
-int _zft_write(const char* buff, int req_len)
+int _zft_write(const char __user *buff, int req_len)
{
int result = 0;
int written = 0;
/* hook for the VFS interface
*/
-extern int _zft_write(const char *buff, int req_len);
+extern int _zft_write(const char __user *buff, int req_len);
#endif /* _ZFTAPE_WRITE_H */
#define func_enter() gs_dprintk (GS_DEBUG_FLOW, "gs: enter %s\n", __FUNCTION__)
#define func_exit() gs_dprintk (GS_DEBUG_FLOW, "gs: exit %s\n", __FUNCTION__)
-#if NEW_WRITE_LOCKING
+#ifdef NEW_WRITE_LOCKING
#define DECL /* Nothing */
#define LOCKIT down (& port->port_write_sem);
#define RELEASEIT up (&port->port_write_sem);
if (port->xmit_buf) {
free_page((unsigned long) port->xmit_buf);
- port->xmit_buf = 0;
+ port->xmit_buf = NULL;
}
if (port->tty)
port->event = 0;
port->rd->close (port);
port->rd->shutdown_port (port);
- port->tty = 0;
+ port->tty = NULL;
if (port->blocked_open) {
if (port->close_delay) {
}
-int gs_setserial(struct gs_port *port, struct serial_struct *sp)
+int gs_setserial(struct gs_port *port, struct serial_struct __user *sp)
{
struct serial_struct sio;
* Generate the serial struct info.
*/
-int gs_getserial(struct gs_port *port, struct serial_struct *sp)
+int gs_getserial(struct gs_port *port, struct serial_struct __user *sp)
{
struct serial_struct sio;
{
struct proc_dir_entry *r;
- r = create_proc_read_entry("driver/rtc", 0, 0, gen_rtc_read_proc, NULL);
+ r = create_proc_read_entry("driver/rtc", 0, NULL, gen_rtc_read_proc, NULL);
if (!r)
return -ENOMEM;
return 0;
/*
* Intel & MS High Precision Event Timer Implementation.
- * Contributors:
+ *
+ * Copyright (C) 2003 Intel Corporation
* Venki Pallipadi
- * Bob Picco
+ * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
+ * Bob Picco <robert.picco@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
#include <linux/config.h>
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
-#ifdef CONFIG_HPET_NOMMAP
- return -ENOSYS;
-#else
+#ifdef CONFIG_HPET_MMAP
struct hpet_dev *devp;
unsigned long addr;
}
return 0;
+#else
+ return -ENOSYS;
#endif
}
hdp->hd_nirqs = irqp->number_of_interrupts;
for (i = 0; i < hdp->hd_nirqs; i++)
-#ifdef CONFIG_IA64
hdp->hd_irq[i] =
acpi_register_gsi(irqp->interrupts[i],
irqp->edge_level,
irqp->active_high_low);
-#else
- hdp->hd_irq[i] = irqp->interrupts[i];
-#endif
}
}
static void set_params (i2ChanStrPtr, struct termios *);
static int set_modem_info(i2ChanStrPtr, unsigned int, unsigned int *);
-static int get_serial_info(i2ChanStrPtr, struct serial_struct *);
-static int set_serial_info(i2ChanStrPtr, struct serial_struct *);
+static int get_serial_info(i2ChanStrPtr, struct serial_struct __user *);
+static int set_serial_info(i2ChanStrPtr, struct serial_struct __user *);
-static ssize_t ip2_ipl_read(struct file *, char *, size_t, loff_t *);
-static ssize_t ip2_ipl_write(struct file *, const char *, size_t, loff_t *);
+static ssize_t ip2_ipl_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t ip2_ipl_write(struct file *, const char __user *, size_t, loff_t *);
static int ip2_ipl_ioctl(struct inode *, struct file *, UINT, ULONG);
static int ip2_ipl_open(struct inode *, struct file *);
-static int DumpTraceBuffer(char *, int);
-static int DumpFifoBuffer( char *, int);
+static int DumpTraceBuffer(char __user *, int);
+static int DumpFifoBuffer( char __user *, int);
static void ip2_init_board(int);
static unsigned short find_eisa_board(int);
/******************************************************************************/
static inline void
-service_all_boards()
+service_all_boards(void)
{
int i;
i2eBordStrPtr pB;
wait_queue_t wait;
i2ChanStrPtr pCh = DevTable[tty->index];
struct async_icount cprev, cnow; /* kernel counter temps */
- struct serial_icounter_struct *p_cuser; /* user space */
+ struct serial_icounter_struct __user *p_cuser;
int rc = 0;
unsigned long flags;
+ void __user *argp = (void __user *)arg;
if ( pCh == NULL ) {
return -ENODEV;
ip2trace (CHANN, ITRC_IOCTL, 2, 1, rc );
- rc = get_serial_info(pCh, (struct serial_struct *) arg);
+ rc = get_serial_info(pCh, argp);
if (rc)
return rc;
break;
ip2trace (CHANN, ITRC_IOCTL, 3, 1, rc );
- rc = set_serial_info(pCh, (struct serial_struct *) arg);
+ rc = set_serial_info(pCh, argp);
if (rc)
return rc;
break;
ip2trace (CHANN, ITRC_IOCTL, 6, 1, rc );
- rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long *) arg);
+ rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
if (rc)
return rc;
break;
ip2trace (CHANN, ITRC_IOCTL, 7, 1, rc );
- rc = get_user(arg,(unsigned long *) arg);
+ rc = get_user(arg,(unsigned long __user *) argp);
if (rc)
return rc;
tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL)
save_flags(flags);cli();
cnow = pCh->icount;
restore_flags(flags);
- p_cuser = (struct serial_icounter_struct *) arg;
+ p_cuser = argp;
rc = put_user(cnow.cts, &p_cuser->cts);
rc = put_user(cnow.dsr, &p_cuser->dsr);
rc = put_user(cnow.rng, &p_cuser->rng);
/* standard Linux serial structure. */
/******************************************************************************/
static int
-get_serial_info ( i2ChanStrPtr pCh, struct serial_struct *retinfo )
+get_serial_info ( i2ChanStrPtr pCh, struct serial_struct __user *retinfo )
{
struct serial_struct tmp;
- int rc;
-
- if ( !retinfo ) {
- return -EFAULT;
- }
memset ( &tmp, 0, sizeof(tmp) );
tmp.type = pCh->pMyBord->channelBtypes.bid_value[(pCh->port_index & (IP2_PORTS_PER_BOARD-1))/16];
tmp.close_delay = pCh->ClosingDelay;
tmp.closing_wait = pCh->ClosingWaitTime;
tmp.custom_divisor = pCh->BaudDivisor;
- rc = copy_to_user(retinfo,&tmp,sizeof(*retinfo));
- return rc;
+ return copy_to_user(retinfo,&tmp,sizeof(*retinfo));
}
/******************************************************************************/
/* change the IRQ, address or type of the port the ioctl fails. */
/******************************************************************************/
static int
-set_serial_info( i2ChanStrPtr pCh, struct serial_struct *new_info )
+set_serial_info( i2ChanStrPtr pCh, struct serial_struct __user *new_info )
{
struct serial_struct ns;
int old_flags, old_baud_divisor;
- if ( !new_info ) {
+ if (copy_from_user(&ns, new_info, sizeof (ns)))
return -EFAULT;
- }
-
- if (copy_from_user(&ns, new_info, sizeof (ns))) {
- return -EFAULT;
- }
/*
* We don't allow setserial to change IRQ, board address, type or baud
static
ssize_t
-ip2_ipl_read(struct file *pFile, char *pData, size_t count, loff_t *off )
+ip2_ipl_read(struct file *pFile, char __user *pData, size_t count, loff_t *off )
{
unsigned int minor = iminor(pFile->f_dentry->d_inode);
int rc = 0;
}
static int
-DumpFifoBuffer ( char *pData, int count )
+DumpFifoBuffer ( char __user *pData, int count )
{
#ifdef DEBUG_FIFO
int rc;
}
static int
-DumpTraceBuffer ( char *pData, int count )
+DumpTraceBuffer ( char __user *pData, int count )
{
#ifdef IP2DEBUG_TRACE
int rc;
int dumpcount;
int chunk;
- int *pIndex = (int*)pData;
+ int *pIndex = (int __user *)pData;
if ( count < (sizeof(int) * 6) ) {
return -EIO;
/* */
/******************************************************************************/
static ssize_t
-ip2_ipl_write(struct file *pFile, const char *pData, size_t count, loff_t *off)
+ip2_ipl_write(struct file *pFile, const char __user *pData, size_t count, loff_t *off)
{
#ifdef IP2DEBUG_IPL
printk (KERN_DEBUG "IP2IPL: write %p, %d bytes\n", pData, count );
{
unsigned int iplminor = iminor(pInode);
int rc = 0;
- ULONG *pIndex = (ULONG*)arg;
+ void __user *argp = (void __user *)arg;
+ ULONG __user *pIndex = argp;
i2eBordStrPtr pB = i2BoardPtrTable[iplminor / 4];
i2ChanStrPtr pCh;
case 65: /* Board - ip2stat */
if ( pB ) {
- rc = copy_to_user((char*)arg, (char*)pB, sizeof(i2eBordStr) );
+ rc = copy_to_user(argp, pB, sizeof(i2eBordStr));
rc = put_user(INB(pB->i2eStatus),
- (ULONG*)(arg + (ULONG)(&pB->i2eStatus) - (ULONG)pB ) );
+ (ULONG __user *)(arg + (ULONG)(&pB->i2eStatus) - (ULONG)pB ) );
} else {
rc = -ENODEV;
}
pCh = DevTable[cmd];
if ( pCh )
{
- rc = copy_to_user((char*)arg, (char*)pCh, sizeof(i2ChanStr) );
+ rc = copy_to_user(argp, pCh, sizeof(i2ChanStr));
} else {
rc = -ENODEV;
}
{
int rv;
struct ipmi_addr addr;
- unsigned char *msgdata;
+ struct kernel_ipmi_msg msg;
if (req->addr_len > sizeof(struct ipmi_addr))
return -EINVAL;
if (copy_from_user(&addr, req->addr, req->addr_len))
return -EFAULT;
- msgdata = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
- if (!msgdata)
+ msg.netfn = req->msg.netfn;
+ msg.cmd = req->msg.cmd;
+ msg.data_len = req->msg.data_len;
+ msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!msg.data)
return -ENOMEM;
/* From here out we cannot return, we must jump to "out" for
goto out;
}
- if (copy_from_user(msgdata,
+ if (copy_from_user(msg.data,
req->msg.data,
req->msg.data_len))
{
goto out;
}
} else {
- req->msg.data_len = 0;
+ msg.data_len = 0;
}
- req->msg.data = msgdata;
rv = ipmi_request_settime(user,
&addr,
req->msgid,
- &(req->msg),
+ &msg,
NULL,
0,
retries,
retry_time_ms);
out:
- kfree(msgdata);
+ kfree(msg.data);
return rv;
}
}
static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
- struct ipmi_msg *msg,
+ struct kernel_ipmi_msg *msg,
struct ipmi_ipmb_addr *ipmb_addr,
long msgid,
unsigned char ipmb_seq,
}
static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
- struct ipmi_msg *msg,
+ struct kernel_ipmi_msg *msg,
struct ipmi_lan_addr *lan_addr,
long msgid,
unsigned char ipmb_seq,
ipmi_smi_t intf,
struct ipmi_addr *addr,
long msgid,
- struct ipmi_msg *msg,
+ struct kernel_ipmi_msg *msg,
void *user_msg_data,
void *supplied_smi,
struct ipmi_recv_msg *supplied_recv,
goto out_err;
}
-#if DEBUG_MSGING
+#ifdef DEBUG_MSGING
{
int m;
for (m=0; m<smi_msg->data_size; m++)
int ipmi_request(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct ipmi_msg *msg,
+ struct kernel_ipmi_msg *msg,
void *user_msg_data,
int priority)
{
int ipmi_request_settime(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct ipmi_msg *msg,
+ struct kernel_ipmi_msg *msg,
void *user_msg_data,
int priority,
int retries,
int ipmi_request_supply_msgs(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct ipmi_msg *msg,
+ struct kernel_ipmi_msg *msg,
void *user_msg_data,
void *supplied_smi,
struct ipmi_recv_msg *supplied_recv,
int ipmi_request_with_source(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct ipmi_msg *msg,
+ struct kernel_ipmi_msg *msg,
void *user_msg_data,
int priority,
unsigned char source_address,
static int
send_channel_info_cmd(ipmi_smi_t intf, int chan)
{
- struct ipmi_msg msg;
+ struct kernel_ipmi_msg msg;
unsigned char data[1];
struct ipmi_system_interface_addr si;
msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
msg->data_size = 11;
-#if DEBUG_MSGING
+#ifdef DEBUG_MSGING
{
int m;
printk("Invalid command:");
int requeue;
int chan;
-#if DEBUG_MSGING
+#ifdef DEBUG_MSGING
int m;
printk("Recv:");
for (m=0; m<msg->rsp_size; m++)
MC, which don't get resent. */
intf->handlers->sender(intf->send_info, smi_msg, 0);
-#if DEBUG_MSGING
+#ifdef DEBUG_MSGING
{
int m;
printk("Resend: ");
static void send_panic_events(char *str)
{
- struct ipmi_msg msg;
+ struct kernel_ipmi_msg msg;
ipmi_smi_t intf;
unsigned char data[16];
int i;
200 /* priority: INT_MAX >= x >= 0 */
};
-static __init int ipmi_init_msghandler(void)
+static int ipmi_init_msghandler(void)
{
int i;
return 0;
}
+/*
+ * Module init entry point: thin wrapper around ipmi_init_msghandler()
+ * that discards its return value and always reports success, so that
+ * module_init() gets a dedicated __init routine (the shared
+ * ipmi_init_msghandler() above is deliberately no longer __init).
+ */
+static __init int ipmi_init_msghandler_mod(void)
+{
+ ipmi_init_msghandler();
+ return 0;
+}
+
+
static __exit void cleanup_ipmi(void)
{
int count;
}
module_exit(cleanup_ipmi);
-module_init(ipmi_init_msghandler);
+module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ipmi_alloc_recv_msg);
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/ioport.h>
-#include <linux/irq.h>
+#include <asm/irq.h>
#ifdef CONFIG_HIGH_RES_TIMERS
#include <linux/hrtime.h>
# if defined(schedule_next_int)
static int acpi_failure = 0;
/* For GPE-type interrupts. */
-u32 ipmi_acpi_gpe(void *context)
+void ipmi_acpi_gpe(void *context)
{
struct smi_info *smi_info = context;
unsigned long flags;
smi_event_handler(smi_info, 0);
out:
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
- return 0;
}
static int acpi_gpe_irq_setup(struct smi_info *info)
#define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int)
#endif
+#ifdef CONFIG_WATCHDOG_NOWAYOUT
+static int nowayout = 1;
+#else
+static int nowayout;
+#endif
+
static ipmi_user_t watchdog_user = NULL;
/* Default the timeout to 10 seconds. */
module_param(start_now, int, 0);
MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as"
"soon as the driver is loaded.");
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
/* Default state of the timer. */
static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
struct ipmi_recv_msg *recv_msg,
int *send_heartbeat_now)
{
- struct ipmi_msg msg;
+ struct kernel_ipmi_msg msg;
unsigned char data[6];
int rv;
struct ipmi_system_interface_addr addr;
static int ipmi_heartbeat(void)
{
- struct ipmi_msg msg;
+ struct kernel_ipmi_msg msg;
int rv;
struct ipmi_system_interface_addr addr;
static void panic_halt_ipmi_heartbeat(void)
{
- struct ipmi_msg msg;
+ struct kernel_ipmi_msg msg;
struct ipmi_system_interface_addr addr;
{
int rv;
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (len) {
rv = ipmi_heartbeat();
if (rv)
int rv = 0;
wait_queue_t wait;
- /* Can't seek (pread) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (count <= 0)
return 0;
/* Don't start the timer now, let it start on the
first heartbeat. */
ipmi_start_timer_on_heartbeat = 1;
- return(0);
+ return nonseekable_open(ino, filep);
default:
return (-ENODEV);
{
if (iminor(ino)==WATCHDOG_MINOR)
{
-#ifndef CONFIG_WATCHDOG_NOWAYOUT
- ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
- ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
-#endif
+ if (!nowayout) {
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ }
ipmi_wdog_open = 0;
}
static void isicom_tx(unsigned long _data);
static void isicom_start(struct tty_struct * tty);
-static unsigned char * tmp_buf = 0;
+static unsigned char * tmp_buf;
static DECLARE_MUTEX(tmp_buf_sem);
/* baud index mappings from linux defns to isi */
unsigned long t;
unsigned short word_count, base;
bin_frame frame;
+ void __user *argp = (void __user *)arg;
/* exec_record exec_rec; */
- if(get_user(card, (int *)arg))
+ if(get_user(card, (int __user *)argp))
return -EFAULT;
if(card < 0 || card >= BOARD_COUNT)
return -EIO;
}
printk("-Done\n");
- return put_user(signature,(unsigned int*)arg);
+ return put_user(signature,(unsigned __user *)argp);
case MIOCTL_LOAD_FIRMWARE:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if(copy_from_user(&frame, (void *) arg, sizeof(bin_frame)))
+ if(copy_from_user(&frame, argp, sizeof(bin_frame)))
return -EFAULT;
if (WaitTillCardIsFree(base))
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if(copy_from_user(&frame, (void *) arg, sizeof(bin_header)))
+ if(copy_from_user(&frame, argp, sizeof(bin_header)))
return -EFAULT;
if (WaitTillCardIsFree(base))
return -EIO;
}
- if(copy_to_user((void *) arg, &frame, sizeof(bin_frame)))
+ if(copy_to_user(argp, &frame, sizeof(bin_frame)))
return -EFAULT;
return 0;
if (tty->ldisc.flush_buffer)
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
- port->tty = 0;
+ port->tty = NULL;
if (port->blocked_open) {
if (port->close_delay) {
set_current_state(TASK_INTERRUPTIBLE);
}
static int isicom_set_serial_info(struct isi_port * port,
- struct serial_struct * info)
+ struct serial_struct __user *info)
{
struct serial_struct newinfo;
unsigned long flags;
}
static int isicom_get_serial_info(struct isi_port * port,
- struct serial_struct * info)
+ struct serial_struct __user *info)
{
struct serial_struct out_info;
unsigned int cmd, unsigned long arg)
{
struct isi_port * port = (struct isi_port *) tty->driver_data;
+ void __user *argp = (void __user *)arg;
int retval;
if (isicom_paranoia_check(port, tty->name, "isicom_ioctl"))
return 0;
case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long *) arg);
+ return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
case TIOCSSOFTCAR:
- if(get_user(arg, (unsigned long *) arg))
+ if(get_user(arg, (unsigned long __user *) argp))
return -EFAULT;
tty->termios->c_cflag =
((tty->termios->c_cflag & ~CLOCAL) |
return 0;
case TIOCGSERIAL:
- return isicom_get_serial_info(port,
- (struct serial_struct *) arg);
+ return isicom_get_serial_info(port, argp);
case TIOCSSERIAL:
- return isicom_set_serial_info(port,
- (struct serial_struct *) arg);
+ return isicom_set_serial_info(port, argp);
default:
return -ENOIOCTLCMD;
isicom_shutdown_port(port);
port->count = 0;
port->flags &= ~ASYNC_NORMAL_ACTIVE;
- port->tty = 0;
+ port->tty = NULL;
wake_up_interruptible(&port->open_wait);
}
static int stli_brdinit(stlibrd_t *brdp);
static int stli_startbrd(stlibrd_t *brdp);
-static ssize_t stli_memread(struct file *fp, char *buf, size_t count, loff_t *offp);
-static ssize_t stli_memwrite(struct file *fp, const char *buf, size_t count, loff_t *offp);
+static ssize_t stli_memread(struct file *fp, char __user *buf, size_t count, loff_t *offp);
+static ssize_t stli_memwrite(struct file *fp, const char __user *buf, size_t count, loff_t *offp);
static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg);
static void stli_brdpoll(stlibrd_t *brdp, volatile cdkhdr_t *hdrp);
static void stli_poll(unsigned long arg);
static void stli_mkasysigs(asysigs_t *sp, int dtr, int rts);
static long stli_mktiocm(unsigned long sigvalue);
static void stli_read(stlibrd_t *brdp, stliport_t *portp);
-static int stli_getserial(stliport_t *portp, struct serial_struct *sp);
-static int stli_setserial(stliport_t *portp, struct serial_struct *sp);
-static int stli_getbrdstats(combrd_t *bp);
-static int stli_getportstats(stliport_t *portp, comstats_t *cp);
+static int stli_getserial(stliport_t *portp, struct serial_struct __user *sp);
+static int stli_setserial(stliport_t *portp, struct serial_struct __user *sp);
+static int stli_getbrdstats(combrd_t __user *bp);
+static int stli_getportstats(stliport_t *portp, comstats_t __user *cp);
static int stli_portcmdstats(stliport_t *portp);
-static int stli_clrportstats(stliport_t *portp, comstats_t *cp);
-static int stli_getportstruct(unsigned long arg);
-static int stli_getbrdstruct(unsigned long arg);
+static int stli_clrportstats(stliport_t *portp, comstats_t __user *cp);
+static int stli_getportstruct(stliport_t __user *arg);
+static int stli_getbrdstruct(stlibrd_t __user *arg);
static void *stli_memalloc(int len);
static stlibrd_t *stli_allocbrd(void);
{
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("init_module()\n");
#endif
unsigned long flags;
int i, j;
-#if DEBUG
+#ifdef DEBUG
printk("cleanup_module()\n");
#endif
* Check for any arguments passed in on the module load command line.
*/
-static void stli_argbrds()
+static void stli_argbrds(void)
{
stlconf_t conf;
stlibrd_t *brdp;
int nrargs, i;
-#if DEBUG
+#ifdef DEBUG
printk("stli_argbrds()\n");
#endif
char *sp;
int nrbrdnames, i;
-#if DEBUG
+#ifdef DEBUG
printk("stli_parsebrd(confp=%x,argp=%x)\n", (int) confp, (int) argp);
#endif
unsigned int minordev;
int brdnr, portnr, rc;
-#if DEBUG
+#ifdef DEBUG
printk("stli_open(tty=%x,filp=%x): device=%s\n", (int) tty,
(int) filp, tty->name);
#endif
stliport_t *portp;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stli_close(tty=%x,filp=%x)\n", (int) tty, (int) filp);
#endif
asyport_t aport;
int rc;
-#if DEBUG
+#ifdef DEBUG
printk("stli_initopen(brdp=%x,portp=%x)\n", (int) brdp, (int) portp);
#endif
unsigned long flags;
int rc;
-#if DEBUG
+#ifdef DEBUG
printk("stli_rawopen(brdp=%x,portp=%x,arg=%x,wait=%d)\n",
(int) brdp, (int) portp, (int) arg, wait);
#endif
unsigned long flags;
int rc;
-#if DEBUG
+#ifdef DEBUG
printk("stli_rawclose(brdp=%x,portp=%x,arg=%x,wait=%d)\n",
(int) brdp, (int) portp, (int) arg, wait);
#endif
{
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stli_cmdwait(brdp=%x,portp=%x,cmd=%x,arg=%x,size=%d,"
"copyback=%d)\n", (int) brdp, (int) portp, (int) cmd,
(int) arg, size, copyback);
stlibrd_t *brdp;
asyport_t aport;
-#if DEBUG
+#ifdef DEBUG
printk("stli_setport(portp=%x)\n", (int) portp);
#endif
static void stli_delay(int len)
{
-#if DEBUG
+#ifdef DEBUG
printk("stli_delay(len=%d)\n", len);
#endif
if (len > 0) {
unsigned long flags;
int rc, doclocal;
-#if DEBUG
+#ifdef DEBUG
printk("stli_waitcarrier(brdp=%x,portp=%x,filp=%x)\n",
(int) brdp, (int) portp, (int) filp);
#endif
unsigned int len, stlen, head, tail, size;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stli_write(tty=%x,from_user=%d,buf=%x,count=%d)\n",
(int) tty, from_user, (int) buf, count);
#endif
static void stli_putchar(struct tty_struct *tty, unsigned char ch)
{
-#if DEBUG
+#ifdef DEBUG
printk("stli_putchar(tty=%x,ch=%x)\n", (int) tty, (int) ch);
#endif
unsigned char *buf, *shbuf;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stli_flushchars(tty=%x)\n", (int) tty);
#endif
unsigned int head, tail, len;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stli_writeroom(tty=%x)\n", (int) tty);
#endif
unsigned int head, tail, len;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stli_charsinbuffer(tty=%x)\n", (int) tty);
#endif
* Generate the serial struct info.
*/
-static int stli_getserial(stliport_t *portp, struct serial_struct *sp)
+static int stli_getserial(stliport_t *portp, struct serial_struct __user *sp)
{
struct serial_struct sio;
stlibrd_t *brdp;
-#if DEBUG
+#ifdef DEBUG
printk("stli_getserial(portp=%x,sp=%x)\n", (int) portp, (int) sp);
#endif
* just quietly ignore any requests to change irq, etc.
*/
-static int stli_setserial(stliport_t *portp, struct serial_struct *sp)
+static int stli_setserial(stliport_t *portp, struct serial_struct __user *sp)
{
struct serial_struct sio;
int rc;
-#if DEBUG
- printk("stli_setserial(portp=%x,sp=%x)\n", (int) portp, (int) sp);
+#ifdef DEBUG
+ printk("stli_setserial(portp=%p,sp=%p)\n", portp, sp);
#endif
if (copy_from_user(&sio, sp, sizeof(struct serial_struct)))
stlibrd_t *brdp;
unsigned int ival;
int rc;
+ void __user *argp = (void __user *)arg;
-#if DEBUG
+#ifdef DEBUG
printk("stli_ioctl(tty=%x,file=%x,cmd=%x,arg=%x)\n",
(int) tty, (int) file, cmd, (int) arg);
#endif
switch (cmd) {
case TIOCGSOFTCAR:
rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
- (unsigned int *) arg);
+ (unsigned __user *) arg);
break;
case TIOCSSOFTCAR:
- if ((rc = get_user(ival, (unsigned int *) arg)) == 0)
+ if ((rc = get_user(ival, (unsigned __user *) arg)) == 0)
tty->termios->c_cflag =
(tty->termios->c_cflag & ~CLOCAL) |
(ival ? CLOCAL : 0);
break;
case TIOCGSERIAL:
- if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(struct serial_struct))) == 0)
- rc = stli_getserial(portp, (struct serial_struct *) arg);
+ rc = stli_getserial(portp, argp);
break;
case TIOCSSERIAL:
- if ((rc = verify_area(VERIFY_READ, (void *) arg,
- sizeof(struct serial_struct))) == 0)
- rc = stli_setserial(portp, (struct serial_struct *)arg);
+ rc = stli_setserial(portp, argp);
break;
case STL_GETPFLAG:
- rc = put_user(portp->pflag, (unsigned int *) arg);
+ rc = put_user(portp->pflag, (unsigned __user *)argp);
break;
case STL_SETPFLAG:
- if ((rc = get_user(portp->pflag, (unsigned int *) arg)) == 0)
+ if ((rc = get_user(portp->pflag, (unsigned __user *)argp)) == 0)
stli_setport(portp);
break;
case COM_GETPORTSTATS:
- if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(comstats_t))) == 0)
- rc = stli_getportstats(portp, (comstats_t *) arg);
+ rc = stli_getportstats(portp, argp);
break;
case COM_CLRPORTSTATS:
- if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(comstats_t))) == 0)
- rc = stli_clrportstats(portp, (comstats_t *) arg);
+ rc = stli_clrportstats(portp, argp);
break;
case TIOCSERCONFIG:
case TIOCSERGWILD:
struct termios *tiosp;
asyport_t aport;
-#if DEBUG
+#ifdef DEBUG
printk("stli_settermios(tty=%x,old=%x)\n", (int) tty, (int) old);
#endif
{
stliport_t *portp;
-#if DEBUG
+#ifdef DEBUG
printk("stli_throttle(tty=%x)\n", (int) tty);
#endif
{
stliport_t *portp;
-#if DEBUG
+#ifdef DEBUG
printk("stli_unthrottle(tty=%x)\n", (int) tty);
#endif
stliport_t *portp;
asyctrl_t actrl;
-#if DEBUG
+#ifdef DEBUG
printk("stli_stop(tty=%x)\n", (int) tty);
#endif
stlibrd_t *brdp;
asyctrl_t actrl;
-#if DEBUG
+#ifdef DEBUG
printk("stli_start(tty=%x)\n", (int) tty);
#endif
{
stliport_t *portp;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_dohangup(portp=%x)\n", (int) arg);
#endif
stlibrd_t *brdp;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_hangup(tty=%x)\n", (int) tty);
#endif
stlibrd_t *brdp;
unsigned long ftype, flags;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_flushbuffer(tty=%x)\n", (int) tty);
#endif
long arg;
/* long savestate, savetime; */
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_breakctl(tty=%x,state=%d)\n", (int) tty, state);
#endif
stliport_t *portp;
unsigned long tend;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_waituntilsent(tty=%x,timeout=%x)\n", (int) tty, timeout);
#endif
stliport_t *portp;
asyctrl_t actrl;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_sendxchar(tty=%x,ch=%x)\n", (int) tty, ch);
#endif
int curoff, maxoff;
char *pos;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_readproc(page=%x,start=%x,off=%x,count=%d,eof=%x,"
"data=%x\n", (int) page, (int) start, (int) off, count,
(int) eof, (int) data);
volatile unsigned char *bits;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_sendcmd(brdp=%x,portp=%x,cmd=%x,arg=%x,size=%d,"
"copyback=%d)\n", (int) brdp, (int) portp, (int) cmd,
(int) arg, size, copyback);
unsigned int head, tail, size;
unsigned int len, stlen;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_read(brdp=%x,portp=%d)\n",
(int) brdp, (int) portp);
#endif
unsigned long oldsigs;
int rc, donerx;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_hostcmd(brdp=%x,channr=%d)\n",
(int) brdp, channr);
#endif
static void stli_mkasyport(stliport_t *portp, asyport_t *pp, struct termios *tiosp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_mkasyport(portp=%x,pp=%x,tiosp=%d)\n",
(int) portp, (int) pp, (int) tiosp);
#endif
static void stli_mkasysigs(asysigs_t *sp, int dtr, int rts)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_mkasysigs(sp=%x,dtr=%d,rts=%d)\n",
(int) sp, dtr, rts);
#endif
{
long tiocm;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_mktiocm(sigvalue=%x)\n", (int) sigvalue);
#endif
stliport_t *portp;
int i, panelnr, panelport;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_initports(brdp=%x)\n", (int) brdp);
#endif
{
unsigned long memconf;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_ecpinit(brdp=%d)\n", (int) brdp);
#endif
static void stli_ecpenable(stlibrd_t *brdp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_ecpenable(brdp=%x)\n", (int) brdp);
#endif
outb(ECP_ATENABLE, (brdp->iobase + ECP_ATCONFR));
static void stli_ecpdisable(stlibrd_t *brdp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_ecpdisable(brdp=%x)\n", (int) brdp);
#endif
outb(ECP_ATDISABLE, (brdp->iobase + ECP_ATCONFR));
void *ptr;
unsigned char val;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_ecpgetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
(int) offset);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = 0;
+ ptr = NULL;
val = 0;
} else {
ptr = brdp->membase + (offset % ECP_ATPAGESIZE);
static void stli_ecpreset(stlibrd_t *brdp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_ecpreset(brdp=%x)\n", (int) brdp);
#endif
static void stli_ecpintr(stlibrd_t *brdp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_ecpintr(brdp=%x)\n", (int) brdp);
#endif
outb(0x1, brdp->iobase);
{
unsigned long memconf;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_ecpeiinit(brdp=%x)\n", (int) brdp);
#endif
void *ptr;
unsigned char val;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_ecpeigetmemptr(brdp=%x,offset=%x,line=%d)\n",
(int) brdp, (int) offset, line);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = 0;
+ ptr = NULL;
val = 0;
} else {
ptr = brdp->membase + (offset % ECP_EIPAGESIZE);
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = 0;
+ ptr = NULL;
val = 0;
} else {
ptr = brdp->membase + (offset % ECP_MCPAGESIZE);
static void stli_ecppciinit(stlibrd_t *brdp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_ecppciinit(brdp=%x)\n", (int) brdp);
#endif
void *ptr;
unsigned char val;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_ecppcigetmemptr(brdp=%x,offset=%x,line=%d)\n",
(int) brdp, (int) offset, line);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), board=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = 0;
+ ptr = NULL;
val = 0;
} else {
ptr = brdp->membase + (offset % ECP_PCIPAGESIZE);
{
unsigned long memconf;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_onbinit(brdp=%d)\n", (int) brdp);
#endif
static void stli_onbenable(stlibrd_t *brdp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_onbenable(brdp=%x)\n", (int) brdp);
#endif
outb((brdp->enabval | ONB_ATENABLE), (brdp->iobase + ONB_ATCONFR));
static void stli_onbdisable(stlibrd_t *brdp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_onbdisable(brdp=%x)\n", (int) brdp);
#endif
outb((brdp->enabval | ONB_ATDISABLE), (brdp->iobase + ONB_ATCONFR));
{
void *ptr;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_onbgetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
(int) offset);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = 0;
+ ptr = NULL;
} else {
ptr = brdp->membase + (offset % ONB_ATPAGESIZE);
}
static void stli_onbreset(stlibrd_t *brdp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_onbreset(brdp=%x)\n", (int) brdp);
#endif
{
unsigned long memconf;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_onbeinit(brdp=%d)\n", (int) brdp);
#endif
static void stli_onbeenable(stlibrd_t *brdp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_onbeenable(brdp=%x)\n", (int) brdp);
#endif
outb(ONB_EIENABLE, (brdp->iobase + ONB_EICONFR));
static void stli_onbedisable(stlibrd_t *brdp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_onbedisable(brdp=%x)\n", (int) brdp);
#endif
outb(ONB_EIDISABLE, (brdp->iobase + ONB_EICONFR));
void *ptr;
unsigned char val;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_onbegetmemptr(brdp=%x,offset=%x,line=%d)\n",
(int) brdp, (int) offset, line);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = 0;
+ ptr = NULL;
val = 0;
} else {
ptr = brdp->membase + (offset % ONB_EIPAGESIZE);
static void stli_onbereset(stlibrd_t *brdp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_ERR "stli_onbereset(brdp=%x)\n", (int) brdp);
#endif
static void stli_bbyinit(stlibrd_t *brdp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_ERR "stli_bbyinit(brdp=%d)\n", (int) brdp);
#endif
void *ptr;
unsigned char val;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_ERR "stli_bbygetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
(int) offset);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = 0;
+ ptr = NULL;
val = 0;
} else {
ptr = brdp->membase + (offset % BBY_PAGESIZE);
static void stli_bbyreset(stlibrd_t *brdp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_bbyreset(brdp=%x)\n", (int) brdp);
#endif
static void stli_stalinit(stlibrd_t *brdp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_stalinit(brdp=%d)\n", (int) brdp);
#endif
{
void *ptr;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_stalgetmemptr(brdp=%x,offset=%x)\n", (int) brdp,
(int) offset);
#endif
printk(KERN_ERR "STALLION: shared memory pointer=%x out of "
"range at line=%d(%d), brd=%d\n",
(int) offset, line, __LINE__, brdp->brdnr);
- ptr = 0;
+ ptr = NULL;
} else {
ptr = brdp->membase + (offset % STAL_PAGESIZE);
}
{
volatile unsigned long *vecp;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_stalreset(brdp=%x)\n", (int) brdp);
#endif
char *name;
int panelnr, nrports;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_initecp(brdp=%x)\n", (int) brdp);
#endif
char *name;
int i;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_initonb(brdp=%x)\n", (int) brdp);
#endif
stliport_t *portp;
int portnr, nrdevs, i, rc;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_startbrd(brdp=%x)\n", (int) brdp);
#endif
static int __init stli_brdinit(stlibrd_t *brdp)
{
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_brdinit(brdp=%x)\n", (int) brdp);
#endif
cdkonbsig_t onbsig, *onbsigp;
int i, foundit;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_eisamemprobe(brdp=%x)\n", (int) brdp);
#endif
if (! foundit) {
brdp->memaddr = 0;
- brdp->membase = 0;
+ brdp->membase = NULL;
printk(KERN_ERR "STALLION: failed to probe shared memory "
"region for %s in EISA slot=%d\n",
stli_brdnames[brdp->brdtype], (brdp->iobase >> 12));
return(0);
}
+/*
+ * Find the first unused slot in the stli_brds[] board table.
+ * If the chosen slot lies beyond the current high-water mark,
+ * stli_nrbrds is extended to cover it. Returns the slot index,
+ * or -1 when all STL_MAXBRDS slots are already in use.
+ */
+static inline int stli_getbrdnr(void)
+{
+ int i;
+
+ for (i = 0; i < STL_MAXBRDS; i++) {
+ if (!stli_brds[i]) {
+ if (i >= stli_nrbrds)
+ stli_nrbrds = i + 1;
+ return i;
+ }
+ }
+ return -1;
+}
+
/*****************************************************************************/
/*
* do is go probing around in the usual places hoping we can find it.
*/
-static inline int stli_findeisabrds()
+static inline int stli_findeisabrds(void)
{
stlibrd_t *brdp;
unsigned int iobase, eid;
int i;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_findeisabrds()\n");
#endif
* Find the next available board number that is free.
*/
-static inline int stli_getbrdnr()
-{
- int i;
-
- for (i = 0; (i < STL_MAXBRDS); i++) {
- if (stli_brds[i] == (stlibrd_t *) NULL) {
- if (i >= stli_nrbrds)
- stli_nrbrds = i + 1;
- return(i);
- }
- }
- return(-1);
-}
-
/*****************************************************************************/
#ifdef CONFIG_PCI
{
stlibrd_t *brdp;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_initpcibrd(brdtype=%d,busnr=%x,devnr=%x)\n",
brdtype, dev->bus->number, dev->devfn);
#endif
}
brdp->brdtype = brdtype;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "%s(%d): BAR[]=%lx,%lx,%lx,%lx\n", __FILE__, __LINE__,
pci_resource_start(devp, 0),
pci_resource_start(devp, 1),
* one as it is found.
*/
-static inline int stli_findpcibrds()
+static inline int stli_findpcibrds(void)
{
struct pci_dev *dev = NULL;
int rc;
-#if DEBUG
+#ifdef DEBUG
printk("stli_findpcibrds()\n");
#endif
* Allocate a new board structure. Fill out the basic info in it.
*/
-static stlibrd_t *stli_allocbrd()
+static stlibrd_t *stli_allocbrd(void)
{
stlibrd_t *brdp;
* can find.
*/
-static inline int stli_initbrds()
+static inline int stli_initbrds(void)
{
stlibrd_t *brdp, *nxtbrdp;
stlconf_t *confp;
int i, j;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_initbrds()\n");
#endif
* the slave image (and debugging :-)
*/
-static ssize_t stli_memread(struct file *fp, char *buf, size_t count, loff_t *offp)
+static ssize_t stli_memread(struct file *fp, char __user *buf, size_t count, loff_t *offp)
{
unsigned long flags;
void *memptr;
stlibrd_t *brdp;
int brdnr, size, n;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_memread(fp=%x,buf=%x,count=%x,offp=%x)\n",
(int) fp, (int) buf, count, (int) offp);
#endif
* the slave image (and debugging :-)
*/
-static ssize_t stli_memwrite(struct file *fp, const char *buf, size_t count, loff_t *offp)
+static ssize_t stli_memwrite(struct file *fp, const char __user *buf, size_t count, loff_t *offp)
{
unsigned long flags;
void *memptr;
stlibrd_t *brdp;
- char *chbuf;
+ char __user *chbuf;
int brdnr, size, n;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_memwrite(fp=%x,buf=%x,count=%x,offp=%x)\n",
(int) fp, (int) buf, count, (int) offp);
#endif
if (fp->f_pos >= brdp->memsize)
return(0);
- chbuf = (char *) buf;
+ chbuf = (char __user *) buf;
size = MIN(count, (brdp->memsize - fp->f_pos));
save_flags(flags);
* Return the board stats structure to user app.
*/
-static int stli_getbrdstats(combrd_t *bp)
+static int stli_getbrdstats(combrd_t __user *bp)
{
stlibrd_t *brdp;
int i;
* what port to get stats for (used through board control device).
*/
-static int stli_getportstats(stliport_t *portp, comstats_t *cp)
+static int stli_getportstats(stliport_t *portp, comstats_t __user *cp)
{
stlibrd_t *brdp;
int rc;
- if (portp == (stliport_t *) NULL) {
+ if (!portp) {
if (copy_from_user(&stli_comstats, cp, sizeof(comstats_t)))
return -EFAULT;
portp = stli_getport(stli_comstats.brd, stli_comstats.panel,
stli_comstats.port);
- if (portp == (stliport_t *) NULL)
- return(-ENODEV);
+ if (!portp)
+ return -ENODEV;
}
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return(-ENODEV);
+ if (!brdp)
+ return -ENODEV;
if ((rc = stli_portcmdstats(portp)) < 0)
- return(rc);
+ return rc;
return copy_to_user(cp, &stli_comstats, sizeof(comstats_t)) ?
-EFAULT : 0;
* Clear the port stats structure. We also return it zeroed out...
*/
-static int stli_clrportstats(stliport_t *portp, comstats_t *cp)
+static int stli_clrportstats(stliport_t *portp, comstats_t __user *cp)
{
stlibrd_t *brdp;
int rc;
- if (portp == (stliport_t *) NULL) {
+ if (!portp) {
if (copy_from_user(&stli_comstats, cp, sizeof(comstats_t)))
return -EFAULT;
portp = stli_getport(stli_comstats.brd, stli_comstats.panel,
stli_comstats.port);
- if (portp == (stliport_t *) NULL)
- return(-ENODEV);
+ if (!portp)
+ return -ENODEV;
}
brdp = stli_brds[portp->brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return(-ENODEV);
+ if (!brdp)
+ return -ENODEV;
if (brdp->state & BST_STARTED) {
- if ((rc = stli_cmdwait(brdp, portp, A_CLEARSTATS, 0, 0, 0)) < 0)
- return(rc);
+ if ((rc = stli_cmdwait(brdp, portp, A_CLEARSTATS, NULL, 0, 0)) < 0)
+ return rc;
}
memset(&stli_comstats, 0, sizeof(comstats_t));
if (copy_to_user(cp, &stli_comstats, sizeof(comstats_t)))
return -EFAULT;
- return(0);
+ return 0;
}
/*****************************************************************************/
* Return the entire driver ports structure to a user app.
*/
-static int stli_getportstruct(unsigned long arg)
+static int stli_getportstruct(stliport_t __user *arg)
{
stliport_t *portp;
- if (copy_from_user(&stli_dummyport, (void *)arg, sizeof(stliport_t)))
+ if (copy_from_user(&stli_dummyport, arg, sizeof(stliport_t)))
return -EFAULT;
portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
stli_dummyport.portnr);
- if (portp == (stliport_t *) NULL)
- return(-ENODEV);
- if (copy_to_user((void *) arg, portp, sizeof(stliport_t)))
+ if (!portp)
+ return -ENODEV;
+ if (copy_to_user(arg, portp, sizeof(stliport_t)))
return -EFAULT;
- return(0);
+ return 0;
}
/*****************************************************************************/
* Return the entire driver board structure to a user app.
*/
-static int stli_getbrdstruct(unsigned long arg)
+static int stli_getbrdstruct(stlibrd_t __user *arg)
{
stlibrd_t *brdp;
- if (copy_from_user(&stli_dummybrd, (void *)arg, sizeof(stlibrd_t)))
+ if (copy_from_user(&stli_dummybrd, arg, sizeof(stlibrd_t)))
return -EFAULT;
if ((stli_dummybrd.brdnr < 0) || (stli_dummybrd.brdnr >= STL_MAXBRDS))
- return(-ENODEV);
+ return -ENODEV;
brdp = stli_brds[stli_dummybrd.brdnr];
- if (brdp == (stlibrd_t *) NULL)
- return(-ENODEV);
- if (copy_to_user((void *) arg, brdp, sizeof(stlibrd_t)))
+ if (!brdp)
+ return -ENODEV;
+ if (copy_to_user(arg, brdp, sizeof(stlibrd_t)))
return -EFAULT;
- return(0);
+ return 0;
}
/*****************************************************************************/
{
stlibrd_t *brdp;
int brdnr, rc, done;
+ void __user *argp = (void __user *)arg;
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "stli_memioctl(ip=%x,fp=%x,cmd=%x,arg=%x)\n",
(int) ip, (int) fp, cmd, (int) arg);
#endif
switch (cmd) {
case COM_GETPORTSTATS:
- rc = stli_getportstats((stliport_t *)NULL, (comstats_t *)arg);
+ rc = stli_getportstats(NULL, argp);
done++;
break;
case COM_CLRPORTSTATS:
- rc = stli_clrportstats((stliport_t *)NULL, (comstats_t *)arg);
+ rc = stli_clrportstats(NULL, argp);
done++;
break;
case COM_GETBRDSTATS:
- rc = stli_getbrdstats((combrd_t *) arg);
+ rc = stli_getbrdstats(argp);
done++;
break;
case COM_READPORT:
- rc = stli_getportstruct(arg);
+ rc = stli_getportstruct(argp);
done++;
break;
case COM_READBOARD:
- rc = stli_getbrdstruct(arg);
+ rc = stli_getbrdstruct(argp);
done++;
break;
}
if (brdnr >= STL_MAXBRDS)
return(-ENODEV);
brdp = stli_brds[brdnr];
- if (brdp == (stlibrd_t *) NULL)
+ if (!brdp)
return(-ENODEV);
if (brdp->state == 0)
return(-ENODEV);
}
#endif
-extern int page_is_ram(unsigned long pagenr);
-
-static inline int page_is_allowed(unsigned long pagenr)
-{
- #ifdef CONFIG_X86
- if (pagenr <= 256)
- return 1;
- if (!page_is_ram(pagenr))
- return 1;
- printk("Access to 0x%lx by %s denied \n", pagenr << PAGE_SHIFT, current->comm);
- return 0;
- #else
- return 1;
- #endif
-}
-
static inline int range_is_allowed(unsigned long from, unsigned long to)
{
unsigned long cursor;
cursor = from >> PAGE_SHIFT;
- while ( (cursor << PAGE_SHIFT) < to) {
- if (!page_is_allowed(cursor))
+ while ((cursor << PAGE_SHIFT) < to) {
+ if (!devmem_is_allowed(cursor))
return 0;
cursor++;
}
}
#endif
if (!range_is_allowed(realp, realp+count))
- return -EFAULT;
+ return -EPERM;
copied = copy_from_user(p, buf, count);
if (copied) {
ssize_t ret = written + (count - copied);
}
#endif
if (!range_is_allowed(p, p+count))
- return -EFAULT;
+ return -EPERM;
if (copy_to_user(buf, __va(p), count))
return -EFAULT;
read += count;
cursor = vma->vm_pgoff;
while ((cursor << PAGE_SHIFT) < offset + vma->vm_end-vma->vm_start) {
- if (!page_is_allowed(cursor))
- return -EFAULT;
+ if (!devmem_is_allowed(cursor))
+ return -EPERM;
cursor++;
}
return virtr + read;
}
-/*
- * This function writes to the *virtual* memory as seen by the kernel.
- */
-static ssize_t write_kmem(struct file * file, const char __user * buf,
- size_t count, loff_t *ppos)
-{
- unsigned long p = *ppos;
- ssize_t wrote = 0;
- ssize_t virtr = 0;
- ssize_t written;
- char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
-
- return -EPERM;
-
- if (p < (unsigned long) high_memory) {
-
- wrote = count;
- if (count > (unsigned long) high_memory - p)
- wrote = (unsigned long) high_memory - p;
-
- written = do_write_mem((void*)p, p, buf, wrote, ppos);
- if (written != wrote)
- return written;
- wrote = written;
- p += wrote;
- buf += wrote;
- count -= wrote;
- }
-
- if (count > 0) {
- kbuf = (char *)__get_free_page(GFP_KERNEL);
- if (!kbuf)
- return wrote ? wrote : -ENOMEM;
- while (count > 0) {
- int len = count;
-
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- if (len) {
- written = copy_from_user(kbuf, buf, len);
- if (written) {
- ssize_t ret;
-
- free_page((unsigned long)kbuf);
- ret = wrote + virtr + (len - written);
- return ret ? ret : -EFAULT;
- }
- }
- len = vwrite(kbuf, (char *)p, len);
- count -= len;
- buf += len;
- virtr += len;
- p += len;
- }
- free_page((unsigned long)kbuf);
- }
-
- *ppos = p;
- return virtr + wrote;
-}
-
#if defined(CONFIG_ISA) || !defined(__mc68000__)
static ssize_t read_port(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
static struct file_operations kmem_fops = {
.llseek = memory_lseek,
.read = read_kmem,
- .write = write_kmem,
.mmap = mmap_kmem,
.open = open_kmem,
};
"CP-204J series",
};
+#ifdef CONFIG_PCI
static struct pci_device_id moxa_pcibrds[] = {
{ PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_C218, PCI_ANY_ID, PCI_ANY_ID,
0, 0, MOXA_BOARD_C218_PCI },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, moxa_pcibrds);
+#endif /* CONFIG_PCI */
typedef struct _moxa_isa_board_conf {
int boardType;
static int verbose = 0;
static int ttymajor = MOXAMAJOR;
/* Variables for insmod */
+#ifdef MODULE
static int baseaddr[] = {0, 0, 0, 0};
static int type[] = {0, 0, 0, 0};
static int numports[] = {0, 0, 0, 0};
+#endif
MODULE_AUTHOR("William Chen");
MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver");
/*
* static functions:
*/
-static int moxa_get_PCI_conf(struct pci_dev *, int, moxa_board_conf *);
static void do_moxa_softint(void *);
static int moxa_open(struct tty_struct *, struct file *);
static void moxa_close(struct tty_struct *, struct file *);
static void MoxaPortTxEnable(int);
static int MoxaPortResetBrkCnt(int);
static void MoxaPortSendBreak(int, int);
-static int moxa_get_serial_info(struct moxa_str *, struct serial_struct *);
-static int moxa_set_serial_info(struct moxa_str *, struct serial_struct *);
+static int moxa_get_serial_info(struct moxa_str *, struct serial_struct __user *);
+static int moxa_set_serial_info(struct moxa_str *, struct serial_struct __user *);
static void MoxaSetFifo(int port, int enable);
static struct tty_operations moxa_ops = {
.tiocmset = moxa_tiocmset,
};
+#ifdef CONFIG_PCI
+static int moxa_get_PCI_conf(struct pci_dev *p, int board_type, moxa_board_conf * board)
+{
+ board->baseAddr = pci_resource_start (p, 2);
+ board->boardType = board_type;
+ switch (board_type) {
+ case MOXA_BOARD_C218_ISA:
+ case MOXA_BOARD_C218_PCI:
+ board->numPorts = 8;
+ break;
+
+ case MOXA_BOARD_CP204J:
+ board->numPorts = 4;
+ break;
+ default:
+ board->numPorts = 0;
+ break;
+ }
+ board->busType = MOXA_BUS_TYPE_PCI;
+ board->pciInfo.busNum = p->bus->number;
+ board->pciInfo.devNum = p->devfn >> 3;
+
+ return (0);
+}
+#endif /* CONFIG_PCI */
+
static int __init moxa_init(void)
{
int i, numBoards;
moxaDriver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(moxaDriver, &moxa_ops);
- moxaXmitBuff = 0;
+ moxaXmitBuff = NULL;
for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) {
ch->type = PORT_16550A;
ch->port = i;
INIT_WORK(&ch->tqueue, do_moxa_softint, ch);
- ch->tty = 0;
+ ch->tty = NULL;
ch->close_delay = 5 * HZ / 10;
ch->closing_wait = 30 * HZ;
ch->count = 0;
module_init(moxa_init);
module_exit(moxa_exit);
-static int moxa_get_PCI_conf(struct pci_dev *p, int board_type, moxa_board_conf * board)
-{
- board->baseAddr = pci_resource_start (p, 2);
- board->boardType = board_type;
- switch (board_type) {
- case MOXA_BOARD_C218_ISA:
- case MOXA_BOARD_C218_PCI:
- board->numPorts = 8;
- break;
-
- case MOXA_BOARD_CP204J:
- board->numPorts = 4;
- break;
- default:
- board->numPorts = 0;
- break;
- }
- board->busType = MOXA_BUS_TYPE_PCI;
- board->pciInfo.busNum = p->bus->number;
- board->pciInfo.devNum = p->devfn >> 3;
-
- return (0);
-}
-
static void do_moxa_softint(void *private_)
{
struct moxa_str *ch = (struct moxa_str *) private_;
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
ch->event = 0;
- ch->tty = 0;
+ ch->tty = NULL;
if (ch->blocked_open) {
if (ch->close_delay) {
set_current_state(TASK_INTERRUPTIBLE);
{
struct moxa_str *ch = (struct moxa_str *) tty->driver_data;
register int port;
+ void __user *argp = (void __user *)arg;
int retval;
port = PORTNO(tty);
MoxaPortSendBreak(ch->port, arg);
return (0);
case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long *) arg);
+ return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) argp);
case TIOCSSOFTCAR:
- if(get_user(retval, (unsigned long *) arg))
+ if(get_user(retval, (unsigned long __user *) argp))
return -EFAULT;
arg = retval;
tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) |
ch->asyncflags |= ASYNC_CHECK_CD;
return (0);
case TIOCGSERIAL:
- return (moxa_get_serial_info(ch, (struct serial_struct *) arg));
+ return moxa_get_serial_info(ch, argp);
case TIOCSSERIAL:
- return (moxa_set_serial_info(ch, (struct serial_struct *) arg));
+ return moxa_set_serial_info(ch, argp);
default:
retval = MoxaDriverIoctl(cmd, arg, port);
}
ch->event = 0;
ch->count = 0;
ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE;
- ch->tty = 0;
+ ch->tty = NULL;
wake_up_interruptible(&ch->open_wait);
}
unsigned char *charptr, *flagptr;
unsigned long flags;
- ts = 0;
+ ts = NULL;
tp = ch->tty;
if (tp)
ts = tp->termios;
static void moxafunc(unsigned long, int, ushort);
static void wait_finish(unsigned long);
static void low_water_check(unsigned long);
-static int moxaloadbios(int, unsigned char *, int);
+static int moxaloadbios(int, unsigned char __user *, int);
static int moxafindcard(int);
-static int moxaload320b(int, unsigned char *, int);
-static int moxaloadcode(int, unsigned char *, int);
+static int moxaload320b(int, unsigned char __user *, int);
+static int moxaloadcode(int, unsigned char __user *, int);
static int moxaloadc218(int, unsigned long, int);
static int moxaloadc320(int, unsigned long, int, int *);
};
struct dl_str {
- char *buf;
+ char __user *buf;
int len;
int cardno;
};
int i;
int status;
int MoxaPortTxQueue(int), MoxaPortRxQueue(int);
+ void __user *argp = (void __user *)arg;
if (port == QueryPort) {
if ((cmd != MOXA_GET_CONF) && (cmd != MOXA_INIT_DRIVER) &&
}
switch (cmd) {
case MOXA_GET_CONF:
- if(copy_to_user((void *)arg, &moxa_boards, MAX_BOARDS * sizeof(moxa_board_conf)))
+ if(copy_to_user(argp, &moxa_boards, MAX_BOARDS * sizeof(moxa_board_conf)))
return -EFAULT;
return (0);
case MOXA_INIT_DRIVER:
return (0);
case MOXA_GETDATACOUNT:
moxaLog.tick = jiffies;
- if(copy_to_user((void *)arg, &moxaLog, sizeof(mon_st)))
+ if(copy_to_user(argp, &moxaLog, sizeof(mon_st)))
return -EFAULT;
return (0);
case MOXA_FLUSH_QUEUE:
temp_queue[i].outq = MoxaPortTxQueue(i);
}
}
- if(copy_to_user((void *)arg, temp_queue, sizeof(struct moxaq_str) * MAX_PORTS))
+ if(copy_to_user(argp, temp_queue, sizeof(struct moxaq_str) * MAX_PORTS))
return -EFAULT;
return (0);
case MOXA_GET_OQUEUE:
i = MoxaPortTxQueue(port);
- return put_user(i, (unsigned long *) arg);
+ return put_user(i, (unsigned long __user *)argp);
case MOXA_GET_IQUEUE:
i = MoxaPortRxQueue(port);
- return put_user(i, (unsigned long *) arg);
+ return put_user(i, (unsigned long __user *)argp);
case MOXA_GET_MAJOR:
- if(copy_to_user((void *)arg, &ttymajor, sizeof(int)))
+ if(copy_to_user(argp, &ttymajor, sizeof(int)))
return -EFAULT;
return 0;
case MOXA_GET_CUMAJOR:
i = 0;
- if(copy_to_user((void *)arg, &i, sizeof(int)))
+ if(copy_to_user(argp, &i, sizeof(int)))
return -EFAULT;
return 0;
case MOXA_GETMSTATUS:
else
GMStatus[i].cflag = moxaChannels[i].tty->termios->c_cflag;
}
- if(copy_to_user((void *)arg, GMStatus, sizeof(struct mxser_mstatus) * MAX_PORTS))
+ if(copy_to_user(argp, GMStatus, sizeof(struct mxser_mstatus) * MAX_PORTS))
return -EFAULT;
return 0;
default:
break;
}
- if(copy_from_user(&dltmp, (void *)arg, sizeof(struct dl_str)))
+ if(copy_from_user(&dltmp, argp, sizeof(struct dl_str)))
return -EFAULT;
if(dltmp.cardno < 0 || dltmp.cardno >= MAX_BOARDS)
return -EINVAL;
}
static int moxa_get_serial_info(struct moxa_str *info,
- struct serial_struct *retinfo)
+ struct serial_struct __user *retinfo)
{
struct serial_struct tmp;
- if (!retinfo)
- return (-EFAULT);
memset(&tmp, 0, sizeof(tmp));
tmp.type = info->type;
tmp.line = info->port;
static int moxa_set_serial_info(struct moxa_str *info,
- struct serial_struct *new_info)
+ struct serial_struct __user *new_info)
{
struct serial_struct new_serial;
}
}
-static int moxaloadbios(int cardno, unsigned char *tmp, int len)
+static int moxaloadbios(int cardno, unsigned char __user *tmp, int len)
{
unsigned long baseAddr;
int i;
return (0);
}
-static int moxaload320b(int cardno, unsigned char * tmp, int len)
+static int moxaload320b(int cardno, unsigned char __user *tmp, int len)
{
unsigned long baseAddr;
int i;
return (0);
}
-static int moxaloadcode(int cardno, unsigned char * tmp, int len)
+static int moxaloadcode(int cardno, unsigned char __user *tmp, int len)
{
unsigned long baseAddr, ofsAddr;
int retval, port, i;
unsigned int retval = 0;
PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_open, entry inode %x file %x\n",
- (int) inode, (int) file);
+ "mwavedd::mwave_open, entry inode %p file %p\n",
+ inode, file);
PRINTK_2(TRACE_MWAVE,
"mwavedd::mwave_open, exit return retval %x\n", retval);
unsigned int retval = 0;
PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_close, entry inode %x file %x\n",
- (int) inode, (int) file);
+ "mwavedd::mwave_close, entry inode %p file %p\n",
+ inode, file);
PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n",
retval);
void __user *arg = (void __user *)ioarg;
PRINTK_5(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, entry inode %x file %x cmd %x arg %x\n",
- (int) inode, (int) file, iocmd, (int) ioarg);
+ "mwavedd::mwave_ioctl, entry inode %p file %p cmd %x arg %x\n",
+ inode, file, iocmd, (int) ioarg);
switch (iocmd) {
loff_t * ppos)
{
PRINTK_5(TRACE_MWAVE,
- "mwavedd::mwave_read entry file %p, buf %p, count %x ppos %p\n",
+ "mwavedd::mwave_read entry file %p, buf %p, count %zx ppos %p\n",
file, buf, count, ppos);
return -EINVAL;
{
PRINTK_5(TRACE_MWAVE,
"mwavedd::mwave_write entry file %p, buf %p,"
- " count %x ppos %p\n",
+ " count %zx ppos %p\n",
file, buf, count, ppos);
return -EINVAL;
static irqreturn_t UartInterrupt(int irq, void *dev_id, struct pt_regs *regs)
{
PRINTK_3(TRACE_TP3780I,
- "tp3780i::UartInterrupt entry irq %x dev_id %x\n", irq, (int) dev_id);
+ "tp3780i::UartInterrupt entry irq %x dev_id %p\n", irq, dev_id);
return IRQ_HANDLED;
}
unsigned short usIPCSource = 0, usIsolationMask, usPCNum;
PRINTK_3(TRACE_TP3780I,
- "tp3780i::DspInterrupt entry irq %x dev_id %x\n", irq, (int) dev_id);
+ "tp3780i::DspInterrupt entry irq %x dev_id %p\n", irq, dev_id);
if (dsp3780I_GetIPCSource(usDspBaseIO, &usIPCSource) == 0) {
PRINTK_2(TRACE_TP3780I,
pSettings->bPllBypass = TP_CFG_PllBypass;
pSettings->usChipletEnable = TP_CFG_ChipletEnable;
- if (request_irq(pSettings->usUartIrq, &UartInterrupt, 0, "mwave_uart", 0)) {
+ if (request_irq(pSettings->usUartIrq, &UartInterrupt, 0, "mwave_uart", NULL)) {
PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Could not get UART IRQ %x\n", pSettings->usUartIrq);
goto exit_cleanup;
} else { /* no conflict just release */
free_irq(pSettings->usUartIrq, NULL);
}
- if (request_irq(pSettings->usDspIrq, &DspInterrupt, 0, "mwave_3780i", 0)) {
+ if (request_irq(pSettings->usDspIrq, &DspInterrupt, 0, "mwave_3780i", NULL)) {
PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Could not get 3780i IRQ %x\n", pSettings->usDspIrq);
goto exit_cleanup;
} else {
#define MOXA_GET_CUMAJOR (MOXA + 64)
#define MOXA_GETMSTATUS (MOXA + 65)
+#ifdef CONFIG_PCI
static struct pci_device_id mxser_pcibrds[] = {
{ PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_C168, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
MXSER_BOARD_C168_PCI },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, mxser_pcibrds);
+#endif /* CONFIG_PCI */
static int ioaddr[MXSER_BOARDS];
static int ttymajor = MXSERMAJOR;
static void mxser_getcfg(int board, struct mxser_hwconf *hwconf);
static int mxser_get_ISA_conf(int, struct mxser_hwconf *);
-static int mxser_get_PCI_conf(struct pci_dev *, int, struct mxser_hwconf *);
static void mxser_do_softint(void *);
static int mxser_open(struct tty_struct *, struct file *);
static void mxser_close(struct tty_struct *, struct file *);
static int mxser_startup(struct mxser_struct *);
static void mxser_shutdown(struct mxser_struct *);
static int mxser_change_speed(struct mxser_struct *, struct termios *old_termios);
-static int mxser_get_serial_info(struct mxser_struct *, struct serial_struct *);
-static int mxser_set_serial_info(struct mxser_struct *, struct serial_struct *);
-static int mxser_get_lsr_info(struct mxser_struct *, unsigned int *);
+static int mxser_get_serial_info(struct mxser_struct *, struct serial_struct __user *);
+static int mxser_set_serial_info(struct mxser_struct *, struct serial_struct __user *);
+static int mxser_get_lsr_info(struct mxser_struct *, unsigned int __user *);
static void mxser_send_break(struct mxser_struct *, int);
static int mxser_tiocmget(struct tty_struct *, struct file *);
static int mxser_tiocmset(struct tty_struct *, struct file *, unsigned int, unsigned int);
mxsercfg[board] = *hwconf;
}
+#ifdef CONFIG_PCI
static int mxser_get_PCI_conf(struct pci_dev *pdev, int board_type, struct mxser_hwconf *hwconf)
{
int i;
}
return (0);
}
+#endif /* CONFIG_PCI */
static struct tty_operations mxser_ops = {
.open = mxser_open,
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
info->event = 0;
- info->tty = 0;
+ info->tty = NULL;
if (info->blocked_open) {
if (info->close_delay) {
set_current_state(TASK_INTERRUPTIBLE);
struct mxser_struct *info = (struct mxser_struct *) tty->driver_data;
int retval;
struct async_icount cprev, cnow; /* kernel counter temps */
- struct serial_icounter_struct *p_cuser; /* user space */
+ struct serial_icounter_struct __user *p_cuser;
unsigned long templ;
+ void __user *argp = (void __user *)arg;
if (PORTNO(tty) == MXSER_PORTS)
return (mxser_ioctl_special(cmd, arg));
mxser_send_break(info, arg ? arg * (HZ / 10) : HZ / 4);
return (0);
case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long *) arg);
+ return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
case TIOCSSOFTCAR:
- if(get_user(templ, (unsigned long *) arg))
+ if(get_user(templ, (unsigned long __user *) arg))
return -EFAULT;
arg = templ;
tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) |
(arg ? CLOCAL : 0));
return (0);
case TIOCGSERIAL:
- return (mxser_get_serial_info(info, (struct serial_struct *) arg));
+ return mxser_get_serial_info(info, argp);
case TIOCSSERIAL:
- return (mxser_set_serial_info(info, (struct serial_struct *) arg));
+ return mxser_set_serial_info(info, argp);
case TIOCSERGETLSR: /* Get line status register */
- return (mxser_get_lsr_info(info, (unsigned int *) arg));
+ return mxser_get_lsr_info(info, argp);
/*
* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
* - mask passed in arg for lines of interest
cli();
cnow = info->icount;
restore_flags(flags);
- p_cuser = (struct serial_icounter_struct *) arg;
+ p_cuser = argp;
if(put_user(cnow.cts, &p_cuser->cts))
return -EFAULT;
if(put_user(cnow.dsr, &p_cuser->dsr))
return -EFAULT;
return put_user(cnow.dcd, &p_cuser->dcd);
case MOXA_HighSpeedOn:
- return put_user(info->baud_base != 115200 ? 1 : 0, (int *) arg);
+ return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
default:
return (-ENOIOCTLCMD);
}
static int mxser_ioctl_special(unsigned int cmd, unsigned long arg)
{
int i, result, status;
+ void __user *argp = (void __user *)arg;
switch (cmd) {
case MOXA_GET_CONF:
- if(copy_to_user((struct mxser_hwconf *) arg, mxsercfg,
+ if(copy_to_user(argp, mxsercfg,
sizeof(struct mxser_hwconf) * 4))
return -EFAULT;
return 0;
case MOXA_GET_MAJOR:
- if(copy_to_user((int *) arg, &ttymajor, sizeof(int)))
+ if(copy_to_user(argp, &ttymajor, sizeof(int)))
return -EFAULT;
return 0;
case MOXA_GET_CUMAJOR:
result = 0;
- if(copy_to_user((int *) arg, &result, sizeof(int)))
+ if(copy_to_user(argp, &result, sizeof(int)))
return -EFAULT;
return 0;
if (mxvar_table[i].base)
result |= (1 << i);
}
- return put_user(result, (unsigned long *) arg);
+ return put_user(result, (unsigned long __user *) argp);
case MOXA_GETDATACOUNT:
- if(copy_to_user((struct mxser_log *) arg, &mxvar_log, sizeof(mxvar_log)))
+ if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log)))
return -EFAULT;
return (0);
case MOXA_GETMSTATUS:
else
GMStatus[i].cts = 0;
}
- if(copy_to_user((struct mxser_mstatus *) arg, GMStatus,
+ if(copy_to_user(argp, GMStatus,
sizeof(struct mxser_mstatus) * MXSER_PORTS))
return -EFAULT;
return 0;
info->event = 0;
info->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
- info->tty = 0;
+ info->tty = NULL;
wake_up_interruptible(&info->open_wait);
}
int pass_counter = 0;
int handled = 0;
- port = 0;
+ port = NULL;
for (i = 0; i < MXSER_BOARDS; i++) {
if (dev_id == &(mxvar_table[i * MXSER_PORTS_PER_BOARD])) {
port = dev_id;
/*
* and set the speed of the serial port
*/
- mxser_change_speed(info, 0);
+ mxser_change_speed(info, NULL);
info->flags |= ASYNC_INITIALIZED;
restore_flags(flags);
*/
if (info->xmit_buf) {
free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = 0;
+ info->xmit_buf = NULL;
}
info->IER = 0;
outb(0x00, info->base + UART_IER); /* disable all intrs */
* ------------------------------------------------------------
*/
static int mxser_get_serial_info(struct mxser_struct *info,
- struct serial_struct *retinfo)
+ struct serial_struct __user *retinfo)
{
struct serial_struct tmp;
}
static int mxser_set_serial_info(struct mxser_struct *info,
- struct serial_struct *new_info)
+ struct serial_struct __user *new_info)
{
struct serial_struct new_serial;
unsigned int flags;
if (info->flags & ASYNC_INITIALIZED) {
if (flags != (info->flags & ASYNC_SPD_MASK)) {
- mxser_change_speed(info, 0);
+ mxser_change_speed(info, NULL);
}
} else
retval = mxser_startup(info);
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
*/
-static int mxser_get_lsr_info(struct mxser_struct *info, unsigned int *value)
+static int mxser_get_lsr_info(struct mxser_struct *info, unsigned int __user *value)
{
unsigned char status;
unsigned int result;
#endif
tty->disc_data = NULL;
if (tty == n_hdlc->backup_tty)
- n_hdlc->backup_tty = 0;
+ n_hdlc->backup_tty = NULL;
if (tty != n_hdlc->tty)
return;
if (n_hdlc->backup_tty) {
struct n_hdlc *n_hdlc = kmalloc(sizeof(*n_hdlc), GFP_KERNEL);
if (!n_hdlc)
- return 0;
+ return NULL;
memset(n_hdlc, 0, sizeof(*n_hdlc));
static inline unsigned char *alloc_buf(void)
{
- unsigned char *p;
int prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
- if (PAGE_SIZE != N_TTY_BUF_SIZE) {
- p = kmalloc(N_TTY_BUF_SIZE, prio);
- if (p)
- memset(p, 0, N_TTY_BUF_SIZE);
- } else
- p = (unsigned char *)get_zeroed_page(prio);
-
- return p;
+ if (PAGE_SIZE != N_TTY_BUF_SIZE)
+ return kmalloc(N_TTY_BUF_SIZE, prio);
+ else
+ return (unsigned char *)__get_free_page(prio);
}
static inline void free_buf(unsigned char *buf)
NVRAM_MINOR);
goto out;
}
- if (!create_proc_read_entry("driver/nvram", 0, 0, nvram_read_proc,
+ if (!create_proc_read_entry("driver/nvram", 0, NULL, nvram_read_proc,
NULL)) {
printk(KERN_ERR "nvram: can't create /proc/driver/nvram\n");
ret = -ENOMEM;
static void __exit
nvram_cleanup_module(void)
{
- remove_proc_entry("driver/nvram", 0);
+ remove_proc_entry("driver/nvram", NULL);
misc_deregister(&nvram_dev);
}
* device at any one time.
*/
-static int button_read (struct file *filp, char *buffer,
+static int button_read (struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
interruptible_sleep_on (&button_wait_queue);
static void button_sequence_finished (unsigned long parameters);
static irqreturn_t button_handler (int irq, void *dev_id, struct pt_regs *regs);
-static int button_read (struct file *filp, char *buffer,
- size_t count, loff_t *ppos);
int button_init (void);
int button_add_callback (void (*callback) (void), int count);
int button_del_callback (void (*callback) (void));
static void kick_open(void);
static int get_flash_id(void);
static int erase_block(int nBlock);
-static int write_block(unsigned long p, const char *buf, int count);
-static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cmd, unsigned long arg);
-static ssize_t flash_read(struct file *file, char *buf, size_t count, loff_t * ppos);
-static ssize_t flash_write(struct file *file, const char *buf, size_t count, loff_t * ppos);
-static loff_t flash_llseek(struct file *file, loff_t offset, int orig);
+static int write_block(unsigned long p, const char __user *buf, int count);
#define KFLASH_SIZE 1024*1024 //1 Meg
#define KFLASH_SIZE4 4*1024*1024 //4 Meg
return 0;
}
-static ssize_t flash_read(struct file *file, char *buf, size_t size, loff_t * ppos)
+static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
+ loff_t *ppos)
{
unsigned long p = *ppos;
unsigned int count = size;
int ret = 0;
if (flashdebug)
- printk(KERN_DEBUG "flash_read: flash_read: offset=0x%lX, buffer=%p, count=0x%X.\n",
- p, buf, count);
+ printk(KERN_DEBUG "flash_read: flash_read: offset=0x%lX, "
+ "buffer=%p, count=0x%X.\n", p, buf, count);
if (count)
ret = -ENXIO;
return ret;
}
-static ssize_t flash_write(struct file *file, const char *buf, size_t size, loff_t * ppos)
+static ssize_t flash_write(struct file *file, const char __user *buf,
+ size_t size, loff_t * ppos)
{
unsigned long p = *ppos;
unsigned int count = size;
break;
}
if (flashdebug)
- printk(KERN_DEBUG "flash_write: writing offset %lX, from buf "
- "%p, bytes left %X.\n", p, buf, count - written);
+ printk(KERN_DEBUG "flash_write: writing offset %lX, "
+ "from buf %p, bytes left %X.\n", p, buf,
+ count - written);
/*
* write_block will limit write to space left in this block
/*
* write_block will limit number of bytes written to the space in this block
*/
-static int write_block(unsigned long p, const char *buf, int count)
+static int write_block(unsigned long p, const char __user *buf, int count)
{
volatile unsigned int c1;
volatile unsigned int c2;
#define CHA 0x00 /* channel A offset */
#define CHB 0x40 /* channel B offset */
+
+/*
+ * FIXME: PPC has PVR defined in asm/reg.h. For now we just undef it.
+ */
+#undef PVR
#define RXFIFO 0
#define TXFIFO 0
static BOOLEAN wait_command_complete(MGSLPC_INFO *info, unsigned char channel)
{
int i = 0;
- unsigned char status;
/* wait for command completion */
- while ((status = read_reg(info, (unsigned char)(channel+STAR)) & BIT2)) {
+ while (read_reg(info, (unsigned char)(channel+STAR)) & BIT2) {
udelay(1);
if (i++ == 1000)
return FALSE;
}
static int proc_do_poolsize(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
sysctl_poolsize = random_state->poolinfo.POOLBYTES;
- ret = proc_dointvec(table, write, filp, buffer, lenp);
+ ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
if (ret || !write ||
(sysctl_poolsize == random_state->poolinfo.POOLBYTES))
return ret;
* sysctl system call, it is returned as 16 bytes of binary data.
*/
static int proc_do_uuid(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
ctl_table fake_table;
unsigned char buf[64], tmp_uuid[16], *uuid;
fake_table.data = buf;
fake_table.maxlen = sizeof(buf);
- return proc_dostring(&fake_table, write, filp, buffer, lenp);
+ return proc_dostring(&fake_table, write, filp, buffer, lenp, ppos);
}
static int uuid_strategy(ctl_table *table, int __user *name, int nlen,
return (cookie - tmp[17]) & COOKIEMASK; /* Leaving the data behind */
}
#endif
+
+/*
+ * Get a random word:
+ */
+unsigned int get_random_int(void)
+{
+ unsigned int val = 0;
+
+ if (!exec_shield_randomize)
+ return 0;
+
+#ifdef CONFIG_X86_HAS_TSC
+ rdtscl(val);
+#endif
+ val += current->pid + jiffies + (int)&val;
+
+ /*
+ * Use IP's RNG. It suits our purpose perfectly: it re-keys itself
+ * every second, from the entropy pool (and thus creates a limited
+ * drain on it), and uses halfMD4Transform within the second. We
+ * also spice it with the TSC (if available), jiffies, PID and the
+ * stack address:
+ */
+ return secure_ip_id(val);
+}
+
+unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len)
+{
+ unsigned long range = end - len - start;
+ if (end <= start + len)
+ return 0;
+ return PAGE_ALIGN(get_random_int() % range + start);
+}
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
port->event = 0;
- port->tty = 0;
+ port->tty = NULL;
if (port->blocked_open) {
if (port->close_delay) {
current->state = TASK_INTERRUPTIBLE;
}
static inline int rc_set_serial_info(struct riscom_port * port,
- struct serial_struct * newinfo)
+ struct serial_struct __user * newinfo)
{
struct serial_struct tmp;
struct riscom_board *bp = port_Board(port);
}
static inline int rc_get_serial_info(struct riscom_port * port,
- struct serial_struct * retinfo)
+ struct serial_struct __user *retinfo)
{
struct serial_struct tmp;
struct riscom_board *bp = port_Board(port);
{
struct riscom_port *port = (struct riscom_port *)tty->driver_data;
+ void __user *argp = (void __user *)arg;
int retval;
if (rc_paranoia_check(port, tty->name, "rc_ioctl"))
rc_send_break(port, arg ? arg*(HZ/10) : HZ/4);
break;
case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned int *) arg);
+ return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned __user *)argp);
case TIOCSSOFTCAR:
- if (get_user(arg,(unsigned int *) arg))
+ if (get_user(arg,(unsigned __user *) argp))
return -EFAULT;
tty->termios->c_cflag =
((tty->termios->c_cflag & ~CLOCAL) |
(arg ? CLOCAL : 0));
break;
case TIOCGSERIAL:
- return rc_get_serial_info(port, (struct serial_struct *) arg);
+ return rc_get_serial_info(port, argp);
case TIOCSSERIAL:
- return rc_set_serial_info(port, (struct serial_struct *) arg);
+ return rc_set_serial_info(port, argp);
default:
return -ENOIOCTLCMD;
}
port->event = 0;
port->count = 0;
port->flags &= ~ASYNC_NORMAL_ACTIVE;
- port->tty = 0;
+ port->tty = NULL;
wake_up_interruptible(&port->open_wait);
}
unsigned m = iminor(file->f_dentry->d_inode);
size_t i;
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
for (i = 0; i < len; ++i) {
char c;
if (get_user(c, data+i))
unsigned m = iminor(file->f_dentry->d_inode);
int value;
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
value = scx200_gpio_get(m);
if (put_user(value ? '1' : '0', buf))
return -EFAULT;
unsigned m = iminor(inode);
if (m > 63)
return -EINVAL;
- return 0;
+ return nonseekable_open(inode, file);
}
static int scx200_gpio_release(struct inode *inode, struct file *file)
/************************* End of Includes **************************/
/***************************** Prototypes ***************************/
-/* Helper functions */
-static __inline__ volatile struct a2232status *a2232stat(unsigned int board,
- unsigned int portonboard);
-static __inline__ volatile struct a2232memory *a2232mem (unsigned int board);
-static __inline__ void a2232_receive_char( struct a2232_port *port,
- int ch, int err );
/* The interrupt service routine */
static irqreturn_t a2232_vbl_inter(int irq, void *data, struct pt_regs *fp);
/* Initialize the port structures */
static struct zorro_dev *zd_a2232[MAX_A2232_BOARDS];
/***************************** End of Global variables **************/
+/* Helper functions */
+
+static inline volatile struct a2232memory *a2232mem(unsigned int board)
+{
+ return (volatile struct a2232memory *)ZTWO_VADDR(zd_a2232[board]->resource.start);
+}
+
+static inline volatile struct a2232status *a2232stat(unsigned int board,
+ unsigned int portonboard)
+{
+ volatile struct a2232memory *mem = a2232mem(board);
+ return &(mem->Status[portonboard]);
+}
+
+static inline void a2232_receive_char(struct a2232_port *port, int ch, int err)
+{
+/* Mostly stolen from other drivers.
+   Maybe one could implement a more efficient version by transferring
+   more than one character at a time.
+*/
+ struct tty_struct *tty = port->gs.tty;
+
+ if (tty->flip.count >= TTY_FLIPBUF_SIZE)
+ return;
+
+ tty->flip.count++;
+
+#if 0
+ switch(err) {
+ case TTY_BREAK:
+ break;
+ case TTY_PARITY:
+ break;
+ case TTY_OVERRUN:
+ break;
+ case TTY_FRAME:
+ break;
+ }
+#endif
+
+ *tty->flip.flag_buf_ptr++ = err;
+ *tty->flip.char_buf_ptr++ = ch;
+ tty_flip_buffer_push(tty);
+}
+
/***************************** Functions ****************************/
/*** BEGIN OF REAL_DRIVER FUNCTIONS ***/
}
/*** END OF FUNCTIONS EXPECTED BY TTY DRIVER STRUCTS ***/
-static __inline__ volatile struct a2232status *a2232stat(unsigned int board, unsigned int portonboard)
-{
- volatile struct a2232memory *mem = a2232mem(board);
- return &(mem->Status[portonboard]);
-}
-
-static __inline__ volatile struct a2232memory *a2232mem (unsigned int board)
-{
- return (volatile struct a2232memory *) ZTWO_VADDR( zd_a2232[board]->resource.start );
-}
-
-static __inline__ void a2232_receive_char( struct a2232_port *port,
- int ch, int err )
-{
-/* Mostly stolen from other drivers.
- Maybe one could implement a more efficient version by not only
- transferring one character at a time.
-*/
- struct tty_struct *tty = port->gs.tty;
-
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- return;
-
- tty->flip.count++;
-
-#if 0
- switch(err) {
- case TTY_BREAK:
- break;
- case TTY_PARITY:
- break;
- case TTY_OVERRUN:
- break;
- case TTY_FRAME:
- break;
- }
-#endif
-
- *tty->flip.flag_buf_ptr++ = err;
- *tty->flip.char_buf_ptr++ = ch;
- tty_flip_buffer_push(tty);
-}
-
static irqreturn_t a2232_vbl_inter(int irq, void *data, struct pt_regs *fp)
{
#if A2232_IOBUFLEN != 256
{ SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
- { 0, 0, 0, 0 }
+ { 0 }
};
#define SONYPI_BUF_SIZE 128
/*
* There is a bunch of documentation about the card, jumpers, config
* settings, restrictions, cables, device names and numbers in
- * ../../Documentation/specialix.txt
+ * Documentation/specialix.txt
*/
#include <linux/config.h>
tty->ldisc.flush_buffer(tty);
tty->closing = 0;
port->event = 0;
- port->tty = 0;
+ port->tty = NULL;
if (port->blocked_open) {
if (port->close_delay) {
current->state = TASK_INTERRUPTIBLE;
static inline int sx_set_serial_info(struct specialix_port * port,
- struct serial_struct * newinfo)
+ struct serial_struct __user * newinfo)
{
struct serial_struct tmp;
struct specialix_board *bp = port_Board(port);
int change_speed;
unsigned long flags;
- int error;
- error = verify_area(VERIFY_READ, (void *) newinfo, sizeof(tmp));
- if (error)
- return error;
-
if (copy_from_user(&tmp, newinfo, sizeof(tmp)))
return -EFAULT;
static inline int sx_get_serial_info(struct specialix_port * port,
- struct serial_struct * retinfo)
+ struct serial_struct __user *retinfo)
{
struct serial_struct tmp;
struct specialix_board *bp = port_Board(port);
- int error;
- error = verify_area(VERIFY_WRITE, (void *) retinfo, sizeof(tmp));
- if (error)
- return error;
-
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_CIRRUS;
tmp.line = port - sx_port;
unsigned int cmd, unsigned long arg)
{
struct specialix_port *port = (struct specialix_port *)tty->driver_data;
- int error;
int retval;
+ void __user *argp = (void __user *)arg;
if (sx_paranoia_check(port, tty->name, "sx_ioctl"))
return -ENODEV;
sx_send_break(port, arg ? arg*(HZ/10) : HZ/4);
return 0;
case TIOCGSOFTCAR:
- error = verify_area(VERIFY_WRITE, (void *) arg, sizeof(long));
- if (error)
- return error;
- put_user(C_CLOCAL(tty) ? 1 : 0,
- (unsigned long *) arg);
+ if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)argp))
+ return -EFAULT;
return 0;
case TIOCSSOFTCAR:
- get_user(arg, (unsigned long *) arg);
+ if (get_user(arg, (unsigned long __user *) argp))
+ return -EFAULT;
tty->termios->c_cflag =
((tty->termios->c_cflag & ~CLOCAL) |
(arg ? CLOCAL : 0));
return 0;
case TIOCGSERIAL:
- return sx_get_serial_info(port, (struct serial_struct *) arg);
+ return sx_get_serial_info(port, argp);
case TIOCSSERIAL:
- return sx_set_serial_info(port, (struct serial_struct *) arg);
+ return sx_set_serial_info(port, argp);
default:
return -ENOIOCTLCMD;
}
port->event = 0;
port->count = 0;
port->flags &= ~ASYNC_NORMAL_ACTIVE;
- port->tty = 0;
+ port->tty = NULL;
wake_up_interruptible(&port->open_wait);
}
static int stl_brdinit(stlbrd_t *brdp);
static int stl_initports(stlbrd_t *brdp, stlpanel_t *panelp);
static int stl_mapirq(int irq, char *name);
-static int stl_getserial(stlport_t *portp, struct serial_struct *sp);
-static int stl_setserial(stlport_t *portp, struct serial_struct *sp);
-static int stl_getbrdstats(combrd_t *bp);
-static int stl_getportstats(stlport_t *portp, comstats_t *cp);
-static int stl_clrportstats(stlport_t *portp, comstats_t *cp);
-static int stl_getportstruct(unsigned long arg);
-static int stl_getbrdstruct(unsigned long arg);
+static int stl_getserial(stlport_t *portp, struct serial_struct __user *sp);
+static int stl_setserial(stlport_t *portp, struct serial_struct __user *sp);
+static int stl_getbrdstats(combrd_t __user *bp);
+static int stl_getportstats(stlport_t *portp, comstats_t __user *cp);
+static int stl_clrportstats(stlport_t *portp, comstats_t __user *cp);
+static int stl_getportstruct(stlport_t __user *arg);
+static int stl_getbrdstruct(stlbrd_t __user *arg);
static int stl_waitcarrier(stlport_t *portp, struct file *filp);
static void stl_delay(int len);
static void stl_eiointr(stlbrd_t *brdp);
{
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("init_module()\n");
#endif
unsigned long flags;
int i, j, k;
-#if DEBUG
+#ifdef DEBUG
printk("cleanup_module()\n");
#endif
* Check for any arguments passed in on the module load command line.
*/
-static void stl_argbrds()
+static void stl_argbrds(void)
{
stlconf_t conf;
stlbrd_t *brdp;
int nrargs, i;
-#if DEBUG
+#ifdef DEBUG
printk("stl_argbrds()\n");
#endif
char *sp;
int nrbrdnames, i;
-#if DEBUG
+#ifdef DEBUG
printk("stl_parsebrd(confp=%x,argp=%x)\n", (int) confp, (int) argp);
#endif
* Allocate a new board structure. Fill out the basic info in it.
*/
-static stlbrd_t *stl_allocbrd()
+static stlbrd_t *stl_allocbrd(void)
{
stlbrd_t *brdp;
unsigned int minordev;
int brdnr, panelnr, portnr, rc;
-#if DEBUG
+#ifdef DEBUG
printk("stl_open(tty=%x,filp=%x): device=%s\n", (int) tty,
(int) filp, tty->name);
#endif
unsigned long flags;
int rc, doclocal;
-#if DEBUG
+#ifdef DEBUG
printk("stl_waitcarrier(portp=%x,filp=%x)\n", (int) portp, (int) filp);
#endif
stlport_t *portp;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stl_close(tty=%x,filp=%x)\n", (int) tty, (int) filp);
#endif
static void stl_delay(int len)
{
-#if DEBUG
+#ifdef DEBUG
printk("stl_delay(len=%d)\n", len);
#endif
if (len > 0) {
unsigned char *chbuf;
char *head, *tail;
-#if DEBUG
+#ifdef DEBUG
printk("stl_write(tty=%x,from_user=%d,buf=%x,count=%d)\n",
(int) tty, from_user, (int) buf, count);
#endif
unsigned int len;
char *head, *tail;
-#if DEBUG
+#ifdef DEBUG
printk("stl_putchar(tty=%x,ch=%x)\n", (int) tty, (int) ch);
#endif
{
stlport_t *portp;
-#if DEBUG
+#ifdef DEBUG
printk("stl_flushchars(tty=%x)\n", (int) tty);
#endif
stlport_t *portp;
char *head, *tail;
-#if DEBUG
+#ifdef DEBUG
printk("stl_writeroom(tty=%x)\n", (int) tty);
#endif
unsigned int size;
char *head, *tail;
-#if DEBUG
+#ifdef DEBUG
printk("stl_charsinbuffer(tty=%x)\n", (int) tty);
#endif
* Generate the serial struct info.
*/
-static int stl_getserial(stlport_t *portp, struct serial_struct *sp)
+static int stl_getserial(stlport_t *portp, struct serial_struct __user *sp)
{
struct serial_struct sio;
stlbrd_t *brdp;
-#if DEBUG
+#ifdef DEBUG
printk("stl_getserial(portp=%x,sp=%x)\n", (int) portp, (int) sp);
#endif
* just quietly ignore any requests to change irq, etc.
*/
-static int stl_setserial(stlport_t *portp, struct serial_struct *sp)
+static int stl_setserial(stlport_t *portp, struct serial_struct __user *sp)
{
struct serial_struct sio;
-#if DEBUG
+#ifdef DEBUG
printk("stl_setserial(portp=%x,sp=%x)\n", (int) portp, (int) sp);
#endif
stlport_t *portp;
unsigned int ival;
int rc;
+ void __user *argp = (void __user *)arg;
-#if DEBUG
+#ifdef DEBUG
printk("stl_ioctl(tty=%x,file=%x,cmd=%x,arg=%x)\n",
(int) tty, (int) file, cmd, (int) arg);
#endif
switch (cmd) {
case TIOCGSOFTCAR:
rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
- (unsigned int *) arg);
+ (unsigned __user *) argp);
break;
case TIOCSSOFTCAR:
- if ((rc = verify_area(VERIFY_READ, (void *) arg,
- sizeof(int))) == 0) {
- get_user(ival, (unsigned int *) arg);
- tty->termios->c_cflag =
+ if (get_user(ival, (unsigned int __user *) arg))
+ return -EFAULT;
+ tty->termios->c_cflag =
(tty->termios->c_cflag & ~CLOCAL) |
(ival ? CLOCAL : 0);
- }
break;
case TIOCGSERIAL:
- if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(struct serial_struct))) == 0)
- rc = stl_getserial(portp, (struct serial_struct *) arg);
+ rc = stl_getserial(portp, argp);
break;
case TIOCSSERIAL:
- if ((rc = verify_area(VERIFY_READ, (void *) arg,
- sizeof(struct serial_struct))) == 0)
- rc = stl_setserial(portp, (struct serial_struct *) arg);
+ rc = stl_setserial(portp, argp);
break;
case COM_GETPORTSTATS:
- if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(comstats_t))) == 0)
- rc = stl_getportstats(portp, (comstats_t *) arg);
+ rc = stl_getportstats(portp, argp);
break;
case COM_CLRPORTSTATS:
- if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(comstats_t))) == 0)
- rc = stl_clrportstats(portp, (comstats_t *) arg);
+ rc = stl_clrportstats(portp, argp);
break;
case TIOCSERCONFIG:
case TIOCSERGWILD:
stlport_t *portp;
struct termios *tiosp;
-#if DEBUG
+#ifdef DEBUG
printk("stl_settermios(tty=%x,old=%x)\n", (int) tty, (int) old);
#endif
{
stlport_t *portp;
-#if DEBUG
+#ifdef DEBUG
printk("stl_throttle(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#if DEBUG
+#ifdef DEBUG
printk("stl_unthrottle(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#if DEBUG
+#ifdef DEBUG
printk("stl_stop(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#if DEBUG
+#ifdef DEBUG
printk("stl_start(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#if DEBUG
+#ifdef DEBUG
printk("stl_hangup(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#if DEBUG
+#ifdef DEBUG
printk("stl_flushbuffer(tty=%x)\n", (int) tty);
#endif
{
stlport_t *portp;
-#if DEBUG
+#ifdef DEBUG
printk("stl_breakctl(tty=%x,state=%d)\n", (int) tty, state);
#endif
stlport_t *portp;
unsigned long tend;
-#if DEBUG
+#ifdef DEBUG
printk("stl_waituntilsent(tty=%x,timeout=%d)\n", (int) tty, timeout);
#endif
{
stlport_t *portp;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sendxchar(tty=%x,ch=%x)\n", (int) tty, ch);
#endif
int curoff, maxoff;
char *pos;
-#if DEBUG
+#ifdef DEBUG
printk("stl_readproc(page=%x,start=%x,off=%x,count=%d,eof=%x,"
"data=%x\n", (int) page, (int) start, (int) off, count,
(int) eof, (int) data);
int i;
int handled = 0;
-#if DEBUG
+#ifdef DEBUG
printk("stl_intr(irq=%d,regs=%x)\n", irq, (int) regs);
#endif
portp = private;
-#if DEBUG
+#ifdef DEBUG
printk("stl_offintr(portp=%x)\n", (int) portp);
#endif
{
int rc, i;
-#if DEBUG
+#ifdef DEBUG
printk("stl_mapirq(irq=%d,name=%s)\n", irq, name);
#endif
stlport_t *portp;
int chipmask, i;
-#if DEBUG
+#ifdef DEBUG
printk("stl_initports(brdp=%x,panelp=%x)\n", (int) brdp, (int) panelp);
#endif
char *name;
int rc;
-#if DEBUG
+#ifdef DEBUG
printk("stl_initeio(brdp=%x)\n", (int) brdp);
#endif
int panelnr, banknr, i;
char *name;
-#if DEBUG
+#ifdef DEBUG
printk("stl_initech(brdp=%x)\n", (int) brdp);
#endif
{
int i;
-#if DEBUG
+#ifdef DEBUG
printk("stl_brdinit(brdp=%x)\n", (int) brdp);
#endif
* Find the next available board number that is free.
*/
-static inline int stl_getbrdnr()
+static inline int stl_getbrdnr(void)
{
int i;
{
stlbrd_t *brdp;
-#if DEBUG
+#ifdef DEBUG
printk("stl_initpcibrd(brdtype=%d,busnr=%x,devnr=%x)\n", brdtype,
devp->bus->number, devp->devfn);
#endif
* Different Stallion boards use the BAR registers in different ways,
* so set up io addresses based on board type.
*/
-#if DEBUG
+#ifdef DEBUG
printk("%s(%d): BAR[]=%x,%x,%x,%x IRQ=%x\n", __FILE__, __LINE__,
pci_resource_start(devp, 0), pci_resource_start(devp, 1),
pci_resource_start(devp, 2), pci_resource_start(devp, 3), devp->irq);
*/
-static inline int stl_findpcibrds()
+static inline int stl_findpcibrds(void)
{
struct pci_dev *dev = NULL;
int i, rc;
-#if DEBUG
+#ifdef DEBUG
printk("stl_findpcibrds()\n");
#endif
* since the initial search and setup is too different.
*/
-static inline int stl_initbrds()
+static inline int stl_initbrds(void)
{
stlbrd_t *brdp;
stlconf_t *confp;
int i;
-#if DEBUG
+#ifdef DEBUG
printk("stl_initbrds()\n");
#endif
* Return the board stats structure to user app.
*/
-static int stl_getbrdstats(combrd_t *bp)
+static int stl_getbrdstats(combrd_t __user *bp)
{
stlbrd_t *brdp;
stlpanel_t *panelp;
* what port to get stats for (used through board control device).
*/
-static int stl_getportstats(stlport_t *portp, comstats_t *cp)
+static int stl_getportstats(stlport_t *portp, comstats_t __user *cp)
{
unsigned char *head, *tail;
unsigned long flags;
- if (portp == (stlport_t *) NULL) {
+ if (!portp) {
if (copy_from_user(&stl_comstats, cp, sizeof(comstats_t)))
return -EFAULT;
portp = stl_getport(stl_comstats.brd, stl_comstats.panel,
* Clear the port stats structure. We also return it zeroed out...
*/
-static int stl_clrportstats(stlport_t *portp, comstats_t *cp)
+static int stl_clrportstats(stlport_t *portp, comstats_t __user *cp)
{
- if (portp == (stlport_t *) NULL) {
+ if (!portp) {
if (copy_from_user(&stl_comstats, cp, sizeof(comstats_t)))
return -EFAULT;
portp = stl_getport(stl_comstats.brd, stl_comstats.panel,
* Return the entire driver ports structure to a user app.
*/
-static int stl_getportstruct(unsigned long arg)
+static int stl_getportstruct(stlport_t __user *arg)
{
stlport_t *portp;
- if (copy_from_user(&stl_dummyport, (void *) arg, sizeof(stlport_t)))
+ if (copy_from_user(&stl_dummyport, arg, sizeof(stlport_t)))
return -EFAULT;
portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
stl_dummyport.portnr);
- if (portp == (stlport_t *) NULL)
- return(-ENODEV);
- return copy_to_user((void *)arg, portp,
- sizeof(stlport_t)) ? -EFAULT : 0;
+ if (!portp)
+ return -ENODEV;
+ return copy_to_user(arg, portp, sizeof(stlport_t)) ? -EFAULT : 0;
}
/*****************************************************************************/
* Return the entire driver board structure to a user app.
*/
-static int stl_getbrdstruct(unsigned long arg)
+static int stl_getbrdstruct(stlbrd_t __user *arg)
{
stlbrd_t *brdp;
- if (copy_from_user(&stl_dummybrd, (void *) arg, sizeof(stlbrd_t)))
+ if (copy_from_user(&stl_dummybrd, arg, sizeof(stlbrd_t)))
return -EFAULT;
if ((stl_dummybrd.brdnr < 0) || (stl_dummybrd.brdnr >= STL_MAXBRDS))
- return(-ENODEV);
+ return -ENODEV;
brdp = stl_brds[stl_dummybrd.brdnr];
- if (brdp == (stlbrd_t *) NULL)
+ if (!brdp)
return(-ENODEV);
- return copy_to_user((void *)arg, brdp, sizeof(stlbrd_t)) ? -EFAULT : 0;
+ return copy_to_user(arg, brdp, sizeof(stlbrd_t)) ? -EFAULT : 0;
}
/*****************************************************************************/
static int stl_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg)
{
int brdnr, rc;
+ void __user *argp = (void __user *)arg;
-#if DEBUG
+#ifdef DEBUG
printk("stl_memioctl(ip=%x,fp=%x,cmd=%x,arg=%x)\n", (int) ip,
(int) fp, cmd, (int) arg);
#endif
switch (cmd) {
case COM_GETPORTSTATS:
- if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(comstats_t))) == 0)
- rc = stl_getportstats((stlport_t *) NULL,
- (comstats_t *) arg);
+ rc = stl_getportstats(NULL, argp);
break;
case COM_CLRPORTSTATS:
- if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(comstats_t))) == 0)
- rc = stl_clrportstats((stlport_t *) NULL,
- (comstats_t *) arg);
+ rc = stl_clrportstats(NULL, argp);
break;
case COM_GETBRDSTATS:
- if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(combrd_t))) == 0)
- rc = stl_getbrdstats((combrd_t *) arg);
+ rc = stl_getbrdstats(argp);
break;
case COM_READPORT:
- if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(stlport_t))) == 0)
- rc = stl_getportstruct(arg);
+ rc = stl_getportstruct(argp);
break;
case COM_READBOARD:
- if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(stlbrd_t))) == 0)
- rc = stl_getbrdstruct(arg);
+ rc = stl_getbrdstruct(argp);
break;
default:
rc = -ENOIOCTLCMD;
int chipmask, i, j;
int nrchips, uartaddr, ioaddr;
-#if DEBUG
+#ifdef DEBUG
printk("stl_panelinit(brdp=%x,panelp=%x)\n", (int) brdp, (int) panelp);
#endif
static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *portp)
{
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400portinit(brdp=%x,panelp=%x,portp=%x)\n",
(int) brdp, (int) panelp, (int) portp);
#endif
* them all up.
*/
-#if DEBUG
+#ifdef DEBUG
printk("SETPORT: portnr=%d panelnr=%d brdnr=%d\n",
portp->portnr, portp->panelnr, portp->brdnr);
printk(" cor1=%x cor2=%x cor3=%x cor4=%x cor5=%x\n",
unsigned char msvr1, msvr2;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400setsignals(portp=%x,dtr=%d,rts=%d)\n",
(int) portp, dtr, rts);
#endif
unsigned long flags;
int sigs;
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400getsignals(portp=%x)\n", (int) portp);
#endif
unsigned char ccr;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400enablerxtx(portp=%x,rx=%d,tx=%d)\n",
(int) portp, rx, tx);
#endif
unsigned char sreron, sreroff;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400startrxtx(portp=%x,rx=%d,tx=%d)\n",
(int) portp, rx, tx);
#endif
{
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400disableintrs(portp=%x)\n", (int) portp);
#endif
save_flags(flags);
{
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400sendbreak(portp=%x,len=%d)\n", (int) portp, len);
#endif
struct tty_struct *tty;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400flowctrl(portp=%x,state=%x)\n", (int) portp, state);
#endif
struct tty_struct *tty;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400sendflow(portp=%x,state=%x)\n", (int) portp, state);
#endif
{
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400flush(portp=%x)\n", (int) portp);
#endif
static int stl_cd1400datastate(stlport_t *portp)
{
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400datastate(portp=%x)\n", (int) portp);
#endif
{
unsigned char svrtype;
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400eiointr(panelp=%x,iobase=%x)\n",
(int) panelp, iobase);
#endif
{
unsigned char svrtype;
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400echintr(panelp=%x,iobase=%x)\n", (int) panelp,
iobase);
#endif
char *head, *tail;
unsigned char ioack, srer;
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400txisr(panelp=%x,ioaddr=%x)\n", (int) panelp, ioaddr);
#endif
unsigned char status;
char ch;
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400rxisr(panelp=%x,ioaddr=%x)\n", (int) panelp, ioaddr);
#endif
unsigned int ioack;
unsigned char misr;
-#if DEBUG
+#ifdef DEBUG
printk("stl_cd1400mdmisr(panelp=%x)\n", (int) panelp);
#endif
int chipmask, i;
int nrchips, ioaddr;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198panelinit(brdp=%x,panelp=%x)\n",
(int) brdp, (int) panelp);
#endif
static void stl_sc26198portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *portp)
{
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198portinit(brdp=%x,panelp=%x,portp=%x)\n",
(int) brdp, (int) panelp, (int) portp);
#endif
* them all up.
*/
-#if DEBUG
+#ifdef DEBUG
printk("SETPORT: portnr=%d panelnr=%d brdnr=%d\n",
portp->portnr, portp->panelnr, portp->brdnr);
printk(" mr0=%x mr1=%x mr2=%x clk=%x\n", mr0, mr1, mr2, clk);
unsigned char iopioron, iopioroff;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198setsignals(portp=%x,dtr=%d,rts=%d)\n",
(int) portp, dtr, rts);
#endif
unsigned long flags;
int sigs;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198getsignals(portp=%x)\n", (int) portp);
#endif
unsigned char ccr;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198enablerxtx(portp=%x,rx=%d,tx=%d)\n",
(int) portp, rx, tx);
#endif
unsigned char imr;
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198startrxtx(portp=%x,rx=%d,tx=%d)\n",
(int) portp, rx, tx);
#endif
{
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198disableintrs(portp=%x)\n", (int) portp);
#endif
{
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198sendbreak(portp=%x,len=%d)\n", (int) portp, len);
#endif
unsigned long flags;
unsigned char mr0;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198flowctrl(portp=%x,state=%x)\n", (int) portp, state);
#endif
unsigned long flags;
unsigned char mr0;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198sendflow(portp=%x,state=%x)\n", (int) portp, state);
#endif
{
unsigned long flags;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198flush(portp=%x)\n", (int) portp);
#endif
unsigned long flags;
unsigned char sr;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198datastate(portp=%x)\n", (int) portp);
#endif
{
int i;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198wait(portp=%x)\n", (int) portp);
#endif
int len, stlen;
char *head, *tail;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198txisr(portp=%x)\n", (int) portp);
#endif
struct tty_struct *tty;
unsigned int len, buflen, ioaddr;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198rxisr(portp=%x,iack=%x)\n", (int) portp, iack);
#endif
{
unsigned char cir, ipr, xisr;
-#if DEBUG
+#ifdef DEBUG
printk("stl_sc26198otherisr(portp=%x,iack=%x)\n", (int) portp, iack);
#endif
#define PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8 0x2000
#endif
+#ifdef CONFIG_PCI
static struct pci_device_id sx_pci_tbl[] = {
{ PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8, PCI_ANY_ID, PCI_ANY_ID },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, sx_pci_tbl);
+#endif /* CONFIG_PCI */
/* Configurable options:
(Don't be too sure that it'll work if you toggle them) */
unsigned int cmd, unsigned long arg)
{
int rc = 0;
- int *descr = (int *)arg, i;
+ int __user *descr = (int __user *)arg;
+ int i;
static struct sx_board *board = NULL;
int nbytes, offset;
unsigned long data;
get_user (data, descr++);
while (nbytes && data) {
for (i=0;i<nbytes;i += SX_CHUNK_SIZE) {
- if (copy_from_user(tmp, (char *)data + i,
+ if (copy_from_user(tmp, (char __user *)data+i,
(i + SX_CHUNK_SIZE >
nbytes) ? nbytes - i :
SX_CHUNK_SIZE)) {
{
int rc;
struct sx_port *port = tty->driver_data;
+ void __user *argp = (void __user *)arg;
int ival;
/* func_enter2(); */
switch (cmd) {
case TIOCGSOFTCAR:
rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
- (unsigned int *) arg);
+ (unsigned __user *) argp);
break;
case TIOCSSOFTCAR:
- if ((rc = get_user(ival, (unsigned int *) arg)) == 0) {
+ if ((rc = get_user(ival, (unsigned __user *) argp)) == 0) {
tty->termios->c_cflag =
(tty->termios->c_cflag & ~CLOCAL) |
(ival ? CLOCAL : 0);
}
break;
case TIOCGSERIAL:
- if ((rc = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(struct serial_struct))) == 0)
- rc = gs_getserial(&port->gs, (struct serial_struct *) arg);
+ rc = gs_getserial(&port->gs, argp);
break;
case TIOCSSERIAL:
- if ((rc = verify_area(VERIFY_READ, (void *) arg,
- sizeof(struct serial_struct))) == 0)
- rc = gs_setserial(&port->gs, (struct serial_struct *) arg);
+ rc = gs_setserial(&port->gs, argp);
break;
default:
rc = -ENOIOCTLCMD;
if (info->xmit_buf) {
free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = 0;
+ info->xmit_buf = NULL;
}
spin_lock_irqsave(&info->irq_spinlock,flags);
shutdown(info);
tty->closing = 0;
- info->tty = 0;
+ info->tty = NULL;
if (info->blocked_open) {
if (info->close_delay) {
info->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
- info->tty = 0;
+ info->tty = NULL;
wake_up_interruptible(&info->open_wait);
cleanup:
if (retval) {
if (tty->count == 1)
- info->tty = 0; /* tty layer will release tty struct */
+ info->tty = NULL;/* tty layer will release tty struct */
if(info->count)
info->count--;
}
}
if (info->memory_base){
iounmap(info->memory_base);
- info->memory_base = 0;
+ info->memory_base = NULL;
}
if (info->lcr_base){
iounmap(info->lcr_base - info->lcr_offset);
- info->lcr_base = 0;
+ info->lcr_base = NULL;
}
if ( debug_level >= DEBUG_LEVEL_INFO )
#define TMCS 0x64
#define TEPR 0x65
+/*
+ * FIXME: DAR here clashes with asm-ppc/reg.h and asm-sh/.../dma.h
+ */
+#undef DAR
/* DMA Controller Register macros */
#define DAR 0x80
#define DARL 0x80
cleanup:
if (retval) {
if (tty->count == 1)
- info->tty = 0; /* tty layer will release tty struct */
+ info->tty = NULL;/* tty layer will release tty struct */
if(info->count)
info->count--;
}
shutdown(info);
tty->closing = 0;
- info->tty = 0;
+ info->tty = NULL;
if (info->blocked_open) {
if (info->close_delay) {
info->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
- info->tty = 0;
+ info->tty = NULL;
wake_up_interruptible(&info->open_wait);
}
if (info->tx_buf) {
kfree(info->tx_buf);
- info->tx_buf = 0;
+ info->tx_buf = NULL;
}
spin_lock_irqsave(&info->lock,flags);
if (info->memory_base){
iounmap(info->memory_base);
- info->memory_base = 0;
+ info->memory_base = NULL;
}
if (info->sca_base) {
iounmap(info->sca_base - info->sca_offset);
- info->sca_base=0;
+ info->sca_base=NULL;
}
if (info->statctrl_base) {
iounmap(info->statctrl_base - info->statctrl_offset);
- info->statctrl_base=0;
+ info->statctrl_base=NULL;
}
if (info->lcr_base){
iounmap(info->lcr_base - info->lcr_offset);
- info->lcr_base = 0;
+ info->lcr_base = NULL;
}
if ( debug_level >= DEBUG_LEVEL_INFO )
u32 speed = info->params.clock_speed;
info->params.clock_speed = 3686400;
- info->tty = 0;
+ info->tty = NULL;
/* assume failure */
info->init_error = DiagStatus_DmaFailure;
init_ti_parallel(minor);
parport_release(table[minor].dev);
- return 0;
+ return nonseekable_open(inode, file);
}
static int
if (count == 0)
return 0;
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
parport_claim_or_block(table[minor].dev);
while (n < count) {
printk(TPQIC02_NAME ": ll_do_qic_cmd(%x, %ld) failed\n", cmd, (long) timeout);
return -EIO;
}
-#if OBSOLETE
+#ifdef OBSOLETE
/* wait for ready since it may not be active immediately after reading status */
while ((inb_p(QIC02_STAT_PORT) & QIC02_STAT_READY) != 0)
cpu_relax();
if (stat != TE_OK)
return stat;
-#if OBSOLETE
+#ifdef OBSOLETE
/************* not needed iff rd_status() would wait for ready!!!!!! **********/
if (wait_for_ready(TIM_S) != TE_OK) { /*** not sure this is needed ***/
tpqputs(TPQD_ALWAYS, "wait_for_ready failed in start_dma");
release_region(QIC02_TAPE_PORT, QIC02_TAPE_PORT_RANGE);
if (buffaddr)
free_pages((unsigned long) buffaddr, get_order(TPQBUF_SIZE));
- buffaddr = 0; /* Better to cause a panic than overwite someone else */
+ buffaddr = NULL; /* Better to cause a panic than overwrite someone else */
status_zombie = YES;
} /* qic02_release_resources */
ssize_t redirected_tty_write(struct file *, const char __user *, size_t, loff_t *);
static unsigned int tty_poll(struct file *, poll_table *);
static int tty_open(struct inode *, struct file *);
+static int ptmx_open(struct inode *, struct file *);
static int tty_release(struct inode *, struct file *);
int tty_ioctl(struct inode * inode, struct file * file,
unsigned int cmd, unsigned long arg);
static ssize_t hung_up_tty_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
- /* Can't seek (pread) on ttys. */
- if (ppos != &file->f_pos)
- return -ESPIPE;
return 0;
}
static ssize_t hung_up_tty_write(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
{
- /* Can't seek (pwrite) on ttys. */
- if (ppos != &file->f_pos)
- return -ESPIPE;
return -EIO;
}
.fasync = tty_fasync,
};
+#ifdef CONFIG_UNIX98_PTYS
+static struct file_operations ptmx_fops = {
+ .llseek = no_llseek,
+ .read = tty_read,
+ .write = tty_write,
+ .poll = tty_poll,
+ .ioctl = tty_ioctl,
+ .open = ptmx_open,
+ .release = tty_release,
+ .fasync = tty_fasync,
+};
+#endif
+
static struct file_operations console_fops = {
.llseek = no_llseek,
.read = tty_read,
struct tty_struct * tty;
struct inode *inode;
- /* Can't seek (pread) on ttys. */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
tty = (struct tty_struct *)file->private_data;
inode = file->f_dentry->d_inode;
if (tty_paranoia_check(tty, inode, "tty_read"))
struct tty_struct * tty;
struct inode *inode = file->f_dentry->d_inode;
- /* Can't seek (pwrite) on ttys. */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
tty = (struct tty_struct *)file->private_data;
if (tty_paranoia_check(tty, inode, "tty_write"))
return -EIO;
if (p) {
ssize_t res;
- /* Can't seek (pwrite) on ttys. */
- if (ppos != &file->f_pos)
- return -ESPIPE;
res = vfs_write(p, buf, count, &p->f_pos);
fput(p);
return res;
{
struct tty_struct *tty, *o_tty;
int pty_master, tty_closing, o_tty_closing, do_sleep;
- int devpts_master;
+ int devpts_master, devpts;
int idx;
char buf[64];
idx = tty->index;
pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER);
- devpts_master = pty_master && (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM);
+ devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
+ devpts_master = pty_master && devpts;
o_tty = tty->link;
#ifdef TTY_PARANOIA_CHECK
#ifdef CONFIG_UNIX98_PTYS
/* Make this pty number available for reallocation */
- if (devpts_master) {
+ if (devpts) {
down(&allocated_ptys_lock);
idr_remove(&allocated_ptys, idx);
up(&allocated_ptys_lock);
dev_t device = inode->i_rdev;
unsigned short saved_flags = filp->f_flags;
+ nonseekable_open(inode, filp);
retry_open:
noctty = filp->f_flags & O_NOCTTY;
index = -1;
return -ENODEV;
}
-#ifdef CONFIG_UNIX98_PTYS
- if (device == MKDEV(TTYAUX_MAJOR,2)) {
- int idr_ret;
-
- /* find a device that is not in use. */
- down(&allocated_ptys_lock);
- if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) {
- up(&allocated_ptys_lock);
- return -ENOMEM;
- }
- idr_ret = idr_get_new(&allocated_ptys, NULL, &index);
- if (idr_ret < 0) {
- up(&allocated_ptys_lock);
- if (idr_ret == -EAGAIN)
- return -ENOMEM;
- return -EIO;
- }
- if (index >= pty_limit) {
- idr_remove(&allocated_ptys, index);
- up(&allocated_ptys_lock);
- return -EIO;
- }
- up(&allocated_ptys_lock);
-
- driver = ptm_driver;
- retval = init_dev(driver, index, &tty);
- if (retval) {
- down(&allocated_ptys_lock);
- idr_remove(&allocated_ptys, index);
- up(&allocated_ptys_lock);
- return retval;
- }
-
- set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
- if (devpts_pty_new(tty->link))
- retval = -ENOMEM;
- } else
-#endif
- {
- driver = get_tty_driver(device, &index);
- if (!driver)
- return -ENODEV;
+ driver = get_tty_driver(device, &index);
+ if (!driver)
+ return -ENODEV;
got_driver:
- retval = init_dev(driver, index, &tty);
- if (retval)
- return retval;
- }
+ retval = init_dev(driver, index, &tty);
+ if (retval)
+ return retval;
filp->private_data = tty;
file_move(filp, &tty->tty_files);
printk(KERN_DEBUG "error %d in opening %s...", retval,
tty->name);
#endif
-
-#ifdef CONFIG_UNIX98_PTYS
- if (index != -1) {
- down(&allocated_ptys_lock);
- idr_remove(&allocated_ptys, index);
- up(&allocated_ptys_lock);
- }
-#endif
-
release_dev(filp);
if (retval != -ERESTARTSYS)
return retval;
return 0;
}
+#ifdef CONFIG_UNIX98_PTYS
+static int ptmx_open(struct inode * inode, struct file * filp)
+{
+ struct tty_struct *tty;
+ int retval;
+ int index;
+ int idr_ret;
+
+ nonseekable_open(inode, filp);
+
+ /* find a device that is not in use. */
+ down(&allocated_ptys_lock);
+ if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) {
+ up(&allocated_ptys_lock);
+ return -ENOMEM;
+ }
+ idr_ret = idr_get_new(&allocated_ptys, NULL, &index);
+ if (idr_ret < 0) {
+ up(&allocated_ptys_lock);
+ if (idr_ret == -EAGAIN)
+ return -ENOMEM;
+ return -EIO;
+ }
+ if (index >= pty_limit) {
+ idr_remove(&allocated_ptys, index);
+ up(&allocated_ptys_lock);
+ return -EIO;
+ }
+ up(&allocated_ptys_lock);
+
+ retval = init_dev(ptm_driver, index, &tty);
+ if (retval)
+ goto out;
+
+ set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ filp->private_data = tty;
+ file_move(filp, &tty->tty_files);
+
+ retval = -ENOMEM;
+ if (devpts_pty_new(tty->link))
+ goto out1;
+
+ check_tty_count(tty, "tty_open");
+ retval = ptm_driver->open(tty, filp);
+ if (!retval)
+ return 0;
+out1:
+ release_dev(filp);
+out:
+ down(&allocated_ptys_lock);
+ idr_remove(&allocated_ptys, index);
+ up(&allocated_ptys_lock);
+ return retval;
+}
+#endif
+
static int tty_release(struct inode * inode, struct file * filp)
{
lock_kernel();
class_simple_device_add(tty_class, MKDEV(TTYAUX_MAJOR, 1), NULL, "console");
#ifdef CONFIG_UNIX98_PTYS
- cdev_init(&ptmx_cdev, &tty_fops);
+ cdev_init(&ptmx_cdev, &ptmx_fops);
if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
panic("Couldn't register /dev/ptmx driver\n");
#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/pm.h>
+#include <linux/font.h>
#include <asm/io.h>
#include <asm/system.h>
#define max_font_size 65536
-int con_font_op(int currcons, struct console_font_op *op)
+int con_font_get(int currcons, struct console_font_op *op)
{
+ struct console_font font;
int rc = -EINVAL;
- int size = max_font_size, set;
- u8 *temp = NULL;
- struct console_font_op old_op;
+ int c;
if (vt_cons[currcons]->vc_mode != KD_TEXT)
- goto quit;
- memcpy(&old_op, op, sizeof(old_op));
- if (op->op == KD_FONT_OP_SET) {
- if (!op->data)
- return -EINVAL;
- if (op->charcount > 512)
- goto quit;
- if (!op->height) { /* Need to guess font height [compat] */
- int h, i;
- u8 __user *charmap = op->data;
- u8 tmp;
-
- /* If from KDFONTOP ioctl, don't allow things which can be done in userland,
- so that we can get rid of this soon */
- if (!(op->flags & KD_FONT_FLAG_OLD))
- goto quit;
- rc = -EFAULT;
- for (h = 32; h > 0; h--)
- for (i = 0; i < op->charcount; i++) {
- if (get_user(tmp, &charmap[32*i+h-1]))
- goto quit;
- if (tmp)
- goto nonzero;
- }
- rc = -EINVAL;
- goto quit;
- nonzero:
- rc = -EINVAL;
- op->height = h;
- }
- if (op->width > 32 || op->height > 32)
- goto quit;
- size = (op->width+7)/8 * 32 * op->charcount;
- if (size > max_font_size)
- return -ENOSPC;
- set = 1;
- } else if (op->op == KD_FONT_OP_GET)
- set = 0;
- else {
- acquire_console_sem();
- rc = sw->con_font_op(vc_cons[currcons].d, op);
- release_console_sem();
- return rc;
- }
+ return -EINVAL;
+
if (op->data) {
- temp = kmalloc(size, GFP_KERNEL);
- if (!temp)
+ font.data = kmalloc(max_font_size, GFP_KERNEL);
+ if (!font.data)
return -ENOMEM;
- if (set && copy_from_user(temp, op->data, size)) {
- rc = -EFAULT;
- goto quit;
- }
- op->data = temp;
- }
+ } else
+ font.data = NULL;
acquire_console_sem();
- rc = sw->con_font_op(vc_cons[currcons].d, op);
+ if (sw->con_font_get)
+ rc = sw->con_font_get(vc_cons[currcons].d, &font);
+ else
+ rc = -ENOSYS;
release_console_sem();
- op->data = old_op.data;
- if (!rc && !set) {
- int c = (op->width+7)/8 * 32 * op->charcount;
-
- if (op->data && op->charcount > old_op.charcount)
+ if (rc)
+ goto out;
+
+ c = (font.width+7)/8 * 32 * font.charcount;
+
+ if (op->data && font.charcount > op->charcount)
+ rc = -ENOSPC;
+ if (!(op->flags & KD_FONT_FLAG_OLD)) {
+ if (font.width > op->width || font.height > op->height)
+ rc = -ENOSPC;
+ } else {
+ if (font.width != 8)
+ rc = -EIO;
+ else if ((op->height && font.height > op->height) ||
+ font.height > 32)
rc = -ENOSPC;
- if (!(op->flags & KD_FONT_FLAG_OLD)) {
- if (op->width > old_op.width ||
- op->height > old_op.height)
- rc = -ENOSPC;
- } else {
- if (op->width != 8)
- rc = -EIO;
- else if ((old_op.height && op->height > old_op.height) ||
- op->height > 32)
- rc = -ENOSPC;
- }
- if (!rc && op->data && copy_to_user(op->data, temp, c))
- rc = -EFAULT;
}
-quit: if (temp)
- kfree(temp);
+ if (rc)
+ goto out;
+
+ if (op->data && copy_to_user(op->data, font.data, c))
+ rc = -EFAULT;
+
+out:
+ kfree(font.data);
return rc;
}
+int con_font_set(int currcons, struct console_font_op *op)
+{
+ struct console_font font;
+ int rc = -EINVAL;
+ int size;
+
+ if (vt_cons[currcons]->vc_mode != KD_TEXT)
+ return -EINVAL;
+ if (!op->data)
+ return -EINVAL;
+ if (op->charcount > 512)
+ return -EINVAL;
+ if (!op->height) { /* Need to guess font height [compat] */
+ int h, i;
+ u8 __user *charmap = op->data;
+ u8 tmp;
+
+ /* If from KDFONTOP ioctl, don't allow things which can be done in userland,
+ so that we can get rid of this soon */
+ if (!(op->flags & KD_FONT_FLAG_OLD))
+ return -EINVAL;
+ for (h = 32; h > 0; h--)
+ for (i = 0; i < op->charcount; i++) {
+ if (get_user(tmp, &charmap[32*i+h-1]))
+ return -EFAULT;
+ if (tmp)
+ goto nonzero;
+ }
+ return -EINVAL;
+ nonzero:
+ op->height = h;
+ }
+ if (op->width <= 0 || op->width > 32 || op->height > 32)
+ return -EINVAL;
+ size = (op->width+7)/8 * 32 * op->charcount;
+ if (size > max_font_size)
+ return -ENOSPC;
+ font.charcount = op->charcount;
+ font.height = op->height;
+ font.width = op->width;
+ font.data = kmalloc(size, GFP_KERNEL);
+ if (!font.data)
+ return -ENOMEM;
+ if (copy_from_user(font.data, op->data, size)) {
+ kfree(font.data);
+ return -EFAULT;
+ }
+ acquire_console_sem();
+ if (sw->con_font_set)
+ rc = sw->con_font_set(vc_cons[currcons].d, &font, op->flags);
+ else
+ rc = -ENOSYS;
+ release_console_sem();
+ kfree(font.data);
+ return rc;
+}
+
+int con_font_default(int currcons, struct console_font_op *op)
+{
+ struct console_font font = {.width = op->width, .height = op->height};
+ char name[MAX_FONT_NAME];
+ char *s = name;
+ int rc;
+
+ if (vt_cons[currcons]->vc_mode != KD_TEXT)
+ return -EINVAL;
+
+ if (!op->data)
+ s = NULL;
+ else if (strncpy_from_user(name, op->data, MAX_FONT_NAME - 1) < 0)
+ return -EFAULT;
+ else
+ name[MAX_FONT_NAME - 1] = 0;
+
+ acquire_console_sem();
+ if (sw->con_font_default)
+ rc = sw->con_font_default(vc_cons[currcons].d, &font, s);
+ else
+ rc = -ENOSYS;
+ release_console_sem();
+ if (!rc) {
+ op->width = font.width;
+ op->height = font.height;
+ }
+ return rc;
+}
+
+int con_font_copy(int currcons, struct console_font_op *op)
+{
+ int con = op->height;
+ struct vc_data *vc;
+ int rc;
+
+ if (vt_cons[currcons]->vc_mode != KD_TEXT)
+ return -EINVAL;
+
+ acquire_console_sem();
+ vc = vc_cons[currcons].d;
+ if (!sw->con_font_copy)
+ rc = -ENOSYS;
+ else if (con < 0 || !vc_cons_allocated(con))
+ rc = -ENOTTY;
+ else if (con == vc->vc_num) /* nothing to do */
+ rc = 0;
+ else
+ rc = sw->con_font_copy(vc, con);
+ release_console_sem();
+ return rc;
+}
+
+int con_font_op(int currcons, struct console_font_op *op)
+{
+ switch (op->op) {
+ case KD_FONT_OP_SET:
+ return con_font_set(currcons, op);
+ case KD_FONT_OP_GET:
+ return con_font_get(currcons, op);
+ case KD_FONT_OP_SET_DEFAULT:
+ return con_font_default(currcons, op);
+ case KD_FONT_OP_COPY:
+ return con_font_copy(currcons, op);
+ }
+ return -ENOSYS;
+}
+
/*
* Interface exported to selection and vcs.
*/
op.width = 8;
op.height = 0;
op.charcount = 256;
- op.data = (char *) arg;
+ op.data = up;
return con_font_op(fg_console, &op);
}
op.width = 8;
op.height = 32;
op.charcount = 256;
- op.data = (char *) arg;
+ op.data = up;
return con_font_op(fg_console, &op);
}
Say N if you are unsure.
+config IXP2000_WATCHDOG
+ tristate "IXP2000 Watchdog"
+ depends on WATCHDOG && ARCH_IXP2000
+ help
+	  Say Y here to include support for the watchdog timer
+	  in the Intel IXP2000 (2400, 2800, 2850) network processors.
+ This driver can be built as a module by choosing M. The module
+ will be called ixp2000_wdt.
+
+ Say N if you are unsure.
+
config SA1100_WATCHDOG
tristate "SA1100/PXA2xx watchdog"
depends on WATCHDOG && ( ARCH_SA1100 || ARCH_PXA )
obj-$(CONFIG_PCIPCWATCHDOG) += pcwd_pci.o
obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.o
obj-$(CONFIG_IXP4XX_WATCHDOG) += ixp4xx_wdt.o
+obj-$(CONFIG_IXP2000_WATCHDOG) += ixp2000_wdt.o
static ssize_t acq_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
/* See if we got the magic character 'V' and reload the timer */
if(count) {
if (!nowayout) {
/* Activate */
acq_keepalive();
- return 0;
+ return nonseekable_open(inode, file);
}
static int acq_close(struct inode *inode, struct file *file)
static ssize_t
advwdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (count) {
if (!nowayout) {
size_t i;
*/
advwdt_ping();
- return 0;
+ return nonseekable_open(inode, file);
}
static int
static ssize_t ali_write(struct file *file, const char __user *data,
size_t len, loff_t * ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
/* Activate */
ali_start();
- return 0;
+ return nonseekable_open(inode, file);
}
/*
static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos)
{
- /* We can't seek */
- if(ppos != &file->f_pos)
- return -ESPIPE;
-
/* See if we got the magic character 'V' and reload the timer */
if(count) {
if (!nowayout) {
return -EBUSY;
/* Good, fire up the show */
wdt_startup();
- return 0;
+ return nonseekable_open(inode, file);
}
static int fop_close(struct inode * inode, struct file * file)
static ssize_t eurwdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (count) {
if (!nowayout) {
size_t i;
eurwdt_timeout = WDT_TIMEOUT; /* initial timeout */
/* Activate the WDT */
eurwdt_activate_timer();
- return 0;
+ return nonseekable_open(inode, file);
}
/**
*/
tco_timer_keepalive ();
tco_timer_start ();
- return 0;
+ return nonseekable_open(inode, file);
}
static int i8xx_tco_release (struct inode *inode, struct file *file)
static ssize_t i8xx_tco_write (struct file *file, const char __user *data,
size_t len, loff_t * ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
static ssize_t
ibwdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (count) {
if (!nowayout) {
size_t i;
/* Activate */
ibwdt_ping();
spin_unlock(&ibwdt_lock);
- return 0;
+ return nonseekable_open(inode, file);
}
static int
indydog_alive = 1;
printk(KERN_INFO "Started watchdog timer.\n");
- return 0;
+ return nonseekable_open(inode, file);
}
static int indydog_release(struct inode *inode, struct file *file)
static ssize_t indydog_write(struct file *file, const char *data, size_t len, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
/* Refresh the timer. */
if (len) {
indydog_ping();
wdt_enable();
- return 0;
+ return nonseekable_open(inode, file);
}
static ssize_t
ixp4xx_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (len) {
if (!nowayout) {
size_t i;
static ssize_t zf_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
/* See if we got the magic character */
if(count){
zf_timer_on();
- return 0;
+ return nonseekable_open(inode, file);
}
static int zf_close(struct inode *inode, struct file *file)
mixcomwd_timer_alive=0;
}
}
- return 0;
+ return nonseekable_open(inode, file);
}
static int mixcomwd_release(struct inode *inode, struct file *file)
static ssize_t mixcomwd_write(struct file *file, const char __user *data, size_t len, loff_t *ppos)
{
- if (ppos != &file->f_pos) {
- return -ESPIPE;
- }
-
if(len)
{
if (!nowayout) {
static ssize_t pcwd_write(struct file *file, const char __user *buf, size_t len,
loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (len) {
if (!nowayout) {
size_t i;
/* Activate */
pcwd_start();
pcwd_keepalive();
- return(0);
+ return nonseekable_open(inode, file);
}
static int pcwd_close(struct inode *inode, struct file *file)
{
int temperature;
- /* Can't seek (pread) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (pcwd_get_temperature(&temperature))
return -EFAULT;
if (!supports_temp)
return -ENODEV;
- return 0;
+ return nonseekable_open(inode, file);
}
static int pcwd_temp_close(struct inode *inode, struct file *file)
static ssize_t pcipcwd_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
/* Activate */
pcipcwd_start();
pcipcwd_keepalive();
- return 0;
+ return nonseekable_open(inode, file);
}
static int pcipcwd_release(struct inode *inode, struct file *file)
{
int temperature;
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (pcipcwd_get_temperature(&temperature))
return -EFAULT;
if (!pcipcwd_private.supports_temp)
return -ENODEV;
- return 0;
+ return nonseekable_open(inode, file);
}
static int pcipcwd_temp_release(struct inode *inode, struct file *file)
static ssize_t usb_pcwd_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
/* Activate */
usb_pcwd_start(usb_pcwd_device);
usb_pcwd_keepalive(usb_pcwd_device);
- return 0;
+ return nonseekable_open(inode, file);
}
static int usb_pcwd_release(struct inode *inode, struct file *file)
{
int temperature;
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (usb_pcwd_get_temperature(usb_pcwd_device, &temperature))
return -EFAULT;
static int usb_pcwd_temperature_open(struct inode *inode, struct file *file)
{
- return 0;
+ return nonseekable_open(inode, file);
}
static int usb_pcwd_temperature_release(struct inode *inode, struct file *file)
*/
static int sa1100dog_open(struct inode *inode, struct file *file)
{
+ nonseekable_open(inode, file);
if (test_and_set_bit(1,&sa1100wdt_users))
return -EBUSY;
static ssize_t sa1100dog_write(struct file *file, const char *data, size_t len, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (len) {
if (!nowayout) {
size_t i;
static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos)
{
- /* We can't seek */
- if(ppos != &file->f_pos)
- return -ESPIPE;
-
/* See if we got the magic character 'V' and reload the timer */
if(count)
{
static int fop_open(struct inode * inode, struct file * file)
{
+ nonseekable_open(inode, file);
+
/* Just in case we're already talking to someone... */
if(test_and_set_bit(0, &wdt_is_open))
return -EBUSY;
static int sc1200wdt_open(struct inode *inode, struct file *file)
{
+ nonseekable_open(inode, file);
+
/* allow one at a time */
if (down_trylock(&open_sem))
return -EBUSY;
static ssize_t sc1200wdt_write(struct file *file, const char __user *data, size_t len, loff_t *ppos)
{
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (len) {
if (!nowayout) {
size_t i;
static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos)
{
- /* We can't seek */
- if(ppos != &file->f_pos)
- return -ESPIPE;
-
/* See if we got the magic character 'V' and reload the timer */
if(count) {
if (!nowayout) {
static int fop_open(struct inode * inode, struct file * file)
{
+ nonseekable_open(inode, file);
+
/* Just in case we're already talking to someone... */
if(test_and_set_bit(0, &wdt_is_open))
return -EBUSY;
return -EBUSY;
scx200_wdt_enable();
- return 0;
+ return nonseekable_open(inode, file);
}
static int scx200_wdt_release(struct inode *inode, struct file *file)
static ssize_t scx200_wdt_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
/* check for a magic close character */
if (len)
{
sh_wdt_start();
- return 0;
+ return nonseekable_open(inode, file);
}
/**
static ssize_t sh_wdt_write(struct file *file, const char *buf,
size_t count, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (count) {
if (!nowayout) {
size_t i;
* Activate timer
*/
softdog_keepalive();
- return 0;
+ return nonseekable_open(inode, file);
}
static int softdog_release(struct inode *inode, struct file *file)
static ssize_t softdog_write(struct file *file, const char __user *data, size_t len, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
/*
* Refresh the timer.
*/
static ssize_t
wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (count) {
if (!nowayout) {
size_t i;
*/
wdt_ping();
- return 0;
+ return nonseekable_open(inode, file);
}
static int
static ssize_t fop_write(struct file * file, const char __user * buf, size_t count, loff_t * ppos)
{
- /* We can't seek */
- if(ppos != &file->f_pos)
- return -ESPIPE;
-
/* See if we got the magic character 'V' and reload the timer */
if(count)
{
/* Good, fire up the show */
wdt_startup();
- return 0;
+ return nonseekable_open(inode, file);
}
static int fop_close(struct inode * inode, struct file * file)
static ssize_t wafwdt_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
/* See if we got the magic character 'V' and reload the timer */
if (count) {
if (!nowayout) {
* Activate
*/
wafwdt_start();
- return 0;
+ return nonseekable_open(inode, file);
}
static int
static ssize_t wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if(count) {
if (!nowayout) {
size_t i;
* Activate
*/
wdt_start();
- return 0;
+ return nonseekable_open(inode, file);
}
/**
{
int temperature;
- /* Can't seek (pread) on this device */
- if (ptr != &file->f_pos)
- return -ESPIPE;
-
if (wdt_get_temperature(&temperature))
return -EFAULT;
static int wdt_temp_open(struct inode *inode, struct file *file)
{
- return 0;
+ return nonseekable_open(inode, file);
}
/**
ret = 0;
#endif
+ nonseekable_open(inode, file);
return ret;
}
static ssize_t
watchdog_write(struct file *file, const char *data, size_t len, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
/*
* Refresh the timer.
*/
__module_get(THIS_MODULE);
wdt977_start();
- return 0;
+ return nonseekable_open(inode, file);
}
static int wdt977_release(struct inode *inode, struct file *file)
* write of data will do, as we we don't define content meaning.
*/
-static ssize_t wdt977_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
+static ssize_t wdt977_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (count) {
if (!nowayout) {
size_t i;
int status;
int new_options, retval = -EINVAL;
int new_timeout;
+ union {
+ struct watchdog_info __user *ident;
+ int __user *i;
+ } uarg;
+
+ uarg.i = (int __user *)arg;
switch(cmd)
{
return -ENOIOCTLCMD;
case WDIOC_GETSUPPORT:
- return copy_to_user((struct watchdog_info *)arg, &ident,
+ return copy_to_user(uarg.ident, &ident,
sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
wdt977_get_status(&status);
- return put_user(status, (int *) arg);
+ return put_user(status, uarg.i);
case WDIOC_GETBOOTSTATUS:
- return put_user(0, (int *) arg);
+ return put_user(0, uarg.i);
case WDIOC_KEEPALIVE:
wdt977_keepalive();
return 0;
case WDIOC_SETOPTIONS:
- if (get_user (new_options, (int *) arg))
+ if (get_user (new_options, uarg.i))
return -EFAULT;
if (new_options & WDIOS_DISABLECARD) {
return retval;
case WDIOC_SETTIMEOUT:
- if (get_user(new_timeout, (int *) arg))
+ if (get_user(new_timeout, uarg.i))
return -EFAULT;
if (wdt977_set_timeout(new_timeout))
/* Fall */
case WDIOC_GETTIMEOUT:
- return put_user(timeout, (int *)arg);
+ return put_user(timeout, uarg.i);
}
}
static ssize_t wdtpci_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (count) {
if (!nowayout) {
size_t i;
* Activate
*/
wdtpci_start();
- return 0;
+ return nonseekable_open(inode, file);
}
/**
{
int temperature;
- /* Can't seek (pread) on this device */
- if (ptr != &file->f_pos)
- return -ESPIPE;
-
if (wdtpci_get_temperature(&temperature))
return -EFAULT;
static int wdtpci_temp_open(struct inode *inode, struct file *file)
{
- return 0;
+ return nonseekable_open(inode, file);
}
/**
/*********************** cpufreq_sysctl interface ********************/
static int
cpufreq_procctl(ctl_table *ctl, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
char buf[16], *p;
int cpu = (long) ctl->extra1;
unsigned int len, left = *lenp;
- if (!left || (filp->f_pos && !write) || !cpu_online(cpu)) {
+ if (!left || (*ppos && !write) || !cpu_online(cpu)) {
*lenp = 0;
return 0;
}
}
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
return 0;
}
static int __init soc_probe(void)
{
struct sbus_bus *sbus;
- struct sbus_dev *sdev = 0;
+ struct sbus_dev *sdev = NULL;
struct soc *s;
int cards = 0;
static int __init socal_probe(void)
{
struct sbus_bus *sbus;
- struct sbus_dev *sdev = 0;
+ struct sbus_dev *sdev = NULL;
struct socal *s;
int cards = 0;
/*
- * Copyright (C) 2002, 2003, 2004 Hewlett-Packard Co.
- * Khalid Aziz <khalid_aziz@hp.com>
+ * Parse the EFI PCDP table to locate the console device.
+ *
+ * (c) Copyright 2002, 2003, 2004 Hewlett-Packard Development Company, L.P.
+ * Khalid Aziz <khalid.aziz@hp.com>
* Alex Williamson <alex.williamson@hp.com>
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
- * Parse the EFI PCDP table to locate the console device.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
/*
- * Copyright (C) 2002, 2004 Hewlett-Packard Co.
- * Khalid Aziz <khalid_aziz@hp.com>
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
* Definitions for PCDP-defined console devices
*
* v1.0a: http://www.dig64.org/specifications/DIG64_HCDPv10a_01.pdf
* v2.0: http://www.dig64.org/specifications/DIG64_HCDPv20_042804.pdf
+ *
+ * (c) Copyright 2002, 2004 Hewlett-Packard Development Company, L.P.
+ * Khalid Aziz <khalid.aziz@hp.com>
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
#define PCDP_CONSOLE 0
/* Allocate space for two transmit and two receive buffer
* descriptors in the DP ram.
*/
- data->dp_addr = m8xx_cpm_dpram_offset(m8xx_cpm_dpalloc(sizeof(cbd_t)
- * 4));
-
+ data->dp_addr = cpm_dpalloc(sizeof(cbd_t) * 4, 8);
+
/* ptr to i2c area */
data->i2c = (i2c8xx_t *)&(((immap_t *)IMAP_ADDR)->im_i2c);
}
config SENSORS_VIA686A
tristate "VIA686A"
- depends on I2C && EXPERIMENTAL
+ depends on I2C && PCI && EXPERIMENTAL
select I2C_SENSOR
select I2C_ISA
help
config BLK_DEV_IDE_PMAC
bool "Builtin PowerMac IDE support"
- depends on PPC_PMAC
+ depends on PPC_PMAC && IDE=y
help
This driver provides support for the built-in IDE controller on
most of the recent Apple Power Macintoshes and PowerBooks.
do_end_request = 1;
} else if (sense_key == ILLEGAL_REQUEST ||
sense_key == DATA_PROTECT) {
- /*
- * check if this was a write protected media
- */
- if (rq_data_dir(rq) == WRITE) {
- printk("ide-cd: media marked write protected\n");
- set_disk_ro(drive->disk, 1);
- }
-
/* No point in retrying after an illegal
request or data protect error.*/
ide_dump_status (drive, "command error", stat);
* sg request
*/
if (rq->bio) {
- if (rq->data_len & 3) {
- printk("%s: block pc not aligned, len=%d\n", drive->name, rq->data_len);
- cdrom_end_request(drive, 0);
- return ide_stopped;
- }
- info->dma = drive->using_dma;
+ int mask = drive->queue->dma_alignment;
+ unsigned long addr = (unsigned long) page_address(bio_page(rq->bio));
+
info->cmd = rq_data_dir(rq);
+ info->dma = drive->using_dma;
+
+ /*
+ * check if dma is safe
+ */
+ if ((rq->data_len & mask) || (addr & mask))
+ info->dma = 0;
}
/* Start sending the command to the drive. */
int nslots;
blk_queue_prep_rq(drive->queue, ide_cdrom_prep_fn);
- blk_queue_dma_alignment(drive->queue, 3);
+ blk_queue_dma_alignment(drive->queue, 31);
drive->queue->unplug_delay = (1 * HZ) / 1000;
if (!drive->queue->unplug_delay)
drive->queue->unplug_delay = 1;
nslots = ide_cdrom_probe_capabilities (drive);
/*
- * set correct block size and read-only for non-ram media
+ * set correct block size
*/
- set_disk_ro(drive->disk, !CDROM_CONFIG_FLAGS(drive)->ram);
blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE);
#if 0
idetape_tape_t *tape = drive->driver_data;
ssize_t bytes_read,temp, actually_read = 0, rc;
- if (ppos != &file->f_pos) {
- /* "A request was outside the capabilities of the device." */
- return -ENXIO;
- }
#if IDETAPE_DEBUG_LOG
if (tape->debug_level >= 3)
printk(KERN_INFO "ide-tape: Reached idetape_chrdev_read, count %Zd\n", count);
idetape_tape_t *tape = drive->driver_data;
ssize_t retval, actually_written = 0;
- if (ppos != &file->f_pos) {
- /* "A request was outside the capabilities of the device." */
- return -ENXIO;
- }
-
/* The drive is write protected. */
if (tape->write_prot)
return -EACCES;
idetape_pc_t pc;
int retval;
+ nonseekable_open(inode, filp);
#if IDETAPE_DEBUG_LOG
printk(KERN_INFO "ide-tape: Reached idetape_chrdev_open\n");
#endif /* IDETAPE_DEBUG_LOG */
/*
- * linux/drivers/ide/pci/hpt366.c Version 0.34 Sept 17, 2002
+ * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
*
* Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
* Portions Copyright (C) 2001 Sun Microsystems, Inc.
+ * Portions Copyright (C) 2003 Red Hat Inc
*
* Thanks to HighPoint Technologies for their assistance, and hardware.
* Special Thanks to Jon Burchmore in SanDiego for the deep pockets, his
* Reset the hpt366 on error, reset on dma
* Fix disabling Fast Interrupt hpt366.
* Mike Waychison <crlf@sun.com>
+ *
+ * Added support for 372N clocking and clock switching. The 372N needs
+ * different clocks on read/write. This requires overloading rw_disk and
+ * other deeply crazy things. Thanks to <http://www.hoerstreich.de> for
+ * keeping me sane.
+ * Alan Cox <alan@redhat.com>
+ *
*/
class_rev &= 0xff;
switch(dev->device) {
+ /* Remap new 372N onto 372 */
+ case PCI_DEVICE_ID_TTI_HPT372N:
+ class_rev = PCI_DEVICE_ID_TTI_HPT372; break;
case PCI_DEVICE_ID_TTI_HPT374:
class_rev = PCI_DEVICE_ID_TTI_HPT374; break;
case PCI_DEVICE_ID_TTI_HPT371:
return mode;
}
+/*
+ * Note for the future; the SATA hpt37x we must set
+ * either PIO or UDMA modes 0,4,5
+ */
+
static u8 hpt3xx_ratefilter (ide_drive_t *drive, u8 speed)
{
struct pci_dev *dev = HWIF(drive)->pci_dev;
return __ide_dma_end(drive);
}
+/**
+ * hpt372n_set_clock - perform clock switching dance
+ * @drive: Drive to switch
+ * @mode: Switching mode (0x21 for write, 0x23 otherwise)
+ *
+ * Switch the DPLL clock on the HPT372N devices. This is a
+ * right mess.
+ */
+
+static void hpt372n_set_clock(ide_drive_t *drive, int mode)
+{
+ ide_hwif_t *hwif = HWIF(drive);
+
+ /* FIXME: should we check for DMA active and BUG() */
+ /* Tristate the bus */
+ outb(0x80, hwif->dma_base+0x73);
+ outb(0x80, hwif->dma_base+0x77);
+
+ /* Switch clock and reset channels */
+ outb(mode, hwif->dma_base+0x7B);
+ outb(0xC0, hwif->dma_base+0x79);
+
+ /* Reset state machines */
+ outb(0x37, hwif->dma_base+0x70);
+ outb(0x37, hwif->dma_base+0x74);
+
+ /* Complete reset */
+ outb(0x00, hwif->dma_base+0x79);
+
+ /* Reconnect channels to bus */
+ outb(0x00, hwif->dma_base+0x73);
+ outb(0x00, hwif->dma_base+0x77);
+}
+
+/**
+ * hpt372n_rw_disk - wrapper for I/O
+ * @drive: drive for command
+ * @rq: block request structure
+ * @block: block number
+ *
+ * This is called when a disk I/O is issued to the 372N instead
+ * of the default functionality. We need it because of the clock
+ * switching
+ *
+ */
+
+static ide_startstop_t hpt372n_rw_disk(ide_drive_t *drive, struct request *rq, sector_t block)
+{
+ int wantclock;
+
+ if(rq_data_dir(rq) == READ)
+ wantclock = 0x21;
+ else
+ wantclock = 0x23;
+
+ if(HWIF(drive)->config_data != wantclock)
+ {
+ hpt372n_set_clock(drive, wantclock);
+ HWIF(drive)->config_data = wantclock;
+ }
+ return __ide_do_rw_disk(drive, rq, block);
+}
+
/*
* Since SUN Cobalt is attempting to do this operation, I should disclose
* this has been a long time ago Thu Jul 27 16:40:57 2000 was the patch date
u16 freq;
u32 pll;
u8 reg5bh;
-
-#if 1
u8 reg5ah = 0;
+ unsigned long dmabase = pci_resource_start(dev, 4);
+ u8 did, rid;
+ int is_372n = 0;
+
pci_read_config_byte(dev, 0x5a, ®5ah);
/* interrupt force enable */
pci_write_config_byte(dev, 0x5a, (reg5ah & ~0x10));
-#endif
+
+ if(dmabase)
+ {
+ did = inb(dmabase + 0x22);
+ rid = inb(dmabase + 0x28);
+
+ if((did == 4 && rid == 6) || (did == 5 && rid > 1))
+ is_372n = 1;
+ }
/*
* default to pci clock. make sure MA15/16 are set to output
/*
* set up the PLL. we need to adjust it so that it's stable.
* freq = Tpll * 192 / Tpci
+ *
+	 * TODO: on non-x86 we should probably check that the dword is
+	 * set to 0xABCDExxx, indicating that the BIOS saved f_CNT
*/
pci_read_config_word(dev, 0x78, &freq);
freq &= 0x1FF;
- if (freq < 0xa0) {
- pll = F_LOW_PCI_33;
- if (hpt_minimum_revision(dev,8))
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt374);
- else if (hpt_minimum_revision(dev,5))
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt372);
- else if (hpt_minimum_revision(dev,4))
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt370a);
+
+ /*
+ * The 372N uses different PCI clock information and has
+ * some other complications
+ * On PCI33 timing we must clock switch
+ * On PCI66 timing we must NOT use the PCI clock
+ *
+ * Currently we always set up the PLL for the 372N
+ */
+
+ pci_set_drvdata(dev, NULL);
+
+ if(is_372n)
+ {
+ printk(KERN_INFO "hpt: HPT372N detected, using 372N timing.\n");
+ if(freq < 0x55)
+ pll = F_LOW_PCI_33;
+ else if(freq < 0x70)
+ pll = F_LOW_PCI_40;
+ else if(freq < 0x7F)
+ pll = F_LOW_PCI_50;
else
- pci_set_drvdata(dev, (void *) thirty_three_base_hpt370);
- printk("HPT37X: using 33MHz PCI clock\n");
- } else if (freq < 0xb0) {
- pll = F_LOW_PCI_40;
- } else if (freq < 0xc8) {
- pll = F_LOW_PCI_50;
- if (hpt_minimum_revision(dev,8))
- pci_set_drvdata(dev, NULL);
- else if (hpt_minimum_revision(dev,5))
- pci_set_drvdata(dev, (void *) fifty_base_hpt372);
- else if (hpt_minimum_revision(dev,4))
- pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
+ pll = F_LOW_PCI_66;
+
+ printk(KERN_INFO "FREQ: %d PLL: %d\n", freq, pll);
+
+ /* We always use the pll not the PCI clock on 372N */
+ }
+ else
+ {
+ if(freq < 0x9C)
+ pll = F_LOW_PCI_33;
+ else if(freq < 0xb0)
+ pll = F_LOW_PCI_40;
+ else if(freq <0xc8)
+ pll = F_LOW_PCI_50;
else
- pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
- printk("HPT37X: using 50MHz PCI clock\n");
- } else {
- pll = F_LOW_PCI_66;
- if (hpt_minimum_revision(dev,8))
- {
- printk(KERN_ERR "HPT37x: 66MHz timings are not supported.\n");
- pci_set_drvdata(dev, NULL);
+ pll = F_LOW_PCI_66;
+
+ if (pll == F_LOW_PCI_33) {
+ if (hpt_minimum_revision(dev,8))
+ pci_set_drvdata(dev, (void *) thirty_three_base_hpt374);
+ else if (hpt_minimum_revision(dev,5))
+ pci_set_drvdata(dev, (void *) thirty_three_base_hpt372);
+ else if (hpt_minimum_revision(dev,4))
+ pci_set_drvdata(dev, (void *) thirty_three_base_hpt370a);
+ else
+ pci_set_drvdata(dev, (void *) thirty_three_base_hpt370);
+ printk("HPT37X: using 33MHz PCI clock\n");
+ } else if (pll == F_LOW_PCI_40) {
+ /* Unsupported */
+ } else if (pll == F_LOW_PCI_50) {
+ if (hpt_minimum_revision(dev,8))
+ pci_set_drvdata(dev, NULL);
+ else if (hpt_minimum_revision(dev,5))
+ pci_set_drvdata(dev, (void *) fifty_base_hpt372);
+ else if (hpt_minimum_revision(dev,4))
+ pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
+ else
+ pci_set_drvdata(dev, (void *) fifty_base_hpt370a);
+ printk("HPT37X: using 50MHz PCI clock\n");
+ } else {
+ if (hpt_minimum_revision(dev,8))
+ {
+ printk(KERN_ERR "HPT37x: 66MHz timings are not supported.\n");
+ }
+ else if (hpt_minimum_revision(dev,5))
+ pci_set_drvdata(dev, (void *) sixty_six_base_hpt372);
+ else if (hpt_minimum_revision(dev,4))
+ pci_set_drvdata(dev, (void *) sixty_six_base_hpt370a);
+ else
+ pci_set_drvdata(dev, (void *) sixty_six_base_hpt370);
+ printk("HPT37X: using 66MHz PCI clock\n");
}
- else if (hpt_minimum_revision(dev,5))
- pci_set_drvdata(dev, (void *) sixty_six_base_hpt372);
- else if (hpt_minimum_revision(dev,4))
- pci_set_drvdata(dev, (void *) sixty_six_base_hpt370a);
- else
- pci_set_drvdata(dev, (void *) sixty_six_base_hpt370);
- printk("HPT37X: using 66MHz PCI clock\n");
}
/*
if (pci_get_drvdata(dev))
goto init_hpt37X_done;
+ if (hpt_minimum_revision(dev,8))
+ {
+ printk(KERN_ERR "HPT374: Only 33MHz PCI timings are supported.\n");
+ return -EOPNOTSUPP;
+ }
/*
* adjust PLL based upon PCI clock, enable it, and wait for
* stabilization.
{
struct pci_dev *dev = hwif->pci_dev;
u8 ata66 = 0, regmask = (hwif->channel) ? 0x01 : 0x02;
-
+ u8 did, rid;
+ unsigned long dmabase = hwif->dma_base;
+ int is_372n = 0;
+
+ if(dmabase)
+ {
+ did = inb(dmabase + 0x22);
+ rid = inb(dmabase + 0x28);
+
+ if((did == 4 && rid == 6) || (did == 5 && rid > 1))
+ is_372n = 1;
+ }
+
hwif->tuneproc = &hpt3xx_tune_drive;
hwif->speedproc = &hpt3xx_tune_chipset;
hwif->quirkproc = &hpt3xx_quirkproc;
hwif->intrproc = &hpt3xx_intrproc;
hwif->maskproc = &hpt3xx_maskproc;
+
+ if(is_372n)
+ hwif->rw_disk = &hpt372n_rw_disk;
/*
* The HPT37x uses the CBLID pins as outputs for MA15/MA16
u8 pin1 = 0, pin2 = 0;
unsigned int class_rev;
char *chipset_names[] = {"HPT366", "HPT366", "HPT368",
- "HPT370", "HPT370A", "HPT372"};
+ "HPT370", "HPT370A", "HPT372",
+ "HPT372N" };
if (PCI_FUNC(dev->devfn) & 1)
return;
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
class_rev &= 0xff;
- strcpy(d->name, chipset_names[class_rev]);
+ if(dev->device == PCI_DEVICE_ID_TTI_HPT372N)
+ class_rev = 6;
+
+ if(class_rev <= 6)
+ d->name = chipset_names[class_rev];
switch(class_rev) {
+ case 6:
case 5:
case 4:
case 3: ide_setup_pci_device(dev, d);
{ PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
{ PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
{ PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT374, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
+ { PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372N, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
{ 0, },
};
MODULE_DEVICE_TABLE(pci, hpt366_pci_tbl);
.channels = 2, /* 4 */
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ },{ /* 5 */
+ .name = "HPT372N",
+ .init_setup = init_setup_hpt37x,
+ .init_chipset = init_chipset_hpt366,
+ .init_hwif = init_hwif_hpt366,
+ .init_dma = init_dma_hpt366,
+ .channels = 2, /* 4 */
+ .autodma = AUTODMA,
+ .bootable = OFF_BOARD,
}
};
if (!pmif->mediabay) {
ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ/100);
+ msleep(10);
ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 0);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(IDE_WAKEUP_DELAY);
+ msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
}
/* Sanitize drive timings */
/* This is necessary to enable IDE when net-booting */
ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ/100);
+ msleep(10);
ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 0);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(IDE_WAKEUP_DELAY);
+ msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
}
/* Setup MMIO ops */
config IEEE1394
tristate "IEEE 1394 (FireWire) support"
+ depends on PCI || BROKEN
help
IEEE 1394 describes a high performance serial bus, which is also
known as FireWire(tm) or i.Link(tm) and is used for connecting all
config IEEE1394_SBP2
tristate "SBP-2 support (Harddisks etc.)"
- depends on IEEE1394 && SCSI
+ depends on IEEE1394 && SCSI && (PCI || BROKEN)
help
This option enables you to use SBP-2 devices connected to your IEEE
1394 bus. SBP-2 devices include harddrives and DVD devices.
static int dv1394_fasync(int fd, struct file *file, int on)
{
/* I just copied this code verbatim from Alan Cox's mouse driver example
- (linux/Documentation/DocBook/) */
+ (Documentation/DocBook/) */
struct video_card *video = file_to_video_card(file);
if (file->f_op->ioctl != dv1394_ioctl)
return -EFAULT;
- if (copy_from_user(&dv32, (void *)arg, sizeof(dv32)))
+ if (copy_from_user(&dv32, (void __user *)arg, sizeof(dv32)))
return -EFAULT;
dv.api_version = dv32.api_version;
dv32.n_clear_frames = dv.n_clear_frames;
dv32.dropped_frames = dv.dropped_frames;
- if (copy_to_user((struct dv1394_status32 *)arg, &dv32, sizeof(dv32)))
+ if (copy_to_user((struct dv1394_status32 __user *)arg, &dv32, sizeof(dv32)))
ret = -EFAULT;
}
return 0;
}
+static inline void purge_partial_datagram(struct list_head *old)
+{
+ struct partial_datagram *pd = list_entry(old, struct partial_datagram, list);
+ struct list_head *lh, *n;
+
+ list_for_each_safe(lh, n, &pd->frag_info) {
+ struct fragment_info *fi = list_entry(lh, struct fragment_info, list);
+ list_del(lh);
+ kfree(fi);
+ }
+ list_del(old);
+ kfree_skb(pd->skb);
+ kfree(pd);
+}
/******************************************
* 1394 bus activity functions
return 0;
}
-static inline void purge_partial_datagram(struct list_head *old)
-{
- struct partial_datagram *pd = list_entry(old, struct partial_datagram, list);
- struct list_head *lh, *n;
-
- list_for_each_safe(lh, n, &pd->frag_info) {
- struct fragment_info *fi = list_entry(lh, struct fragment_info, list);
- list_del(lh);
- kfree(fi);
- }
- list_del(old);
- kfree_skb(pd->skb);
- kfree(pd);
-}
-
static inline int is_datagram_complete(struct list_head *lh, int dg_size)
{
struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list);
#include "raw1394.h"
#include "raw1394-private.h"
-#if BITS_PER_LONG == 64
-#define int2ptr(x) ((void __user *)x)
+#define int2ptr(x) ((void __user *)(unsigned long)x)
#define ptr2int(x) ((u64)(unsigned long)(void __user *)x)
-#else
-#define int2ptr(x) ((void __user *)(u32)x)
-#define ptr2int(x) ((u64)(unsigned long)(void __user *)x)
-#endif
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define RAW1394_DEBUG
if (another_host) {
DBGMSG("another hosts entry is valid -> SUCCESS");
if (copy_to_user(int2ptr(req->req.recvb),
- int2ptr(&addr->start),sizeof(u64))) {
+ &addr->start,sizeof(u64))) {
printk(KERN_ERR "raw1394: arm_register failed "
" address-range-entry is invalid -> EFAULT !!!\n");
vfree(addr->addr_space_buffer);
case VIDEO1394_IOC_TALK_QUEUE_BUFFER:
{
struct video1394_wait v;
- struct video1394_queue_variable qv;
+ unsigned int *psizes = NULL;
struct dma_iso_ctx *d;
- qv.packet_sizes = NULL;
-
if (copy_from_user(&v, argp, sizeof(v)))
return -EFAULT;
}
if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
- unsigned int *psizes;
int buf_size = d->nb_cmd * sizeof(unsigned int);
+ struct video1394_queue_variable __user *p = argp;
+ unsigned int __user *qv;
- if (copy_from_user(&qv, argp, sizeof(qv)))
+ if (get_user(qv, &p->packet_sizes))
return -EFAULT;
psizes = kmalloc(buf_size, GFP_KERNEL);
if (!psizes)
return -ENOMEM;
- if (copy_from_user(psizes, qv.packet_sizes, buf_size)) {
+ if (copy_from_user(psizes, qv, buf_size)) {
kfree(psizes);
return -EFAULT;
}
-
- qv.packet_sizes = psizes;
}
spin_lock_irqsave(&d->lock,flags);
PRINT(KERN_ERR, ohci->host->id,
"Buffer %d is already used",v.buffer);
spin_unlock_irqrestore(&d->lock,flags);
- if (qv.packet_sizes)
- kfree(qv.packet_sizes);
+ if (psizes)
+ kfree(psizes);
return -EFAULT;
}
if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
initialize_dma_it_prg_var_packet_queue(
- d, v.buffer, qv.packet_sizes,
+ d, v.buffer, psizes,
ohci);
}
}
}
- if (qv.packet_sizes)
- kfree(qv.packet_sizes);
+ if (psizes)
+ kfree(psizes);
return 0;
static int video1394_wr_wait32(unsigned int fd, unsigned int cmd, unsigned long arg,
struct file *file)
{
+ struct video1394_wait32 __user *argp = (void __user *)arg;
struct video1394_wait32 wait32;
struct video1394_wait wait;
mm_segment_t old_fs;
if (file->f_op->ioctl != video1394_ioctl)
return -EFAULT;
- if (copy_from_user(&wait32, (void *)arg, sizeof(wait32)))
+ if (copy_from_user(&wait32, argp, sizeof(wait32)))
return -EFAULT;
wait.channel = wait32.channel;
wait32.filltime.tv_sec = (int)wait.filltime.tv_sec;
wait32.filltime.tv_usec = (int)wait.filltime.tv_usec;
- if (copy_to_user((struct video1394_wait32 *)arg, &wait32, sizeof(wait32)))
+ if (copy_to_user(argp, &wait32, sizeof(wait32)))
ret = -EFAULT;
}
if (file->f_op->ioctl != video1394_ioctl)
return -EFAULT;
- if (copy_from_user(&wait32, (void *)arg, sizeof(wait32)))
+ if (copy_from_user(&wait32, (void __user *)arg, sizeof(wait32)))
return -EFAULT;
wait.channel = wait32.channel;
struct video1394_queue_variable {
unsigned int channel;
unsigned int buffer;
- unsigned int* packet_sizes; /* Buffer of size:
+ unsigned int __user * packet_sizes; /* Buffer of size:
buf_size / packet_size */
};
return count;
}
-#elif __x86_64__
+#elif defined(__x86_64__)
#define GET_TIME(x) rdtscl(x)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "TSC"
-#elif __alpha__
+#elif defined(__alpha__)
#define GET_TIME(x) do { x = get_cycles(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "PCC"
char name[64];
char phys[32];
char type;
- volatile char reset;
- volatile char layout;
+ volatile s8 reset;
+ volatile s8 layout;
};
/*
if (pc110pad_used++)
return 0;
- pc110pad_interrupt(0,0,0);
- pc110pad_interrupt(0,0,0);
- pc110pad_interrupt(0,0,0);
+ pc110pad_interrupt(0,NULL,NULL);
+ pc110pad_interrupt(0,NULL,NULL);
+ pc110pad_interrupt(0,NULL,NULL);
outb(PC110PAD_ON, pc110pad_io + 2);
pc110pad_count = 0;
outb(PC110PAD_OFF, pc110pad_io + 2);
- if (request_irq(pc110pad_irq, pc110pad_interrupt, 0, "pc110pad", 0))
+ if (request_irq(pc110pad_irq, pc110pad_interrupt, 0, "pc110pad", NULL))
{
release_region(pc110pad_io, 4);
printk(KERN_ERR "pc110pad: Unable to get irq %d.\n", pc110pad_irq);
outb(PC110PAD_OFF, pc110pad_io + 2);
- free_irq(pc110pad_irq, 0);
+ free_irq(pc110pad_irq, NULL);
release_region(pc110pad_io, 4);
}
struct sk_buff *skb;
size_t copied;
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (!cdev->ap.applid)
return -ENODEV;
struct sk_buff *skb;
u16 mlen;
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (!cdev->ap.applid)
return -ENODEV;
if ((file->private_data = capidev_alloc()) == 0)
return -ENOMEM;
- return 0;
+ return nonseekable_open(inode, file);
}
static int
if ((len = strlen(inf->info_start)) <= count) {
if (copy_to_user(buf, inf->info_start, len))
return -EFAULT;
- file->f_pos += len;
+ *off += len;
return (len);
}
return (0);
(struct divert_info **) filep->private_data = &divert_info_head;
spin_unlock_irqrestore( &divert_info_lock, flags );
/* start_divert(); */
- return (0);
+ return nonseekable_open(ino, filep);
} /* isdn_divert_open */
/*******************/
card = kmalloc(sizeof(*card), GFP_KERNEL);
if (!card)
- return 0;
+ return NULL;
memset(card, 0, sizeof(*card));
cinfo = kmalloc(sizeof(*cinfo) * nr_controllers, GFP_KERNEL);
if (!cinfo) {
kfree(card);
- return 0;
+ return NULL;
}
memset(cinfo, 0, sizeof(*cinfo) * nr_controllers);
err_kfree:
kfree(p);
err:
- return 0;
+ return NULL;
}
void avmcard_dma_free(avmcard_dmainfo *p)
int str_length;
int *str_msg;
- if (off != &file->f_pos)
- return -ESPIPE;
-
if (!file->private_data) {
for (;;) {
while (
filep->private_data = NULL;
- return (0);
+ return nonseekable_open(ino, filep);
}
static int maint_close(struct inode *ino, struct file *filep)
if (*off)
return 0;
- if (off != &file->f_pos)
- return -ESPIPE;
divas_get_version(tmpbuf);
if (copy_to_user(buf + len, &tmpbuf, strlen(tmpbuf)))
static int divas_open(struct inode *inode, struct file *file)
{
- return (0);
+ return nonseekable_open(inode, file);
}
static int divas_close(struct inode *inode, struct file *file)
-/* $Id: platform.h,v 1.37 2004/03/20 17:44:29 armin Exp $
+/* $Id: platform.h,v 1.37.4.1 2004/07/28 14:47:21 armin Exp $
*
* platform.h
*
*/
static __inline__ void diva_os_sleep(dword mSec)
{
- unsigned long timeout = HZ * mSec / 1000 + 1;
-
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(timeout);
+ msleep(mSec);
}
static __inline__ void diva_os_wait(dword mSec)
{
config HISAX_TELESPCI
bool "Teles PCI"
- depends on PCI
+ depends on PCI && (BROKEN || !(SPARC64 || PPC))
help
This enables HiSax support for the Teles PCI.
See <file:Documentation/isdn/README.HiSax> on how to configure it.
config HISAX_NETJET
bool "NETjet card"
- depends on PCI
+ depends on PCI && (BROKEN || !(SPARC64 || PPC))
help
This enables HiSax support for the NetJet from Traverse
Technologies.
config HISAX_NETJET_U
bool "NETspider U card"
- depends on PCI
+ depends on PCI && (BROKEN || !(SPARC64 || PPC))
help
This enables HiSax support for the Netspider U interface ISDN card
from Traverse Technologies.
config HISAX_HFC_PCI
bool "HFC PCI-Bus cards"
- depends on PCI
+ depends on PCI && (BROKEN || !(SPARC64 || PPC))
help
This enables HiSax support for the HFC-S PCI 2BDS0 based cards.
config HISAX_ENTERNOW_PCI
bool "Formula-n enter:now PCI card"
- depends on PCI
+ depends on PCI && (BROKEN || !(SPARC64 || PPC))
help
This enables HiSax support for the Formula-n enter:now PCI
ISDN card.
config HISAX_FRITZ_PCIPNP
tristate "AVM Fritz!Card PCI/PCIv2/PnP support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ depends on PCI && EXPERIMENTAL
help
This enables the driver for the AVM Fritz!Card PCI,
Fritz!Card PCI v2 and Fritz!Card PnP.
return(0);
}
+#ifdef CONFIG_PCI
static struct pci_dev *dev_avm __initdata = NULL;
+#endif
#ifdef __ISAPNP__
static struct pnp_card *pnp_avm_c __initdata = NULL;
#endif
printk(KERN_INFO "FritzPnP: no ISA PnP present\n");
}
#endif
-#if CONFIG_PCI
+#ifdef CONFIG_PCI
if ((dev_avm = pci_find_device(PCI_VENDOR_ID_AVM,
PCI_DEVICE_ID_AVM_A1, dev_avm))) {
cs->irq = dev_avm->irq;
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * ../../../Documentation/isdn/HiSax.cert
+ * Documentation/isdn/HiSax.cert
*
* based on the teles driver from Jan den Ouden
*
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * ../../../Documentation/isdn/HiSax.cert
+ * Documentation/isdn/HiSax.cert
*
* based on the teles driver from Jan den Ouden
*
}
}
+#ifdef CONFIG_PCI
#include <linux/pci.h>
static struct pci_device_id hisax_pci_tbl[] __initdata = {
};
MODULE_DEVICE_TABLE(pci, hisax_pci_tbl);
+#endif /* CONFIG_PCI */
module_init(HiSax_init);
module_exit(HiSax_exit);
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * ../../../Documentation/isdn/HiSax.cert
+ * Documentation/isdn/HiSax.cert
*
* Thanks to Eicon Technology for documents and information
*
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * ../../../Documentation/isdn/HiSax.cert
+ * Documentation/isdn/HiSax.cert
*
* Thanks to Elsa GmbH for documents and information
*
byteout(cs->hw.hfcD.addr | 1, reg);
}
ret = bytein(cs->hw.hfcD.addr);
-#if HFC_REG_DEBUG
+#ifdef HFC_REG_DEBUG
if (cs->debug & L1_DEB_HSCX_FIFO && (data != 2))
debugl1(cs, "t3c RD %02x %02x", reg, ret);
#endif
}
if (data)
byteout(cs->hw.hfcD.addr, value);
-#if HFC_REG_DEBUG
+#ifdef HFC_REG_DEBUG
if (cs->debug & L1_DEB_HSCX_FIFO && (data != HFCD_DATA_NODEB))
debugl1(cs, "t3c W%c %02x %02x", data ? 'D' : 'C', reg, value);
#endif
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * ../../../Documentation/isdn/HiSax.cert
+ * Documentation/isdn/HiSax.cert
*
*/
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * ../../../Documentation/isdn/HiSax.cert
+ * Documentation/isdn/HiSax.cert
*
*/
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * ../../../Documentation/isdn/HiSax.cert
+ * Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * ../../../Documentation/isdn/HiSax.cert
+ * Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * ../../../Documentation/isdn/HiSax.cert
+ * Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * ../../../Documentation/isdn/HiSax.cert
+ * Documentation/isdn/HiSax.cert
*
*/
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * ../../../Documentation/isdn/HiSax.cert
+ * Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
}
bcs->hw.tiger.s_tot += s_cnt;
if (bcs->cs->debug & L1_DEB_HSCX)
- debugl1(bcs->cs,"tiger write_raw: c%d %x-%x %d/%d %d %x", bcs->channel,
- (u_int)buf, (u_int)p, s_cnt, cnt,
+ debugl1(bcs->cs,"tiger write_raw: c%d %p-%p %d/%d %d %x", bcs->channel,
+ buf, p, s_cnt, cnt,
bcs->hw.tiger.sendcnt, bcs->cs->hw.njet.irqstat0);
if (bcs->cs->debug & L1_DEB_HSCX_FIFO)
printframe(bcs->cs, bcs->hw.tiger.sp, s_cnt, "snd");
cs->bcs[1].hw.tiger.s_end = cs->bcs[0].hw.tiger.s_end;
memset(cs->bcs[0].hw.tiger.send, 0xff, NETJET_DMA_TXSIZE * sizeof(unsigned int));
- debugl1(cs, "tiger: send buf %x - %x", (u_int)cs->bcs[0].hw.tiger.send,
- (u_int)(cs->bcs[0].hw.tiger.send + NETJET_DMA_TXSIZE - 1));
+ debugl1(cs, "tiger: send buf %p - %p", cs->bcs[0].hw.tiger.send,
+ cs->bcs[0].hw.tiger.send + NETJET_DMA_TXSIZE - 1);
outl(virt_to_bus(cs->bcs[0].hw.tiger.send),
cs->hw.njet.base + NETJET_DMA_READ_START);
outl(virt_to_bus(cs->bcs[0].hw.tiger.s_irq),
"HiSax: No memory for tiger.rec\n");
return;
}
- debugl1(cs, "tiger: rec buf %x - %x", (u_int)cs->bcs[0].hw.tiger.rec,
- (u_int)(cs->bcs[0].hw.tiger.rec + NETJET_DMA_RXSIZE - 1));
+ debugl1(cs, "tiger: rec buf %p - %p", cs->bcs[0].hw.tiger.rec,
+ cs->bcs[0].hw.tiger.rec + NETJET_DMA_RXSIZE - 1);
cs->bcs[1].hw.tiger.rec = cs->bcs[0].hw.tiger.rec;
memset(cs->bcs[0].hw.tiger.rec, 0xff, NETJET_DMA_RXSIZE * sizeof(unsigned int));
outl(virt_to_bus(cs->bcs[0].hw.tiger.rec),
st5481_usb_device_ctrl_msg(adapter, FFMSK_D, 0xfc, NULL, NULL);
st5481_in_mode(d_in, L1_MODE_HDLC);
-#if LOOPBACK
+#ifdef LOOPBACK
// Turn loopback on (data sent on B and D looped back)
st5481_usb_device_ctrl_msg(cs, LBB, 0x04, NULL, NULL);
#endif
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
- * ../../../Documentation/isdn/HiSax.cert
+ * Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
#
config HYSDN
tristate "Hypercope HYSDN cards (Champ, Ergo, Metro) support (module only)"
- depends on m && PROC_FS && BROKEN_ON_SMP
+ depends on m && PROC_FS && PCI && BROKEN_ON_SMP
help
Say Y here if you have one of Hypercope's active PCI ISDN cards
Champ, Ergo and Metro. You will then get a module called hysdn.
}
}
detach_capi_ctr(ctrl);
- ctrl->driverdata = 0;
+ ctrl->driverdata = NULL;
kfree(card->hyctrlinfo);
***********************************************************/
-int hycapi_init()
+int hycapi_init(void)
{
int i;
for(i=0;i<CAPI_MAXAPPL;i++) {
/* write conf file -> boot or send cfg line to card */
/****************************************************/
static ssize_t
-hysdn_conf_write(struct file *file, const char *buf, size_t count, loff_t * off)
+hysdn_conf_write(struct file *file, const char __user *buf, size_t count, loff_t * off)
{
struct conf_writedata *cnf;
int i;
uchar ch, *cp;
- if (&file->f_pos != off) /* fs error check */
- return (-ESPIPE);
if (!count)
return (0); /* nothing to handle */
/* read conf file -> output card info data */
/*******************************************/
static ssize_t
-hysdn_conf_read(struct file *file, char *buf, size_t count, loff_t * off)
+hysdn_conf_read(struct file *file, char __user *buf, size_t count, loff_t * off)
{
char *cp;
int i;
- if (off != &file->f_pos) /* fs error check */
- return -ESPIPE;
-
if (file->f_mode & FMODE_READ) {
if (!(cp = file->private_data))
return (-EFAULT); /* should never happen */
return (-EPERM); /* no permission this time */
}
unlock_kernel();
- return (0);
+ return nonseekable_open(ino, filep);
} /* hysdn_conf_open */
/***************************/
/* write log file -> set log level bits */
/****************************************/
static ssize_t
-hysdn_log_write(struct file *file, const char *buf, size_t count, loff_t * off)
+hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t * off)
{
ulong u = 0;
int found = 0;
long base = 10;
hysdn_card *card = (hysdn_card *) file->private_data;
- if (&file->f_pos != off) /* fs error check */
- return (-ESPIPE);
-
if (count > (sizeof(valbuf) - 1))
count = sizeof(valbuf) - 1; /* limit length */
if (copy_from_user(valbuf, buf, count))
/* read log file */
/******************/
static ssize_t
-hysdn_log_read(struct file *file, char *buf, size_t count, loff_t * off)
+hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t * off)
{
struct log_data *inf;
int len;
if ((len = strlen(inf->log_start)) <= count) {
if (copy_to_user(buf, inf->log_start, len))
return -EFAULT;
- file->f_pos += len;
+ *off += len;
return (len);
}
return (0);
return (-EPERM); /* no permission this time */
}
unlock_kernel();
- return (0);
+ return nonseekable_open(ino, filep);
} /* hysdn_log_open */
/*******************************************************************************/
int retval;
char *p;
- if (off != &file->f_pos)
- return -ESPIPE;
-
lock_kernel();
if (minor == ISDN_MINOR_STATUS) {
if (!file->private_data) {
int chidx;
int retval;
- if (off != &file->f_pos)
- return -ESPIPE;
-
if (minor == ISDN_MINOR_STATUS)
return -EPERM;
if (!dev->drivers)
}
#endif
out:
+ nonseekable_open(ino, filep);
return retval;
}
unsigned long expires = 0;
int tmp = 0;
int period = lp->cisco_keepalive_period;
- char debserint = lp->cisco_debserint;
+ s8 debserint = lp->cisco_debserint;
int rc = 0;
if (lp->p_encap != ISDN_NET_ENCAP_CISCOHDLCK)
* stuff needed to support the Linux X.25 PLP code on top of devices that
* can provide a lab_b service using the concap_proto mechanism.
* This module supports a network interface wich provides lapb_sematics
- * -- as defined in ../../Documentation/networking/x25-iface.txt -- to
+ * -- as defined in Documentation/networking/x25-iface.txt -- to
* the upper layer and assumes that the lower layer provides a reliable
* data link service by means of the concap_device_ops callbacks.
*
}
/* process a frame handed over to us from linux network layer. First byte
- semantics as defined in ../../Documentation/networking/x25-iface.txt
+ semantics as defined in Documentation/networking/x25-iface.txt
*/
int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
{
#
config ISDN_DRV_PCBIT
tristate "PCBIT-D support"
- depends on ISDN_I4L && ISA
+ depends on ISDN_I4L && ISA && (BROKEN || !PPC)
help
This enables support for the PCBIT ISDN-card. This card is
manufactured in Portugal by Octal. For running this card,
card->bar0 + TPAM_PAGE_REGISTER);
/* write the value */
- writel(val, card->bar0 + (((u32)addr) & TPAM_PAGE_SIZE));
+ writel(val, card->bar0 + (((unsigned long)addr) & TPAM_PAGE_SIZE));
}
/*
events; also, the PowerBook button device will be enabled so you can
change the screen brightness.
-config MAC_FLOPPY
- bool "Support for PowerMac floppy"
- depends on PPC_PMAC && !PPC_PMAC64
- help
- If you have a SWIM-3 (Super Woz Integrated Machine 3; from Apple)
- floppy controller, say Y here. Most commonly found in PowerMacs.
-
config MAC_SERIAL
tristate "Support for PowerMac serial ports (OBSOLETE DRIVER)"
depends on PPC_PMAC && BROKEN
G5 machines.
config ANSLCD
- bool "Support for ANS LCD display"
+ tristate "Support for ANS LCD display"
depends on ADB_CUDA && PPC_PMAC
endmenu
static __inline__ void adb_wait_ms(unsigned int ms)
{
if (current->pid && adb_probe_task_pid &&
- adb_probe_task_pid == current->pid) {
- set_task_state(current, TASK_UNINTERRUPTIBLE);
- schedule_timeout(1 + ms * HZ / 1000);
- } else
+ adb_probe_task_pid == current->pid)
+ msleep(ms);
+ else
mdelay(ms);
}
write_lock_irq(&adb_handler_lock);
}
ret = 0;
- adb_handler[index].handler = 0;
+ adb_handler[index].handler = NULL;
}
write_unlock_irq(&adb_handler_lock);
up(&adb_handler_sem);
#define FLAG_POWER_FROM_FN 0x00000002
#define FLAG_EMU_FWDEL_DOWN 0x00000004
-static struct adbhid *adbhid[16] = { 0 };
+static struct adbhid *adbhid[16];
static void adbhid_probe(void);
if (adbhid[id]->keycode)
kfree(adbhid[id]->keycode);
kfree(adbhid[id]);
- adbhid[id] = 0;
+ adbhid[id] = NULL;
}
}
static ssize_t __pmac
-anslcd_write( struct file * file, const char * buf,
+anslcd_write( struct file * file, const char __user * buf,
size_t count, loff_t *ppos )
{
- const char * p = buf;
+ const char __user *p = buf;
int i;
#ifdef DEBUG
anslcd_ioctl( struct inode * inode, struct file * file,
unsigned int cmd, unsigned long arg )
{
- char ch, *temp;
+ char ch, __user *temp;
#ifdef DEBUG
printk(KERN_DEBUG "LCD: ioctl(%d,%d)\n",cmd,arg);
anslcd_write_byte_ctrl ( 0x02 );
return 0;
case ANSLCD_SENDCTRL:
- temp = (char *) arg;
+ temp = (char __user *) arg;
__get_user(ch, temp);
for (; ch; temp++) { /* FIXME: This is ugly, but should work, as a \0 byte is not a valid command code */
anslcd_write_byte_ctrl ( ch );
"* Welcome to *" /* Line #2 */
"********************"; /* Line #4 */
-int __init
+static int __init
anslcd_init(void)
{
int a;
return 0;
}
-__initcall(anslcd_init);
+static void __exit
+anslcd_exit(void)
+{
+ misc_deregister(&anslcd_dev);
+ iounmap(anslcd_ptr);
+}
+module_init(anslcd_init);
+module_exit(anslcd_exit);
req->data[i] = req->data[i+1];
--req->nbytes;
- req->next = 0;
+ req->next = NULL;
req->sent = 0;
req->complete = 0;
req->reply_len = 0;
local_irq_save(flags);
if (in_8(&adb->intr.r) != 0)
- macio_adb_interrupt(0, 0, 0);
+ macio_adb_interrupt(0, NULL, NULL);
local_irq_restore(flags);
}
static void rxdma_start(struct mac_serial * info, int current);
static void rxdma_to_tty(struct mac_serial * info);
-#ifndef MIN
-#define MIN(a,b) ((a) < (b) ? (a) : (b))
-#endif
-
/*
* tmp_buf is used as a temporary buffer by serial_write. We need to
* lock it in case the copy_from_user blocks while swapping in a page,
if (from_user) {
down(&tmp_buf_sem);
while (1) {
- c = MIN(count,
- MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - info->xmit_head));
+ c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - info->xmit_head));
if (c <= 0)
break;
break;
}
spin_lock_irqsave(&info->lock, flags);
- c = MIN(c, MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - info->xmit_head));
+ c = min_t(int, c, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - info->xmit_head));
memcpy(info->xmit_buf + info->xmit_head, tmp_buf, c);
info->xmit_head = ((info->xmit_head + c) &
(SERIAL_XMIT_SIZE-1));
} else {
while (1) {
spin_lock_irqsave(&info->lock, flags);
- c = MIN(count,
- MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - info->xmit_head));
+ c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
+ SERIAL_XMIT_SIZE - info->xmit_head));
if (c <= 0) {
spin_unlock_irqrestore(&info->lock, flags);
break;
} else if (char_time == 0)
char_time = 1;
if (timeout)
- char_time = MIN(char_time, timeout);
+ char_time = min_t(unsigned long, char_time, timeout);
while ((read_zsreg(info->zs_channel, 1) & ALL_SNT) == 0) {
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(char_time);
#endif /* CONFIG_BLK_DEV_IDE */
return -ENODEV;
}
+EXPORT_SYMBOL(check_media_bay);
int __pmac check_media_bay_by_base(unsigned long base, int what)
{
/* Force an immediate detect */
set_mb_power(bay, 0);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(MS_TO_HZ(MB_POWER_DELAY));
+ msleep(MB_POWER_DELAY);
bay->content_id = MB_NO;
bay->last_value = bay->ops->content(bay);
bay->value_count = MS_TO_HZ(MB_STABLE_DELAY);
bay->state = mb_empty;
do {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(MS_TO_HZ(MB_POLL_DELAY));
+ msleep(MB_POLL_DELAY);
media_bay_step(i);
} while((bay->state != mb_empty) &&
(bay->state != mb_up));
bay->sleeping = 1;
set_mb_power(bay, 0);
up(&bay->lock);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(MS_TO_HZ(MB_POLL_DELAY));
+ msleep(MB_POLL_DELAY);
mdev->ofdev.dev.power_state = state;
}
return 0;
/* Force MB power to 0 */
down(&bay->lock);
set_mb_power(bay, 0);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(MS_TO_HZ(MB_POWER_DELAY));
+ msleep(MB_POWER_DELAY);
if (bay->ops->content(bay) != bay->content_id) {
printk("mediabay%d: content changed during sleep...\n", bay->index);
up(&bay->lock);
bay->cd_retry = 0;
#endif
do {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(MS_TO_HZ(MB_POLL_DELAY));
+ msleep(MB_POLL_DELAY);
media_bay_step(bay->index);
} while((bay->state != mb_empty) &&
(bay->state != mb_up));
return nw;
}
+static int start_fcu(void)
+{
+ unsigned char buf = 0xff;
+ int rc;
+
+ rc = fan_write_reg(0xe, &buf, 1);
+ if (rc < 0)
+ return -EIO;
+ rc = fan_write_reg(0x2e, &buf, 1);
+ if (rc < 0)
+ return -EIO;
+ return 0;
+}
+
static int set_rpm_fan(int fan, int rpm)
{
unsigned char buf[2];
down(&driver_lock);
+ if (start_fcu() < 0) {
+ printk(KERN_ERR "kfand: failed to start FCU\n");
+ up(&driver_lock);
+ goto out;
+ }
+
/* Set the PCI fan once for now */
set_pwm_fan(SLOTS_FAN_PWM_ID, SLOTS_FAN_DEFAULT_PWM);
schedule_timeout(HZ - elapsed);
}
+ out:
DBG("main_control_loop ended\n");
ctrl_task = 0;
req->complete = 1;
return -EINVAL;
}
- req->next = 0;
+ req->next = NULL;
req->sent = 0;
req->complete = 0;
req->reply_len = 0;
* disable_irq(), would that work on m68k ? --BenH
*/
local_irq_save(flags);
- cuda_interrupt(0, 0, 0);
+ cuda_interrupt(0, NULL, NULL);
local_irq_restore(flags);
}
}
#endif /* CONFIG_PMAC_PBOOK */
/* Create /proc/pmu */
- proc_pmu_root = proc_mkdir("pmu", 0);
+ proc_pmu_root = proc_mkdir("pmu", NULL);
if (proc_pmu_root) {
int i;
proc_pmu_info = create_proc_read_entry("info", 0, proc_pmu_root,
}
if (pmu_state == idle)
adb_int_pending = 1;
- via_pmu_interrupt(0, 0, 0);
+ via_pmu_interrupt(0, NULL, NULL);
udelay(10);
}
return -EINVAL;
}
- req->next = 0;
+ req->next = NULL;
req->sent = 0;
req->complete = 0;
return;
if (disable_poll)
return;
- via_pmu_interrupt(0, 0, 0);
+ via_pmu_interrupt(0, NULL, NULL);
}
void __openfirmware
/* Kicks ADB read when PMU is suspended */
adb_int_pending = 1;
do {
- via_pmu_interrupt(0, 0, 0);
+ via_pmu_interrupt(0, NULL, NULL);
} while (pmu_suspended && (adb_int_pending || pmu_state != idle
|| req_awaiting_reply));
}
if (!via)
return;
while((pmu_state != idle && pmu_state != locked) || !req->complete)
- via_pmu_interrupt(0, 0, 0);
+ via_pmu_interrupt(0, NULL, NULL);
}
/* This function loops until the PMU is idle and prevents it from
spin_unlock_irqrestore(&pmu_lock, flags);
if (req_awaiting_reply)
adb_int_pending = 1;
- via_pmu_interrupt(0, 0, 0);
+ via_pmu_interrupt(0, NULL, NULL);
spin_lock_irqsave(&pmu_lock, flags);
if (!adb_int_pending && pmu_state == idle && !req_awaiting_reply) {
#ifdef SUSPEND_USES_PMU
printk(KERN_ERR "PMU: extra ADB reply\n");
return;
}
- req_awaiting_reply = 0;
+ req_awaiting_reply = NULL;
if (len <= 2)
req->reply_len = 0;
else {
pmu_irq_stats[1]++;
adb_int_pending = 1;
spin_unlock_irqrestore(&pmu_lock, flags);
- via_pmu_interrupt(0, 0, 0);
+ via_pmu_interrupt(0, NULL, NULL);
return IRQ_HANDLED;
}
return IRQ_NONE;
if (n->list.next == 0)
return -ENOENT;
list_del(&n->list);
- n->list.next = 0;
+ n->list.next = NULL;
return 0;
}
/* Force a poll of ADB interrupts */
adb_int_pending = 1;
- via_pmu_interrupt(0, 0, 0);
+ via_pmu_interrupt(0, NULL, NULL);
/* Restart jiffies & scheduling */
wakeup_decrementer();
lock_kernel();
if (pp != 0) {
- file->private_data = 0;
+ file->private_data = NULL;
spin_lock_irqsave(&all_pvt_lock, flags);
list_del(&pp->list);
spin_unlock_irqrestore(&all_pvt_lock, flags);
u_int cmd, u_long arg)
{
struct pmu_private *pp = filp->private_data;
+ __u32 __user *argp = (__u32 __user *)arg;
int error;
switch (cmd) {
sleep_in_progress = 0;
return error;
case PMU_IOC_CAN_SLEEP:
- return put_user((u32)can_sleep, (__u32 *)arg);
+ return put_user((u32)can_sleep, argp);
#ifdef CONFIG_PMAC_BACKLIGHT
/* Backlight should have its own device or go via
error = get_backlight_level();
if (error < 0)
return error;
- return put_user(error, (__u32 *)arg);
+ return put_user(error, argp);
case PMU_IOC_SET_BACKLIGHT:
{
__u32 value;
if (sleep_in_progress)
return -EBUSY;
- error = get_user(value, (__u32 *)arg);
+ error = get_user(value, argp);
if (!error)
error = set_backlight_level(value);
return error;
#endif /* CONFIG_INPUT_ADBHID */
#endif /* CONFIG_PMAC_BACKLIGHT */
case PMU_IOC_GET_MODEL:
- return put_user(pmu_kind, (__u32 *)arg);
+ return put_user(pmu_kind, argp);
case PMU_IOC_HAS_ADB:
- return put_user(pmu_has_adb, (__u32 *)arg);
+ return put_user(pmu_has_adb, argp);
}
return -EINVAL;
}
/*f8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
};
-int pmu_probe()
+int pmu_probe(void)
{
if (macintosh_config->adb_type == MAC_ADB_PB1) {
pmu_kind = PMU_68K_V1;
}
static void
-recv_byte()
+recv_byte(void)
{
char c;
}
static void
-pmu_start()
+pmu_start(void)
{
unsigned long flags;
struct adb_request *req;
}
void
-pmu_poll()
+pmu_poll(void)
{
unsigned long flags;
if (uptodate)
multipath_end_bh_io(mp_bh, uptodate);
- else {
+ else if ((bio->bi_rw & (1 << BIO_RW_AHEAD)) == 0) {
/*
* oops, IO error:
*/
bdevname(rdev->bdev,b),
(unsigned long long)bio->bi_sector);
multipath_reschedule_retry(mp_bh);
- }
+ } else
+ multipath_end_bh_io(mp_bh, 0);
rdev_dec_pending(rdev, conf->mddev);
return 0;
}
" to another IO path\n",
bdevname(bio->bi_bdev,b),
(unsigned long long)bio->bi_sector);
+ *bio = *(mp_bh->master_bio);
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
+ bio->bi_rw |= (1 << BIO_RW_FAILFAST);
+ bio->bi_end_io = multipath_end_request;
+ bio->bi_private = mp_bh;
generic_make_request(bio);
}
}
*/
#define RAID5_DEBUG 0
#define RAID5_PARANOIA 1
-#if RAID5_PARANOIA && CONFIG_SMP
+#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() if (!spin_is_locked(&conf->device_lock)) BUG()
#else
# define CHECK_DEVLOCK()
#define RAID6_DEBUG 0 /* Extremely verbose printk */
#define RAID6_PARANOIA 1 /* Check spinlocks */
#define RAID6_DUMPSTATE 0 /* Include stripe cache state in /proc/mdstat */
-#if RAID6_PARANOIA && CONFIG_SMP
+#if RAID6_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() if (!spin_is_locked(&conf->device_lock)) BUG()
#else
# define CHECK_DEVLOCK()
config DVB_B2C2_SKYSTAR
tristate "Technisat Skystar2 PCI"
- depends on DVB_CORE
+ depends on DVB_CORE && PCI
help
Support for the Skystar2 PCI DVB card by Technisat, which
is equipped with the FlexCopII chipset by B2C2.
neq |= f->maskandnotmode[i] & xor;
}
- if (f->doneq & !neq)
+ if (f->doneq && !neq)
return 0;
return feed->cb.sec (feed->feed.sec.secbuf, feed->feed.sec.seclen,
/* Copy arguments into temp kernel buffer */
switch (_IOC_DIR(cmd)) {
case _IOC_NONE:
- parg = NULL;
+ /*
+ * For this command, the pointer is actually an integer
+ * argument.
+ */
+ parg = (void *) arg;
break;
case _IOC_READ: /* some v4l ioctls are marked wrong ... */
case _IOC_WRITE:
#include "dvb_functions.h"
-static inline __u32 iov_crc32( __u32 c, struct iovec *iov, unsigned int cnt )
+static inline __u32 iov_crc32( __u32 c, struct kvec *iov, unsigned int cnt )
{
unsigned int j;
for (j = 0; j < cnt; j++)
/* Check CRC32, we've got it in our skb already. */
unsigned short ulen = htons(priv->ule_sndu_len);
unsigned short utype = htons(priv->ule_sndu_type);
- struct iovec iov[4] = {
+ struct kvec iov[4] = {
{ &ulen, sizeof ulen },
{ &utype, sizeof utype },
{ NULL, 0 },
#include <linux/list.h>
#include <linux/devfs_fs_kernel.h>
-#define DVB_MAJOR 250
+#define DVB_MAJOR 212
#define DVB_DEVICE_VIDEO 0
#define DVB_DEVICE_AUDIO 1
*/
-
-#define __KERNEL_SYSCALLS__
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
loff_t filesize;
char *dp;
- fd = open(fn, 0, 0);
+ fd = sys_open(fn, 0, 0);
if (fd == -1) {
printk("%s: unable to open '%s'.\n", __FUNCTION__, fn);
return -EIO;
}
- filesize = lseek(fd, 0L, 2);
+ filesize = sys_lseek(fd, 0L, 2);
if (filesize <= 0 || filesize < SP8870_FIRMWARE_OFFSET + SP8870_FIRMWARE_SIZE) {
printk("%s: firmware filesize to small '%s'\n", __FUNCTION__, fn);
sys_close(fd);
return -EIO;
}
- lseek(fd, SP8870_FIRMWARE_OFFSET, 0);
- if (read(fd, dp, SP8870_FIRMWARE_SIZE) != SP8870_FIRMWARE_SIZE) {
+ sys_lseek(fd, SP8870_FIRMWARE_OFFSET, 0);
+ if (sys_read(fd, dp, SP8870_FIRMWARE_SIZE) != SP8870_FIRMWARE_SIZE) {
printk("%s: failed to read '%s'.\n",__FUNCTION__, fn);
vfree(dp);
sys_close(fd);
next 0x4000 loaded. This may change in future versions.
*/
-#define __KERNEL_SYSCALLS__
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
// Load the firmware
set_fs(get_ds());
- fd = open(sp887x_firmware, 0, 0);
+ fd = sys_open(sp887x_firmware, 0, 0);
if (fd < 0) {
printk(KERN_WARNING "%s: Unable to open firmware %s\n", __FUNCTION__,
sp887x_firmware);
return -EIO;
}
- filesize = lseek(fd, 0L, 2);
+ filesize = sys_lseek(fd, 0L, 2);
if (filesize <= 0) {
printk(KERN_WARNING "%s: Firmware %s is empty\n", __FUNCTION__,
sp887x_firmware);
// read it!
// read the first 16384 bytes from the file
// ignore the first 10 bytes
- lseek(fd, 10, 0);
- if (read(fd, firmware, fw_size) != fw_size) {
+ sys_lseek(fd, 10, 0);
+ if (sys_read(fd, firmware, fw_size) != fw_size) {
printk(KERN_WARNING "%s: Failed to read firmware\n", __FUNCTION__);
vfree(firmware);
sys_close(fd);
*/
-#define __KERNEL_SYSCALLS__
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/fs.h>
-#include <linux/unistd.h>
#include <linux/fcntl.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
// Load the firmware
set_fs(get_ds());
- fd = open(tda1004x_firmware, 0, 0);
+ fd = sys_open(tda1004x_firmware, 0, 0);
if (fd < 0) {
printk("%s: Unable to open firmware %s\n", __FUNCTION__,
tda1004x_firmware);
return -EIO;
}
- filesize = lseek(fd, 0L, 2);
+ filesize = sys_lseek(fd, 0L, 2);
if (filesize <= 0) {
printk("%s: Firmware %s is empty\n", __FUNCTION__,
tda1004x_firmware);
}
// read it!
- lseek(fd, fw_offset, 0);
- if (read(fd, firmware, fw_size) != fw_size) {
+ sys_lseek(fd, fw_offset, 0);
+ if (sys_read(fd, firmware, fw_size) != fw_size) {
printk("%s: Failed to read firmware\n", __FUNCTION__);
vfree(firmware);
sys_close(fd);
config RADIO_MAXIRADIO
tristate "Guillemot MAXI Radio FM 2000 radio"
- depends on VIDEO_DEV
+ depends on VIDEO_DEV && PCI
---help---
Choose Y here if you have this radio card. This card may also be
found as Gemtek PCI FM.
if ((i=aci_rw_cmd(ACI_READ_TUNERSTATION, -1, -1))<0)
return i;
-#if DEBUG
+#ifdef DEBUG
printk("check_sig: 0x%x\n", i);
#endif
if (i & 0x80) {
if ((i=aci_rds_cmd(RDS_RXVALUE, &buf, 1))<0)
return i;
-#if DEBUG
+#ifdef DEBUG
printk("rds-signal: %d\n", buf);
#endif
if (buf > 15) {
unsigned long *freq = arg;
pcm20->freq = *freq;
i=pcm20_setfreq(pcm20, pcm20->freq);
-#if DEBUG
+#ifdef DEBUG
printk("First view (setfreq): 0x%x\n", i);
#endif
return i;
struct saa7134_buf *buf)
{
struct saa7134_buf *next = NULL;
-#if DEBUG_SPINLOCKS
+#ifdef DEBUG_SPINLOCKS
BUG_ON(!spin_is_locked(&dev->slock));
#endif
struct saa7134_dmaqueue *q,
unsigned int state)
{
-#if DEBUG_SPINLOCKS
+#ifdef DEBUG_SPINLOCKS
BUG_ON(!spin_is_locked(&dev->slock));
#endif
dprintk("buffer_finish %p\n",q->curr);
{
struct saa7134_buf *buf,*next = NULL;
-#if DEBUG_SPINLOCKS
+#ifdef DEBUG_SPINLOCKS
BUG_ON(!spin_is_locked(&dev->slock));
#endif
BUG_ON(NULL != q->curr);
enum v4l2_field cap = V4L2_FIELD_ANY;
enum v4l2_field ov = V4L2_FIELD_ANY;
-#if DEBUG_SPINLOCKS
+#ifdef DEBUG_SPINLOCKS
BUG_ON(!spin_is_locked(&dev->slock));
#endif
#include <linux/videodev.h>
#include <linux/spinlock.h>
#include <linux/sem.h>
+#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <asm/io.h>
{NULL, 0, 0, 0},
};
-struct procfs_io {
- char *buffer;
- char *end;
- int neof;
- int count;
- int count_current;
-};
-
static void
setparam (struct zoran *zr,
char *name,
}
}
-static int
-print_procfs (struct procfs_io *io,
- const char *fmt,
- ...)
-{
- va_list args;
- int i;
-
- if (io->buffer >= io->end) {
- io->neof++;
- return 0;
- }
- if (io->count > io->count_current++)
- return 0;
- va_start(args, fmt);
- i = vsprintf(io->buffer, fmt, args);
- io->buffer += i;
- va_end(args);
- return i;
-}
-
-static void
-zoran_procfs_output (struct procfs_io *io,
- void *data)
+static int zoran_show(struct seq_file *p, void *v)
{
+ struct zoran *zr = p->private;
int i;
- struct zoran *zr;
- zr = (struct zoran *) data;
- print_procfs(io, "ZR36067 registers:");
- for (i = 0; i < 0x130; i += 4) {
- if (!(i % 16)) {
- print_procfs(io, "\n%03X", i);
- };
- print_procfs(io, " %08X ", btread(i));
- };
- print_procfs(io, "\n");
+ seq_printf(p, "ZR36067 registers:\n");
+ for (i = 0; i < 0x130; i += 16)
+ seq_printf(p, "%03X %08X %08X %08X %08X \n", i,
+ btread(i), btread(i+4), btread(i+8), btread(i+12));
+ return 0;
}
-static int
-zoran_read_proc (char *buffer,
- char **start,
- off_t offset,
- int size,
- int *eof,
- void *data)
+static int zoran_open(struct inode *inode, struct file *file)
{
- struct procfs_io io;
- int nbytes;
-
- io.buffer = buffer;
- io.end = buffer + size - 128; // Just to make it a little bit safer
- io.count = offset;
- io.count_current = 0;
- io.neof = 0;
- zoran_procfs_output(&io, data);
- *start = (char *) (io.count_current - io.count);
- nbytes = (int) (io.buffer - buffer);
- *eof = !io.neof;
- return nbytes;
-
- return 0;
+ struct zoran *data = PDE(inode)->data;
+ return single_open(file, zoran_show, data);
}
-static int
-zoran_write_proc (struct file *file,
- const char __user *buffer,
- unsigned long count,
- void *data)
+static ssize_t zoran_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
{
+ struct zoran *zr = PDE(file->f_dentry->d_inode)->data;
char *string, *sp;
char *line, *ldelim, *varname, *svar, *tdelim;
- struct zoran *zr;
if (count > 32768) /* Stupidity filter */
return -EINVAL;
- zr = (struct zoran *) data;
-
string = sp = vmalloc(count + 1);
if (!string) {
dprintk(1,
return -EFAULT;
}
string[count] = 0;
- dprintk(4, KERN_INFO "%s: write_proc: name=%s count=%lu data=%x\n",
- ZR_DEVNAME(zr), file->f_dentry->d_name.name, count, (int) data);
+ dprintk(4, KERN_INFO "%s: write_proc: name=%s count=%zu zr=%p\n",
+ ZR_DEVNAME(zr), file->f_dentry->d_name.name, count, zr);
ldelim = " \t\n";
tdelim = "=";
line = strpbrk(sp, ldelim);
return count;
}
+
+static struct file_operations zoran_operations = {
+ .open = zoran_open,
+ .read = seq_read,
+ .write = zoran_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif
int
snprintf(name, 7, "zoran%d", zr->id);
if ((zr->zoran_proc = create_proc_entry(name, 0, NULL))) {
- zr->zoran_proc->read_proc = zoran_read_proc;
- zr->zoran_proc->write_proc = zoran_write_proc;
zr->zoran_proc->data = zr;
zr->zoran_proc->owner = THIS_MODULE;
+ zr->zoran_proc->proc_fops = &zoran_operations;
dprintk(2,
KERN_INFO
"%s: procfs entry /proc/%s allocated. data=%p\n",
char name[8];
snprintf(name, 7, "zoran%d", zr->id);
- if (zr->zoran_proc) {
+ if (zr->zoran_proc)
remove_proc_entry(name, NULL);
- }
zr->zoran_proc = NULL;
#endif
}
# drivers/mtd/chips/Kconfig
-# $Id: Kconfig,v 1.8 2004/07/13 22:32:02 dwmw2 Exp $
+# $Id: Kconfig,v 1.9 2004/07/16 15:32:14 dwmw2 Exp $
menu "RAM/ROM/Flash chip drivers"
depends on MTD!=n
with this driver will return -ENODEV upon access.
config MTD_OBSOLETE_CHIPS
+ depends on MTD && BROKEN
bool "Older (theoretically obsoleted now) drivers for non-CFI chips"
help
This option does not enable any code directly, but will allow you to
ofs = instr->addr;
len = instr->len;
- ret = cfi_amdstd_varsize_frob(mtd, do_erase_oneblock, ofs, len, 0);
+ ret = cfi_amdstd_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
if (ret)
return ret;
int ret;
DEBUG(MTD_DEBUG_LEVEL3,
- "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
+ "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
__func__, ofs, len);
debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
int ret;
DEBUG(MTD_DEBUG_LEVEL3,
- "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
+ "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
__func__, ofs, len);
debug_dump_locks(mtd, do_printlockstatus_oneblock, ofs, len, 0);
*
* (C) 2000 Red Hat. GPL'd
*
- * $Id: cfi_cmdset_0020.c,v 1.13 2004/07/12 21:52:50 dwmw2 Exp $
+ * $Id: cfi_cmdset_0020.c,v 1.14 2004/07/20 02:44:25 dwmw2 Exp $
*
* 10/10/2000 Nicolas Pitre <nico@cam.org>
* - completely revamped method functions so they are aware and
kfree(cfi);
}
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define cfi_staa_init init_module
-#define cfi_staa_exit cleanup_module
-#endif
-
static char im_name[]="cfi_cmdset_0020";
int __init cfi_staa_init(void)
pagenr = to >> PAGE_SHIFT;
offset = to & ~PAGE_MASK;
- DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %d pagenr = %d offset = %d\n",
+ DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %zd pagenr = %d offset = %d\n",
buf, (long)to, len, pagenr, offset);
/* see if we have to do a partial write at the start */
down(&dev->wrbuf_mutex);
- DEBUG(3, "blkmtd: write: start_len = %d len = %d end_len = %d pagecnt = %d\n",
+ DEBUG(3, "blkmtd: write: start_len = %zd len = %zd end_len = %zd pagecnt = %d\n",
start_len, len, end_len, pagecnt);
if(start_len) {
/* do partial start region */
struct page *page;
- DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %d offset = %d\n",
+ DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %zd offset = %d\n",
pagenr, start_len, offset);
BUG_ON(!buf);
page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
lock_page(page);
if(PageDirty(page)) {
- err("to = %lld start_len = %d len = %d end_len = %d pagenr = %d\n",
+ err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
to, start_len, len, end_len, pagenr);
BUG();
}
if(end_len) {
/* do the third region */
struct page *page;
- DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %d\n",
+ DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %zd\n",
pagenr, end_len);
BUG_ON(!buf);
page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
lock_page(page);
if(PageDirty(page)) {
- err("to = %lld start_len = %d len = %d end_len = %d pagenr = %d\n",
+ err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
to, start_len, len, end_len, pagenr);
BUG();
}
if(bio)
blkmtd_write_out(bio);
- DEBUG(2, "blkmtd: write: end, retlen = %d, err = %d\n", *retlen, err);
+ DEBUG(2, "blkmtd: write: end, retlen = %zd, err = %d\n", *retlen, err);
up(&dev->wrbuf_mutex);
if(retlen)
size_t from;
u_long len;
int err = -EIO;
- int retlen;
+ size_t retlen;
instr->state = MTD_ERASING;
from = instr->addr;
len = instr->len;
/* check erase region has valid start and length */
- DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%x len = 0x%lx\n",
+ DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%zx len = 0x%lx\n",
mtd->name+9, from, len);
while(numregions) {
DEBUG(3, "blkmtd: checking erase region = 0x%08X size = 0x%X num = 0x%x\n",
if(!numregions) {
/* Not a valid erase block */
- err("erase: invalid erase request 0x%lX @ 0x%08X", len, from);
+ err("erase: invalid erase request 0x%lX @ 0x%08zX", len, from);
instr->state = MTD_ERASE_FAILED;
err = -EIO;
}
if(instr->state != MTD_ERASE_FAILED) {
/* do the erase */
- DEBUG(3, "Doing erase from = %d len = %ld\n", from, len);
+ DEBUG(3, "Doing erase from = %zd len = %ld\n", from, len);
err = write_pages(dev, NULL, from, len, &retlen);
if(err || retlen != len) {
err("erase failed err = %d", err);
int pagenr, pages;
size_t thislen = 0;
- DEBUG(2, "blkmtd: read: dev = `%s' from = %ld len = %d buf = %p\n",
- mtd->name+9, (long int)from, len, buf);
+ DEBUG(2, "blkmtd: read: dev = `%s' from = %lld len = %zd buf = %p\n",
+ mtd->name+9, from, len, buf);
if(from > mtd->size)
return -EINVAL;
readerr:
if(retlen)
*retlen = thislen;
- DEBUG(2, "blkmtd: end read: retlen = %d, err = %d\n", thislen, err);
+ DEBUG(2, "blkmtd: end read: retlen = %zd, err = %d\n", thislen, err);
return err;
}
if(!len)
return 0;
- DEBUG(2, "blkmtd: write: dev = `%s' to = %ld len = %d buf = %p\n",
- mtd->name+9, (long int)to, len, buf);
+ DEBUG(2, "blkmtd: write: dev = `%s' to = %lld len = %zd buf = %p\n",
+ mtd->name+9, to, len, buf);
if(to >= mtd->size) {
return -ENOSPC;
{
struct mtd_erase_region_info *info = NULL;
- DEBUG(2, "calc_erase_regions, es = %d size = %d regions = %d\n",
+ DEBUG(2, "calc_erase_regions, es = %zd size = %zd regions = %d\n",
erase_size, total_size, *regions);
/* Make any user specified erasesize be a power of 2
and at least PAGE_SIZE */
break;
}
} while(!(*regions));
- DEBUG(2, "calc_erase_regions done, es = %d size = %d regions = %d\n",
+ DEBUG(2, "calc_erase_regions done, es = %zd size = %zd regions = %d\n",
erase_size, total_size, *regions);
return info;
}
size_t *retlen, u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
-static int doc_writev_ecc(struct mtd_info *mtd, const struct iovec *vecs,
+static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen,
u_char *eccbuf, struct nand_oobinfo *oobsel);
static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t * retlen, u_char * buf)
{
/* Just a special case of doc_read_ecc */
- return doc_read_ecc(mtd, from, len, retlen, buf, NULL, 0);
+ return doc_read_ecc(mtd, from, len, retlen, buf, NULL, NULL);
}
static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
size_t * retlen, const u_char * buf)
{
char eccbuf[6];
- return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, 0);
+ return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, NULL);
}
static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
return 0;
}
-static int doc_writev_ecc(struct mtd_info *mtd, const struct iovec *vecs,
+static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen,
u_char *eccbuf, struct nand_oobinfo *oobsel)
{
static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf);
static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf, u_char *eccbuf, int oobsel);
+ size_t *retlen, u_char *buf, u_char *eccbuf,
+ struct nand_oobinfo *oobsel);
static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf, u_char *eccbuf, int oobsel);
+ size_t *retlen, const u_char *buf, u_char *eccbuf,
+ struct nand_oobinfo *oobsel);
static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t *retlen, u_char *buf);
static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t *retlen, u_char *buf)
{
/* Just a special case of doc_read_ecc */
- return doc_read_ecc(mtd, from, len, retlen, buf, NULL, 0);
+ return doc_read_ecc(mtd, from, len, retlen, buf, NULL, NULL);
}
static int doc_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf, u_char *eccbuf, int oobsel)
+ size_t *retlen, u_char *buf, u_char *eccbuf,
+ struct nand_oobinfo *oobsel)
{
int i, ret;
volatile char dummy;
size_t *retlen, const u_char *buf)
{
char eccbuf[6];
- return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, 0);
+ return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, NULL);
}
static int doc_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf, u_char *eccbuf, int oobsel)
+ size_t *retlen, const u_char *buf, u_char *eccbuf,
+ struct nand_oobinfo *oobsel)
{
int i,ret = 0;
volatile char dummy;
/*
- * $Id: pmc551.c,v 1.26 2004/07/14 17:25:07 dwmw2 Exp $
+ * $Id: pmc551.c,v 1.27 2004/07/20 02:44:26 dwmw2 Exp $
*
* PMC551 PCI Mezzanine Ram Device
*
#include <linux/mtd/pmc551.h>
#include <linux/mtd/compatmac.h>
-#if LINUX_VERSION_CODE > 0x20300
-#define PCI_BASE_ADDRESS(dev) (dev->resource[0].start)
-#else
-#define PCI_BASE_ADDRESS(dev) (dev->base_address[0])
-#endif
-
static struct mtd_info *pmc551list;
static int pmc551_erase (struct mtd_info *mtd, struct erase_info *instr)
(size<1024)?size:(size<1048576)?size>>10:size>>20,
(size<1024)?'B':(size<1048576)?'K':'M',
size, ((dcmd&(0x1<<3)) == 0)?"non-":"",
- PCI_BASE_ADDRESS(dev)&PCI_BASE_ADDRESS_MEM_MASK );
+ (dev->resource[0].start)&PCI_BASE_ADDRESS_MEM_MASK );
/*
* Check to see the state of the memory
}
printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%lX\n",
- PCI_BASE_ADDRESS(PCI_Device));
+ PCI_Device->resource[0].start);
/*
* The PMC551 device acts VERY weird if you don't init it
printk(KERN_NOTICE "pmc551: Using specified aperture size %dM\n", asize>>20);
priv->asize = asize;
}
- priv->start = ioremap((PCI_BASE_ADDRESS(PCI_Device)
+ priv->start = ioremap(((PCI_Device->resource[0].start)
& PCI_BASE_ADDRESS_MEM_MASK),
priv->asize);
{
erase_unit_header_t header;
loff_t offset, max_offset;
- int ret;
+ size_t ret;
+ int err;
part->header.FormattedSize = 0;
max_offset = (0x100000<part->mbd.mtd->size)?0x100000:part->mbd.mtd->size;
/* Search first megabyte for a valid FTL header */
(offset + sizeof(header)) < max_offset;
offset += part->mbd.mtd->erasesize ? : 0x2000) {
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
+ err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
(unsigned char *)&header);
- if (ret)
- return ret;
+ if (err)
+ return err;
if (strcmp(header.DataOrgTuple+3, "FTL100") == 0) break;
}
if (ret) {
printk(KERN_NOTICE "ftl_cs: block write failed!\n");
printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, virt_addr"
- " = 0x%x, Offset = 0x%x\n", log_addr, virt_addr,
+ " = 0x%x, Offset = 0x%zx\n", log_addr, virt_addr,
offset);
return -EIO;
}
u16 pot = inftl->LastFreeEUN;
int silly = inftl->nb_blocks;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findfreeblock(inftl=0x%x,"
- "desperate=%d)\n", (int)inftl, desperate);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findfreeblock(inftl=%p,"
+ "desperate=%d)\n", inftl, desperate);
/*
* Normally, we force a fold to happen before we run out of free
struct inftl_oob oob;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=0x%x,thisVUC=%d,"
- "pending=%d)\n", (int)inftl, thisVUC, pendingblock);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
+ "pending=%d)\n", inftl, thisVUC, pendingblock);
memset(BlockMap, 0xff, sizeof(BlockMap));
memset(BlockDeleted, 0, sizeof(BlockDeleted));
u16 ChainLength = 0, thislen;
u16 chain, EUN;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_makefreeblock(inftl=0x%x,"
- "pending=%d)\n", (int)inftl, pendingblock);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_makefreeblock(inftl=%p,"
+ "pending=%d)\n", inftl, pendingblock);
for (chain = 0; chain < inftl->nb_blocks; chain++) {
EUN = inftl->VUtable[chain];
size_t retlen;
int silly, silly2 = 3;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=0x%x,"
- "block=%d)\n", (int)inftl, block);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=%p,"
+ "block=%d)\n", inftl, block);
do {
/*
struct inftl_bci bci;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=0x%x,"
- "thisVUC=%d)\n", (int)inftl, thisVUC);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=%p,"
+ "thisVUC=%d)\n", inftl, thisVUC);
memset(BlockUsed, 0, sizeof(BlockUsed));
memset(BlockDeleted, 0, sizeof(BlockDeleted));
size_t retlen;
struct inftl_bci bci;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_deleteblock(inftl=0x%x,"
- "block=%d)\n", (int)inftl, block);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_deleteblock(inftl=%p,"
+ "block=%d)\n", inftl, block);
while (thisEUN < inftl->nb_blocks) {
if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) +
struct inftl_oob oob;
char *p, *pend;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=0x%x,block=%ld,"
- "buffer=0x%x)\n", (int)inftl, block, (int)buffer);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=%p,block=%ld,"
+ "buffer=%p)\n", inftl, block, buffer);
/* Is block all zero? */
pend = buffer + SECTORSIZE;
struct inftl_bci bci;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=0x%x,block=%ld,"
- "buffer=0x%x)\n", (int)inftl, block, (int)buffer);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld,"
+ "buffer=%p)\n", inftl, block, buffer);
while (thisEUN < inftl->nb_blocks) {
if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) +
u8 buf[SECTORSIZE];
struct INFTLMediaHeader *mh = &inftl->MediaHdr;
struct INFTLPartition *ip;
- int retlen;
+ size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=0x%x)\n",
- (int)inftl);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
/*
* Assume logical EraseSize == physical erasesize for starting the
inftl->PUtable = kmalloc(inftl->nb_blocks * sizeof(u16), GFP_KERNEL);
if (!inftl->PUtable) {
printk(KERN_WARNING "INFTL: allocation of PUtable "
- "failed (%d bytes)\n",
+ "failed (%zd bytes)\n",
inftl->nb_blocks * sizeof(u16));
return -ENOMEM;
}
if (!inftl->VUtable) {
kfree(inftl->PUtable);
printk(KERN_WARNING "INFTL: allocation of VUtable "
- "failed (%d bytes)\n",
+ "failed (%zd bytes)\n",
inftl->nb_blocks * sizeof(u16));
return -ENOMEM;
}
static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
int len, int check_oob)
{
- int i, retlen;
u8 buf[SECTORSIZE + inftl->mbd.mtd->oobsize];
+ size_t retlen;
+ int i;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: check_free_sectors(inftl=0x%x,"
- "address=0x%x,len=%d,check_oob=%d)\n", (int)inftl,
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: check_free_sectors(inftl=%p,"
+ "address=0x%x,len=%d,check_oob=%d)\n", inftl,
address, len, check_oob);
for (i = 0; i < len; i += SECTORSIZE) {
*/
int INFTL_formatblock(struct INFTLrecord *inftl, int block)
{
- int retlen;
+ size_t retlen;
struct inftl_unittail uci;
struct erase_info *instr = &inftl->instr;
int physblock;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=0x%x,"
- "block=%d)\n", (int)inftl, block);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=%p,"
+ "block=%d)\n", inftl, block);
memset(instr, 0, sizeof(struct erase_info));
int chain_length, do_format_chain;
struct inftl_unithead1 h0;
struct inftl_unittail h1;
- int i, retlen;
+ size_t retlen;
+ int i;
u8 *ANACtable, ANAC;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_mount(inftl=0x%x)\n", (int)s);
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_mount(inftl=%p)\n", s);
/* Search for INFTL MediaHeader and Spare INFTL Media Header */
if (find_boot_record(s) < 0) {
info->mtd->unlock = ichxrom_unlock;
}
if (info->mtd->size > info->map.size) {
- printk(KERN_WARNING MOD_NAME " rom(%u) larger than window(%u). fixing...\n",
+ printk(KERN_WARNING MOD_NAME " rom(%u) larger than window(%lu). fixing...\n",
info->mtd->size, info->map.size);
info->mtd->size = info->map.size;
}
}
default:
- DEBUG(MTD_DEBUG_LEVEL0, "Invalid ioctl %x (MEMGETINFO = %x)\n", cmd, MEMGETINFO);
+ DEBUG(MTD_DEBUG_LEVEL0, "Invalid ioctl %x (MEMGETINFO = %lx)\n", cmd, (unsigned long)MEMGETINFO);
ret = -ENOTTY;
}
/*
- * $Id: mtdcore.c,v 1.42 2004/07/13 10:21:13 dwmw2 Exp $
+ * $Id: mtdcore.c,v 1.43 2004/07/23 15:20:46 dwmw2 Exp $
*
* Core registration and callback routines for MTD
* drivers and users.
*
*/
-#include <linux/version.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
* Derived from drivers/mtd/spia.c
* Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
*
- * $Id: autcpu12.c,v 1.19 2004/07/12 15:02:15 dwmw2 Exp $
+ * $Id: autcpu12.c,v 1.20 2004/07/20 02:44:26 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
*/
static struct mtd_info *autcpu12_mtd = NULL;
-/*
- * Module stuff
- */
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define autcpu12_init init_module
-#define autcpu12_cleanup cleanup_module
-#endif
-
static int autcpu12_io_base = CS89712_VIRT_BASE;
static int autcpu12_fio_pbase = AUTCPU12_PHYS_SMC;
static int autcpu12_fio_ctrl = AUTCPU12_SMC_SELECT_OFFSET;
*
* Interface to generic NAND code for M-Systems DiskOnChip devices
*
- * $Id: diskonchip.c,v 1.23 2004/07/13 00:14:35 dbrown Exp $
+ * $Id: diskonchip.c,v 1.25 2004/07/16 13:54:27 dbrown Exp $
*/
#include <linux/kernel.h>
ReadDOC(docptr, ReadPipeInit);
for (i=0; i < len-1; i++)
- buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
+ buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
/* Terminate read pipeline */
buf[i] = ReadDOC(docptr, LastDataRead);
{
struct nand_chip *this = mtd->priv;
struct doc_priv *doc = (void *)this->priv;
- int offs, end = (MAX_MEDIAHEADER_SCAN << this->phys_erase_shift);
- int ret, retlen;
+ unsigned offs, end = (MAX_MEDIAHEADER_SCAN << this->phys_erase_shift);
+ int ret;
+ size_t retlen;
end = min(end, mtd->size); // paranoia
for (offs = 0; offs < end; offs += mtd->erasesize) {
struct doc_priv *doc = (void *)this->priv;
u_char *buf = this->data_buf;
struct NFTLMediaHeader *mh = (struct NFTLMediaHeader *) buf;
- const int psize = 1 << this->page_shift;
- int blocks, maxblocks;
+ const unsigned psize = 1 << this->page_shift;
+ unsigned blocks, maxblocks;
int offs, numheaders;
if (!(numheaders=find_media_headers(mtd, buf, "ANAND", 1))) return 0;
//#endif
blocks = mtd->size >> this->phys_erase_shift;
- maxblocks = min(32768, mtd->erasesize - psize);
+ maxblocks = min(32768U, mtd->erasesize - psize);
if (mh->UnitSizeFactor == 0x00) {
/* Auto-determine UnitSizeFactor. The constraints are:
mh->UnitSizeFactor = 0xff;
while (blocks > maxblocks) {
blocks >>= 1;
- maxblocks = min(32768, (maxblocks << 1) + psize);
+ maxblocks = min(32768U, (maxblocks << 1) + psize);
mh->UnitSizeFactor--;
}
printk(KERN_WARNING "UnitSizeFactor=0x00 detected. Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
printk(KERN_INFO "Setting virtual erase size to %d\n", mtd->erasesize);
blocks = mtd->size >> this->bbt_erase_shift;
- maxblocks = min(32768, mtd->erasesize - psize);
+ maxblocks = min(32768U, mtd->erasesize - psize);
}
if (blocks > maxblocks) {
return -EIO;
}
- if (mtd->size == (8<<20)) {
-#if 0
-/* This doesn't seem to work for me. I get ECC errors on every page. */
- /* The Millennium 8MiB is actually an NFTL device! */
- mtd->name = "DiskOnChip Millennium 8MiB (NFTL)";
- return nftl_scan_bbt(mtd);
-#endif
- printk(KERN_ERR "DiskOnChip Millennium 8MiB is not supported.\n");
- return -EIO;
- }
-
this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT |
NAND_BBT_VERSION;
if (inftl_bbt_write)
this->write_buf = doc2001_writebuf;
this->read_buf = doc2001_readbuf;
this->verify_buf = doc2001_verifybuf;
- this->scan_bbt = inftl_scan_bbt;
ReadDOC(doc->virtadr, ChipID);
ReadDOC(doc->virtadr, ChipID);
can have multiple chips. */
doc2000_count_chips(mtd);
mtd->name = "DiskOnChip 2000 (INFTL Model)";
+ this->scan_bbt = inftl_scan_bbt;
return (4 * doc->chips_per_floor);
} else {
/* Bog-standard Millennium */
doc->chips_per_floor = 1;
mtd->name = "DiskOnChip Millennium";
+ this->scan_bbt = nftl_scan_bbt;
return 1;
}
}
}
for (mtd = doclist; mtd; mtd = doc->nextdoc) {
+ unsigned char oldval;
+ unsigned char newval;
nand = mtd->priv;
doc = (void *)nand->priv;
/* Use the alias resolution register to determine if this is
in fact the same DOC aliased to a new address. If writes
to one chip's alias resolution register change the value on
the other chip, they're the same chip. */
- unsigned char oldval = ReadDOC(doc->virtadr, AliasResolution);
- unsigned char newval = ReadDOC(virtadr, AliasResolution);
+ oldval = ReadDOC(doc->virtadr, AliasResolution);
+ newval = ReadDOC(virtadr, AliasResolution);
if (oldval != newval)
continue;
WriteDOC(~newval, virtadr, AliasResolution);
* Derived from drivers/mtd/autcpu12.c
* Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
*
- * $Id: tx4925ndfmc.c,v 1.2 2004/03/27 19:55:53 gleixner Exp $
+ * $Id: tx4925ndfmc.c,v 1.3 2004/07/20 02:44:26 dwmw2 Exp $
*
* Copyright (C) 2001 Toshiba Corporation
*
*/
static struct mtd_info *tx4925ndfmc_mtd = NULL;
-/*
- * Module stuff
- */
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define tx4925ndfmc_init init_module
-#define tx4925ndfmc_cleanup cleanup_module
-#endif
-
/*
* Define partitions for flash devices
*/
outb(PKT_BUF_SZ >> 8, ioaddr + TxFreeThreshold); /* Room for a packet. */
/* Clear the Tx ring. */
for (i = 0; i < TX_RING_SIZE; i++)
- vp->tx_skbuff[i] = 0;
+ vp->tx_skbuff[i] = NULL;
outl(0, ioaddr + DownListPtr);
}
/* Set receiver mode: presumably accept b-case and phys addr only. */
break; /* It still hasn't been processed. */
if (lp->tx_skbuff[entry]) {
dev_kfree_skb_irq(lp->tx_skbuff[entry]);
- lp->tx_skbuff[entry] = 0;
+ lp->tx_skbuff[entry] = NULL;
}
dirty_tx++;
}
for (i = 0; i < RX_RING_SIZE; i++)
if (vp->rx_skbuff[i]) {
dev_kfree_skb(vp->rx_skbuff[i]);
- vp->rx_skbuff[i] = 0;
+ vp->rx_skbuff[i] = NULL;
}
}
if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
for (i = 0; i < TX_RING_SIZE; i++)
if (vp->tx_skbuff[i]) {
dev_kfree_skb(vp->tx_skbuff[i]);
- vp->tx_skbuff[i] = 0;
+ vp->tx_skbuff[i] = NULL;
}
}
#endif
unsigned long flags;
- MPU_PORT(dev, PORT_RESET, 0);
+ MPU_PORT(dev, PORT_RESET, NULL);
udelay(100); /* Wait 100us - seems to help */
failed:
printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
- MPU_PORT(dev, PORT_RESET, 0);
+ MPU_PORT(dev, PORT_RESET, NULL);
return -1;
}
config OAKNET
tristate "National DP83902AV (Oak ethernet) support"
- depends on NET_ETHERNET && PPC
+ depends on NET_ETHERNET && PPC && BROKEN
select CRC32
help
Say Y if your machine has this type of Ethernet network card.
To compile this driver as a module, choose M here: the module
will be called tg3. This is recommended.
-config GIANFAR
- tristate "Gianfar Ethernet"
- depends on 85xx
- help
- This driver supports the Gigabit TSEC on the MPC85xx
- family of chips, and the FEC on the 8540
-
-config GFAR_NAPI
- bool "NAPI Support"
- depends on GIANFAR
-
endmenu
#
obj-$(CONFIG_IBM_EMAC) += ibm_emac/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_BONDING) += bonding/
-obj-$(CONFIG_GIANFAR) += gianfar.o gianfar_ethtool.o gianfar_phy.o
#
# link order important here
*/
#define ACE_MINI_SIZE 100
-#define ACE_MINI_BUFSIZE (ACE_MINI_SIZE + 2 + 16)
-#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 2+4+16)
-#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 2+4+16)
+#define ACE_MINI_BUFSIZE ACE_MINI_SIZE
+#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 4)
+#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 4)
/*
* There seems to be a magic difference in the effect between 995 and 996
ringp = &ap->skb->rx_std_skbuff[i];
mapping = pci_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
- ACE_STD_BUFSIZE - (2 + 16),
+ ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->rx_std_ring[i].size = 0;
ringp = &ap->skb->rx_mini_skbuff[i];
mapping = pci_unmap_addr(ringp,mapping);
pci_unmap_page(ap->pdev, mapping,
- ACE_MINI_BUFSIZE - (2 + 16),
+ ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->rx_mini_ring[i].size = 0;
ringp = &ap->skb->rx_jumbo_skbuff[i];
mapping = pci_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
- ACE_JUMBO_BUFSIZE - (2 + 16),
+ ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->rx_jumbo_ring[i].size = 0;
set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
- info->rx_std_ctrl.max_len = ACE_STD_MTU + ETH_HLEN + 4;
+ info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
info->rx_std_ctrl.flags =
RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;
struct rx_desc *rd;
dma_addr_t mapping;
- skb = alloc_skb(ACE_STD_BUFSIZE, GFP_ATOMIC);
+ skb = alloc_skb(ACE_STD_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
if (!skb)
break;
- /*
- * Make sure IP header starts on a fresh cache line.
- */
- skb_reserve(skb, 2 + 16);
+ skb_reserve(skb, NET_IP_ALIGN);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
- ACE_STD_BUFSIZE - (2 + 16),
+ ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_std_skbuff[idx].skb = skb;
pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
rd = &ap->rx_std_ring[idx];
set_aceaddr(&rd->addr, mapping);
- rd->size = ACE_STD_MTU + ETH_HLEN + 4;
+ rd->size = ACE_STD_BUFSIZE;
rd->idx = idx;
idx = (idx + 1) % RX_STD_RING_ENTRIES;
}
struct rx_desc *rd;
dma_addr_t mapping;
- skb = alloc_skb(ACE_MINI_BUFSIZE, GFP_ATOMIC);
+ skb = alloc_skb(ACE_MINI_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
if (!skb)
break;
- /*
- * Make sure the IP header ends up on a fresh cache line
- */
- skb_reserve(skb, 2 + 16);
+ skb_reserve(skb, NET_IP_ALIGN);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
- ACE_MINI_BUFSIZE - (2 + 16),
+ ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_mini_skbuff[idx].skb = skb;
pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
rd = &ap->rx_mini_ring[idx];
set_aceaddr(&rd->addr, mapping);
- rd->size = ACE_MINI_SIZE;
+ rd->size = ACE_MINI_BUFSIZE;
rd->idx = idx;
idx = (idx + 1) % RX_MINI_RING_ENTRIES;
}
struct rx_desc *rd;
dma_addr_t mapping;
- skb = alloc_skb(ACE_JUMBO_BUFSIZE, GFP_ATOMIC);
+ skb = alloc_skb(ACE_JUMBO_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
if (!skb)
break;
- /*
- * Make sure the IP header ends up on a fresh cache line
- */
- skb_reserve(skb, 2 + 16);
+ skb_reserve(skb, NET_IP_ALIGN);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
- ACE_JUMBO_BUFSIZE - (2 + 16),
+ ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_jumbo_skbuff[idx].skb = skb;
pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
rd = &ap->rx_jumbo_ring[idx];
set_aceaddr(&rd->addr, mapping);
- rd->size = ACE_JUMBO_MTU + ETH_HLEN + 4;
+ rd->size = ACE_JUMBO_BUFSIZE;
rd->idx = idx;
idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
}
*/
case 0:
rip = &ap->skb->rx_std_skbuff[skbidx];
- mapsize = ACE_STD_BUFSIZE - (2 + 16);
+ mapsize = ACE_STD_BUFSIZE;
rxdesc = &ap->rx_std_ring[skbidx];
std_count++;
break;
case BD_FLG_JUMBO:
rip = &ap->skb->rx_jumbo_skbuff[skbidx];
- mapsize = ACE_JUMBO_BUFSIZE - (2 + 16);
+ mapsize = ACE_JUMBO_BUFSIZE;
rxdesc = &ap->rx_jumbo_ring[skbidx];
atomic_dec(&ap->cur_jumbo_bufs);
break;
case BD_FLG_MINI:
rip = &ap->skb->rx_mini_skbuff[skbidx];
- mapsize = ACE_MINI_BUFSIZE - (2 + 16);
+ mapsize = ACE_MINI_BUFSIZE;
rxdesc = &ap->rx_mini_ring[skbidx];
mini_count++;
break;
#define tigonFwBssAddr 0x00015dd0
#define tigonFwBssLen 0x2080
#ifdef CONFIG_ACENIC_OMIT_TIGON_I
-#define tigonFwText 0
-#define tigonFwData 0
-#define tigonFwRodata 0
+#define tigonFwText NULL
+#define tigonFwData NULL
+#define tigonFwRodata NULL
#else
/* Generated by genfw.c */
static u32 tigonFwText[(MAX_TEXT_LEN/4) + 1] __initdata = {
lp->tx_skbuff[tx_index]->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
- lp->tx_skbuff[tx_index] = 0;
+ lp->tx_skbuff[tx_index] = NULL;
lp->tx_dma_addr[tx_index] = 0;
}
lp->tx_complete_idx++;
if( dev->mc_count == 0 ){
/* get only own packets */
mc_filter[1] = mc_filter[0] = 0;
- lp->mc_list = 0;
+ lp->mc_list = NULL;
lp->options &= ~OPTION_MULTICAST_ENABLE;
amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
/* disable promiscous mode */
return 0;
}
-#ifdef CONFIG_NET_FASTROUTE
-static int bond_accept_fastpath(struct net_device *bond_dev, struct dst_entry *dst)
-{
- return -1;
-}
-#endif
-
/*------------------------- Device initialization ---------------------------*/
/*
bond_set_mode_ops(bond_dev, bond->params.mode);
bond_dev->destructor = free_netdev;
-#ifdef CONFIG_NET_FASTROUTE
- bond_dev->accept_fastpath = bond_accept_fastpath;
-#endif
/* Initialize the device options */
bond_dev->tx_queue_len = 0;
{
if (lp->dma_buff) {
free_pages((unsigned long)(lp->dma_buff), get_order(lp->dmasize * 1024));
- lp->dma_buff = 0;
+ lp->dma_buff = NULL;
}
}
#endif
* $Id: asstruct.h,v 1.1.1.1 1994/10/23 05:08:32 rick Exp $
*/
-#if ASSEMBLER
+#ifdef ASSEMBLER
# define MO(t,a) (a)
# define VMO(t,a) (a)
/************************************************************************/
typedef volatile struct _I596_RBD
{
-#if INTEL_RETENTIVE
+#ifdef INTEL_RETENTIVE
ushort count; /* Length of data in buf */
ushort offset;
#else
#endif
vol struct _I596_RBD *next; /* Next buffer descriptor in list */
uchar *buf; /* Data buffer */
-#if INTEL_RETENTIVE
+#ifdef INTEL_RETENTIVE
ushort size; /* Size of buf (constant) */
ushort zero;
#else
/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
for (i = 0; i < TX_RING_SIZE; i++) {
- np->tx_skbuff[i] = 0;
+ np->tx_skbuff[i] = NULL;
np->tx_ring[i].status = cpu_to_le64 (TFDDone);
np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
((i+1)%TX_RING_SIZE) *
sizeof (struct netdev_desc));
np->rx_ring[i].status = 0;
np->rx_ring[i].fraginfo = 0;
- np->rx_skbuff[i] = 0;
+ np->rx_skbuff[i] = NULL;
}
/* Allocate the rx buffers */
else
dev_kfree_skb (skb);
- np->tx_skbuff[entry] = 0;
+ np->tx_skbuff[entry] = NULL;
entry = (entry + 1) % TX_RING_SIZE;
tx_use++;
}
pci_unmap_single (np->pdev, np->rx_ring[i].fraginfo,
skb->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb (skb);
- np->rx_skbuff[i] = 0;
+ np->rx_skbuff[i] = NULL;
}
}
for (i = 0; i < TX_RING_SIZE; i++) {
pci_unmap_single (np->pdev, np->tx_ring[i].fraginfo,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb (skb);
- np->tx_skbuff[i] = 0;
+ np->tx_skbuff[i] = NULL;
}
}
{
}
-#ifdef CONFIG_NET_FASTROUTE
-static int dummy_accept_fastpath(struct net_device *dev, struct dst_entry *dst)
-{
- return -1;
-}
-#endif
-
static void __init dummy_setup(struct net_device *dev)
{
/* Initialize the device structure. */
dev->hard_start_xmit = dummy_xmit;
dev->set_multicast_list = set_multicast_list;
dev->set_mac_address = dummy_set_address;
-#ifdef CONFIG_NET_FASTROUTE
- dev->accept_fastpath = dummy_accept_fastpath;
-#endif
/* Fill in device structure with ethernet-generic values. */
ether_setup(dev);
#include "e1000_hw.h"
-#if DBG
+#ifdef DBG
#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
#else
#define E1000_DBG(args...)
#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
-#if DBG
+#ifdef DBG
#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
#else
/* Set up the Tx queue early.. */
sp->cur_tx = 0;
sp->dirty_tx = 0;
- sp->last_cmd = 0;
+ sp->last_cmd = NULL;
sp->tx_full = 0;
sp->in_interrupt = 0;
le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(sp->tx_skbuff[entry]);
- sp->tx_skbuff[entry] = 0;
+ sp->tx_skbuff[entry] = NULL;
}
sp->dirty_tx++;
}
le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(sp->tx_skbuff[entry]);
- sp->tx_skbuff[entry] = 0;
+ sp->tx_skbuff[entry] = NULL;
}
dirty_tx++;
}
/* Free all the skbuffs in the Rx and Tx queues. */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = sp->rx_skbuff[i];
- sp->rx_skbuff[i] = 0;
+ sp->rx_skbuff[i] = NULL;
/* Clear the Rx descriptors. */
if (skb) {
pci_unmap_single(sp->pdev,
for (i = 0; i < TX_RING_SIZE; i++) {
struct sk_buff *skb = sp->tx_skbuff[i];
- sp->tx_skbuff[i] = 0;
+ sp->tx_skbuff[i] = NULL;
/* Clear the Tx descriptors. */
if (skb) {
pci_unmap_single(sp->pdev,
last_cmd = sp->last_cmd;
sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
- sp->tx_skbuff[entry] = 0; /* Redundant. */
+ sp->tx_skbuff[entry] = NULL; /* Redundant. */
sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
sp->tx_ring[entry].link =
cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
last_cmd = sp->last_cmd;
sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
- sp->tx_skbuff[entry] = 0;
+ sp->tx_skbuff[entry] = NULL;
sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
sp->tx_ring[entry].link =
cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
sp->last_cmd = mc_setup_frm;
/* Change the command to a NoOp, pointing to the CmdMulti command. */
- sp->tx_skbuff[entry] = 0;
+ sp->tx_skbuff[entry] = NULL;
sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
ep->rx_ring[i].next = ep->rx_ring_dma +
(i+1)*sizeof(struct epic_rx_desc);
- ep->rx_skbuff[i] = 0;
+ ep->rx_skbuff[i] = NULL;
}
/* Mark the last entry as wrapping the ring. */
ep->rx_ring[i-1].next = ep->rx_ring_dma;
/* The Tx buffer descriptor is filled in as needed, but we
do need to clear the ownership bit. */
for (i = 0; i < TX_RING_SIZE; i++) {
- ep->tx_skbuff[i] = 0;
+ ep->tx_skbuff[i] = NULL;
ep->tx_ring[i].txstatus = 0x0000;
ep->tx_ring[i].next = ep->tx_ring_dma +
(i+1)*sizeof(struct epic_tx_desc);
pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
- ep->tx_skbuff[entry] = 0;
+ ep->tx_skbuff[entry] = NULL;
}
#ifndef final_version
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
skb = ep->rx_skbuff[i];
- ep->rx_skbuff[i] = 0;
+ ep->rx_skbuff[i] = NULL;
ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
ep->rx_ring[i].buflength = 0;
if (skb) {
}
for (i = 0; i < TX_RING_SIZE; i++) {
skb = ep->tx_skbuff[i];
- ep->tx_skbuff[i] = 0;
+ ep->tx_skbuff[i] = NULL;
if (!skb)
continue;
pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
if (!eql_is_full(queue)) {
- slave_t *duplicate_slave = 0;
+ slave_t *duplicate_slave = NULL;
duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
if (duplicate_slave != 0)
#include <linux/config.h>
#include <linux/module.h>
-
+#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
static int ethertap_debug;
static int max_taps = 1;
-MODULE_PARM(max_taps, "i");
+module_param(max_taps, int, 0);
MODULE_PARM_DESC(max_taps,"Max number of ethernet tap devices");
static struct net_device **tap_map; /* Returns the tap device for a given netlink */
hmp->tx_ring[entry].addr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- hmp->tx_skbuff[entry] = 0;
+ hmp->tx_skbuff[entry] = NULL;
}
hmp->tx_ring[entry].status_n_length = 0;
if (entry >= TX_RING_SIZE-1)
pci_unmap_single(hmp->pci_dev, hmp->tx_ring[i].addr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- hmp->tx_skbuff[i] = 0;
+ hmp->tx_skbuff[i] = NULL;
}
}
pci_unmap_single(hmp->pci_dev, hmp->rx_ring[i].addr,
hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
- hmp->rx_skbuff[i] = 0;
+ hmp->rx_skbuff[i] = NULL;
}
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
/* Initialize all Rx descriptors. */
for (i = 0; i < RX_RING_SIZE; i++) {
hmp->rx_ring[i].status_n_length = 0;
- hmp->rx_skbuff[i] = 0;
+ hmp->rx_skbuff[i] = NULL;
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
for (i = 0; i < TX_RING_SIZE; i++) {
- hmp->tx_skbuff[i] = 0;
+ hmp->tx_skbuff[i] = NULL;
hmp->tx_ring[i].status_n_length = 0;
}
/* Mark the last entry of the ring */
skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
- hmp->tx_skbuff[entry] = 0;
+ hmp->tx_skbuff[entry] = NULL;
}
hmp->tx_ring[entry].status_n_length = 0;
if (entry >= TX_RING_SIZE-1)
hmp->rx_ring[i].addr, hmp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
- hmp->rx_skbuff[i] = 0;
+ hmp->rx_skbuff[i] = NULL;
}
}
for (i = 0; i < TX_RING_SIZE; i++) {
hmp->tx_ring[i].addr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- hmp->tx_skbuff[i] = 0;
+ hmp->tx_skbuff[i] = NULL;
}
}
write_lock(&disc_data_lock);
sp = tty->disc_data;
- tty->disc_data = 0;
+ tty->disc_data = NULL;
write_unlock(&disc_data_lock);
if (sp == 0)
return;
unregister_netdev(sp->dev);
}
-static int sp_set_mac_address(struct net_device *dev, void *addr)
+static int sp_set_mac_address(struct net_device *dev, void __user *addr)
{
return copy_from_user(dev->dev_addr, addr, AX25_ADDR_LEN) ? -EFAULT : 0;
}
switch(cmd) {
case SIOCGIFNAME:
- err = copy_to_user((void *) arg, sp->dev->name,
+ err = copy_to_user((void __user *) arg, sp->dev->name,
strlen(sp->dev->name) + 1) ? -EFAULT : 0;
break;
case SIOCGIFENCAP:
- err = put_user(0, (int *)arg);
+ err = put_user(0, (int __user *)arg);
break;
case SIOCSIFENCAP:
- if (get_user(tmp, (int *) arg)) {
+ if (get_user(tmp, (int __user *) arg)) {
err = -EFAULT;
break;
}
break;
case SIOCSIFHWADDR:
- err = sp_set_mac_address(sp->dev, (void *) arg);
+ err = sp_set_mac_address(sp->dev, (void __user *) arg);
break;
/* Allow stty to read, but not set, the serial port */
unregister_netdev(ax->dev);
- tty->disc_data = 0;
+ tty->disc_data = NULL;
ax->tty = NULL;
ax_free(ax);
}
-static int ax_set_mac_address(struct net_device *dev, void *addr)
+static int ax_set_mac_address(struct net_device *dev, void __user *addr)
{
if (copy_from_user(dev->dev_addr, addr, AX25_ADDR_LEN))
return -EFAULT;
/* Perform I/O control on an active ax25 channel. */
-static int ax25_disp_ioctl(struct tty_struct *tty, void *file, int cmd, void *arg)
+static int ax25_disp_ioctl(struct tty_struct *tty, void *file, int cmd, void __user *arg)
{
struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
unsigned int tmp;
return 0;
case SIOCGIFENCAP:
- return put_user(4, (int *)arg);
+ return put_user(4, (int __user *)arg);
case SIOCSIFENCAP:
- if (get_user(tmp, (int *)arg))
+ if (get_user(tmp, (int __user *)arg))
return -EFAULT;
ax->mode = tmp;
ax->dev->addr_len = AX25_ADDR_LEN; /* sizeof an AX.25 addr */
* ------------------
*
* You can find a subset of the documentation in
- * linux/Documentation/networking/z8530drv.txt.
+ * Documentation/networking/z8530drv.txt.
*/
/*
config TOSHIBA_FIR
tristate "Toshiba Type-O IR Port"
- depends on IRDA && !64BIT
+ depends on IRDA && PCI && !64BIT
help
Say Y here if you want to build support for the Toshiba Type-O IR
and Donau oboe chipsets. These chipsets are used by the Toshiba
config VIA_FIR
tristate "VIA VT8231/VT1211 SIR/MIR/FIR"
- depends on IRDA && ISA
+ depends on IRDA && ISA && PCI
help
Say Y here if you want to build support for the VIA VT8231
and VIA VT1211 IrDA controllers, found on the motherboards using
/* Delay a few ms just to allow the reset to complete */
msec_delay(IXGB_DELAY_AFTER_RESET);
ctrl_reg = IXGB_READ_REG(hw, CTRL0);
-#if DBG
+#ifdef DBG
/* Make sure the self-clearing global reset bit did self clear */
ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
#endif
#define ASSERT(x) if(!(x)) BUG()
#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
-#if DBG
+#ifdef DBG
#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
#else
/* Free all the skbuffs in the Rx and Tx queues. */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = lp->rx_skbuff[i];
- lp->rx_skbuff[i] = 0;
+ lp->rx_skbuff[i] = NULL;
lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
if (skb)
dev_kfree_skb_any(skb);
/* The Tx buffer address is filled in as needed, but we do need to clear
the upper ownership bit. */
for (i = 0; i < TX_RING_SIZE; i++) {
- lp->tx_skbuff[i] = 0;
+ lp->tx_skbuff[i] = NULL;
lp->tx_ring[i].base = 0;
}
in the bounce buffer. */
if (lp->tx_skbuff[entry]) {
dev_kfree_skb_irq(lp->tx_skbuff[entry]);
- lp->tx_skbuff[entry] = 0;
+ lp->tx_skbuff[entry] = NULL;
}
dirty_tx++;
}
kfree(rfd);
} while (rfd != lp->rx_tail);
- lp->rx_tail = 0;
+ lp->rx_tail = NULL;
#if 0
for (lp->rbd_list) {
for (i = 0; i < N_RX_RING; ++i) {
if (mp->rx_bufs[i] != 0) {
dev_kfree_skb(mp->rx_bufs[i]);
- mp->rx_bufs[i] = 0;
+ mp->rx_bufs[i] = NULL;
}
}
for (i = mp->tx_empty; i != mp->tx_fill; ) {
cp->xfer_status = 0;
++cp;
}
- mp->rx_bufs[i] = 0;
+ mp->rx_bufs[i] = NULL;
st_le16(&cp->command, DBDMA_STOP);
mp->rx_fill = i;
mp->rx_empty = 0;
mp->stats.rx_bytes += skb->len;
netif_rx(skb);
dev->last_rx = jiffies;
- mp->rx_bufs[i] = 0;
+ mp->rx_bufs[i] = NULL;
++mp->stats.rx_packets;
}
} else {
static int __init myri_sbus_probe(void)
{
struct sbus_bus *bus;
- struct sbus_dev *sdev = 0;
+ struct sbus_dev *sdev = NULL;
static int called;
int cards = 0, v;
{"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */
{"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */
{"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */
- {0,}
+ {NULL,}
};
#endif
static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */
MODULE_LICENSE("GPL");
-#ifdef MODULE_PARM
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
MODULE_PARM(bad, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
MODULE_PARM_DESC(io, "(ignored)");
MODULE_PARM_DESC(irq, "(ignored)");
MODULE_PARM_DESC(bad, "(ignored)");
-#endif
/* Module code fixed by David Weinehall */
frag = skb_shinfo(skb)->frags;
if (!nr_frags)
- frag = 0;
+ frag = NULL;
extsts = 0;
if (skb->ip_summed == CHECKSUM_HW) {
extsts |= EXTSTS_IPPKT;
Modified from Am79C90 data sheet.
---------------------------------------------------------------------------- */
-#if BROKEN_MULTICAST
+#ifdef BROKEN_MULTICAST
static void updateCRC(int *CRC, int bit)
{
write_lock_irq(&disc_data_lock);
ap = tty->disc_data;
- tty->disc_data = 0;
+ tty->disc_data = NULL;
write_unlock_irq(&disc_data_lock);
if (ap == 0)
return;
ap->olim = buf;
kfree_skb(ap->tpkt);
- ap->tpkt = 0;
+ ap->tpkt = NULL;
return 1;
}
clear_bit(XMIT_BUSY, &ap->xmit_flags);
if (ap->tpkt != 0) {
kfree_skb(ap->tpkt);
- ap->tpkt = 0;
+ ap->tpkt = NULL;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
ap->optr = ap->olim;
if (ap->tpkt != NULL) {
kfree_skb(ap->tpkt);
- ap->tpkt = 0;
+ ap->tpkt = NULL;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
/* queue the frame to be processed */
skb->cb[0] = ap->state;
skb_queue_tail(&ap->rqueue, skb);
- ap->rpkt = 0;
+ ap->rpkt = NULL;
ap->state = 0;
return;
struct ppp *ppp;
if (pf != 0) {
- file->private_data = 0;
+ file->private_data = NULL;
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
if (file == ppp->owner)
struct ppp_file *pf = file->private_data;
DECLARE_WAITQUEUE(wait, current);
ssize_t ret;
- struct sk_buff *skb = 0;
+ struct sk_buff *skb = NULL;
ret = count;
/* check if we should pass this packet */
/* the filter instructions are constructed assuming
a four-byte PPP header on each packet */
- {
- u_int16_t *p = (u_int16_t *) skb_push(skb, 2);
-
- *p = htons(4); /* indicate outbound in DLT_LINUX_SLL */;
- }
+ *skb_push(skb, 2) = 1;
if (ppp->pass_filter
&& sk_run_filter(skb, ppp->pass_filter,
ppp->pass_len) == 0) {
list = &ppp->channels;
if (list_empty(list)) {
/* nowhere to send the packet, just drop it */
- ppp->xmit_pending = 0;
+ ppp->xmit_pending = NULL;
kfree_skb(skb);
return;
}
spin_lock_bh(&pch->downl);
if (pch->chan) {
if (pch->chan->ops->start_xmit(pch->chan, skb))
- ppp->xmit_pending = 0;
+ ppp->xmit_pending = NULL;
} else {
/* channel got unregistered */
kfree_skb(skb);
- ppp->xmit_pending = 0;
+ ppp->xmit_pending = NULL;
}
spin_unlock_bh(&pch->downl);
return;
return;
#endif /* CONFIG_PPP_MULTILINK */
- ppp->xmit_pending = 0;
+ ppp->xmit_pending = NULL;
kfree_skb(skb);
}
/* check if the packet passes the pass and active filters */
/* the filter instructions are constructed assuming
a four-byte PPP header on each packet */
- {
- u_int16_t *p = (u_int16_t *) skb_push(skb, 2);
-
- *p = 0; /* indicate inbound in DLT_LINUX_SLL */
- }
+ *skb_push(skb, 2) = 0;
if (ppp->pass_filter
&& sk_run_filter(skb, ppp->pass_filter,
ppp->pass_len) == 0) {
if (pch == 0)
return; /* should never happen */
- chan->ppp = 0;
+ chan->ppp = NULL;
/*
* This ensures that we have returned from any calls into the
*/
down_write(&pch->chan_sem);
spin_lock_bh(&pch->downl);
- pch->chan = 0;
+ pch->chan = NULL;
spin_unlock_bh(&pch->downl);
up_write(&pch->chan_sem);
ppp_disconnect_channel(pch);
ppp->xstate = 0;
xcomp = ppp->xcomp;
xstate = ppp->xc_state;
- ppp->xc_state = 0;
+ ppp->xc_state = NULL;
ppp->rstate = 0;
rcomp = ppp->rcomp;
rstate = ppp->rc_state;
- ppp->rc_state = 0;
+ ppp->rc_state = NULL;
ppp_unlock(ppp);
if (xstate) {
if (ce->comp->compress_proto == proto)
return ce;
}
- return 0;
+ return NULL;
}
/* Register a compressor */
find_compressor(int type)
{
struct compressor_entry *ce;
- struct compressor *cp = 0;
+ struct compressor *cp = NULL;
spin_lock(&compressor_list_lock);
ce = find_comp_entry(type);
down(&all_ppp_sem);
ppp_lock(ppp);
dev = ppp->dev;
- ppp->dev = 0;
+ ppp->dev = NULL;
ppp_unlock(ppp);
/* This will call dev_close() for us. */
if (dev) {
ppp_ccp_closed(ppp);
if (ppp->vj) {
slhc_free(ppp->vj);
- ppp->vj = 0;
+ ppp->vj = NULL;
}
skb_queue_purge(&ppp->file.xq);
skb_queue_purge(&ppp->file.rq);
}
if (ppp->active_filter) {
kfree(ppp->active_filter);
- ppp->active_filter = 0;
+ ppp->active_filter = NULL;
}
#endif /* CONFIG_PPP_FILTER */
if (pch->file.index == unit)
return pch;
}
- return 0;
+ return NULL;
}
/*
write_lock_irq(&disc_data_lock);
ap = tty->disc_data;
- tty->disc_data = 0;
+ tty->disc_data = NULL;
write_unlock_irq(&disc_data_lock);
if (ap == 0)
return;
tty_stuffed = 1;
} else {
kfree_skb(ap->tpkt);
- ap->tpkt = 0;
+ ap->tpkt = NULL;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
flush:
if (ap->tpkt != 0) {
kfree_skb(ap->tpkt);
- ap->tpkt = 0;
+ ap->tpkt = NULL;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
spin_lock_bh(&ap->xmit_lock);
if (ap->tpkt != NULL) {
kfree_skb(ap->tpkt);
- ap->tpkt = 0;
+ ap->tpkt = NULL;
clear_bit(XMIT_FULL, &ap->xmit_flags);
done = 1;
}
#define PPPOE_HASH_BITS 4
#define PPPOE_HASH_SIZE (1<<PPPOE_HASH_BITS)
+static struct ppp_channel_ops pppoe_chan_ops;
+
static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
skb ? session_id : session_id | 0x40, frame_id);
if (skb) {
dev_kfree_skb(skb);
- skb = 0;
+ skb = NULL;
}
good_frame:
dev->last_rx = jiffies;
stats->rx_bytes+=dlen;
stats->rx_packets++;
- lp->rx_skb[ns] = 0;
+ lp->rx_skb[ns] = NULL;
lp->rx_session_id[ns] |= 0x40;
return 0;
if (ns < NPIDS) {
if ((skb = lp->rx_skb[ns])) {
dev_kfree_skb(skb);
- lp->rx_skb[ns] = 0;
+ lp->rx_skb[ns] = NULL;
}
lp->rx_session_id[ns] |= 0x40;
}
return 0;
}
\f
+/*
+ * wait_for_buffer
+ *
+ * This routine waits for the SEEQ chip to assert that the FIFO is ready
+ * by checking for a window interrupt, and then clearing it. This has to
+ * occur in the interrupt handler!
+ */
+inline void wait_for_buffer(struct net_device * dev)
+{
+ int ioaddr = dev->base_addr;
+ unsigned long tmp;
+ int status;
+
+ tmp = jiffies + HZ;
+ while ( ( ((status=inw(SEEQ_STATUS)) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, tmp))
+ cpu_relax();
+
+ if ( (status & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
+ outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+}
+\f
/* The typical workload of the driver:
Handle the network interface interrupts. */
static irqreturn_t seeq8005_interrupt(int irq, void *dev_id, struct pt_regs * regs)
}
-/*
- * wait_for_buffer
- *
- * This routine waits for the SEEQ chip to assert that the FIFO is ready
- * by checking for a window interrupt, and then clearing it. This has to
- * occur in the interrupt handler!
- */
-inline void wait_for_buffer(struct net_device * dev)
-{
- int ioaddr = dev->base_addr;
- unsigned long tmp;
- int status;
-
- tmp = jiffies + HZ;
- while ( ( ((status=inw(SEEQ_STATUS)) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, tmp))
- cpu_relax();
-
- if ( (status & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
- outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
-}
-
#ifdef MODULE
static struct net_device *dev_seeq;
{ "NS 83851 PHY", 0x2000, 0x5C20, MIX },
{ "Realtek RTL8201 PHY", 0x0000, 0x8200, LAN },
{ "VIA 6103 PHY", 0x0101, 0x8f20, LAN },
- {0,},
+ {NULL,},
};
struct mii_phy {
sis_priv->tx_ring[i].bufptr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
- sis_priv->tx_skbuff[i] = 0;
+ sis_priv->tx_skbuff[i] = NULL;
sis_priv->tx_ring[i].cmdsts = 0;
sis_priv->tx_ring[i].bufptr = 0;
sis_priv->stats.tx_dropped++;
sis_priv->rx_ring[i].bufptr,
RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
- sis_priv->rx_skbuff[i] = 0;
+ sis_priv->rx_skbuff[i] = NULL;
}
}
for (i = 0; i < NUM_TX_DESC; i++) {
sis_priv->tx_ring[i].bufptr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- sis_priv->tx_skbuff[i] = 0;
+ sis_priv->tx_skbuff[i] = NULL;
}
}
for (i = 0 ; i < 6 ; i++, p++)
*p = canonical[*p] ;
}
- slot = 0 ;
+ slot = NULL;
for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
if (!tb->n) { /* not used */
if (!del && !slot) /* if !del save first free */
{
struct s_smt_tx_queue *queue ;
struct s_smt_fp_txd volatile *t1 ;
- struct s_smt_fp_txd volatile *t2=0 ;
+ struct s_smt_fp_txd volatile *t2 = NULL ;
SMbuf *mb ;
u_long tbctrl ;
int i ;
{
struct smt_para *pa ;
const struct s_p_tab *pt ;
- struct fddi_mib_m *mib_m = 0 ;
- struct fddi_mib_p *mib_p = 0 ;
+ struct fddi_mib_m *mib_m = NULL;
+ struct fddi_mib_p *mib_p = NULL;
int len ;
int plen ;
char *from ;
/*
* check special paras
*/
- swap = 0 ;
+ swap = NULL;
switch (para) {
case SMT_P10F0 :
case SMT_P10F1 :
char c ;
char *mib_addr ;
struct fddi_mib *mib ;
- struct fddi_mib_m *mib_m = 0 ;
- struct fddi_mib_a *mib_a = 0 ;
- struct fddi_mib_p *mib_p = 0 ;
+ struct fddi_mib_m *mib_m = NULL;
+ struct fddi_mib_a *mib_a = NULL;
+ struct fddi_mib_p *mib_p = NULL;
int mac ;
int path ;
int port ;
const struct s_p_tab *pt ;
for (pt = p_tab ; pt->p_num && pt->p_num != para ; pt++)
;
- return(pt->p_num ? pt : 0) ;
+ return(pt->p_num ? pt : NULL) ;
}
static int smt_mib_phys(struct s_smc *smc)
char *p ;
int len ;
int plen ;
- void *found = 0 ;
+ void *found = NULL;
SK_UNUSED(smc) ;
len -= plen ;
if (len < 0) {
DB_SMT("SMT : sm_to_para - length error %d\n",plen,0) ;
- return(0) ;
+ return NULL;
}
if ((plen & 3) && (para != SMT_P_ECHODATA)) {
DB_SMT("SMT : sm_to_para - odd length %d\n",plen,0) ;
- return(0) ;
+ return NULL;
}
if (found)
return(found) ;
}
- return(0) ;
+ return NULL;
}
#if 0
*/
/* Attention: don't initialize mib pointer here! */
/* It must be initialized during phase 2 */
- smc->y[port].mib = 0 ;
+ smc->y[port].mib = NULL;
mib->fddiSMTPORTIndexes[port] = port+INDEX_PORT ;
pm->fddiPORTIndex = port+INDEX_PORT ;
{ "SBACOMMAND",16, 0 } ,
{ "SBAAVAILABLE",17, 1, 0, 100 } ,
#endif
- { 0 }
+ { NULL }
} ;
/* Define maximum string size for values and keybuffer */
void smt_timer_init(struct s_smc *smc)
{
- smc->t.st_queue = 0 ;
+ smc->t.st_queue = NULL;
smc->t.st_fast.tm_active = FALSE ;
- smc->t.st_fast.tm_next = 0 ;
+ smc->t.st_fast.tm_next = NULL;
hwt_init(smc) ;
}
timer->tm_active = TRUE ;
if (!smc->t.st_queue) {
smc->t.st_queue = timer ;
- timer->tm_next = 0 ;
+ timer->tm_next = NULL;
timer->tm_delta = time ;
hwt_start(smc,time) ;
return ;
done = 1 ;
}
}
- *last = 0 ;
+ *last = NULL;
next = smc->t.st_queue ;
smc->t.st_queue = tm ;
if (evc->evc_code == code && evc->evc_index == index)
return(evc) ;
}
- return(0) ;
+ return NULL;
}
#define THRESHOLD_2 (2*TICKS_PER_SECOND)
if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
return;
- tty->disc_data = 0;
+ tty->disc_data = NULL;
sl->tty = NULL;
if (!sl->leased)
sl->line = 0;
static int __init bigmac_probe(void)
{
struct sbus_bus *sbus;
- struct sbus_dev *sdev = 0;
+ struct sbus_dev *sdev = NULL;
static int called;
int cards = 0, v;
{"D-Link DFE-530TXS FAST Ethernet Adapter"},
{"D-Link DL10050-based FAST Ethernet Adapter"},
{"Sundance Technology Alta"},
- {0,}, /* 0 terminated list. */
+ {NULL,}, /* 0 terminated list. */
};
/* This driver was written to use PCI memory space, however x86-oriented
((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
np->rx_ring[i].status = 0;
np->rx_ring[i].frag[0].length = 0;
- np->rx_skbuff[i] = 0;
+ np->rx_skbuff[i] = NULL;
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
for (i = 0; i < TX_RING_SIZE; i++) {
- np->tx_skbuff[i] = 0;
+ np->tx_skbuff[i] = NULL;
np->tx_ring[i].status = 0;
}
return;
dev_kfree_skb_irq (skb);
else
dev_kfree_skb (skb);
- np->tx_skbuff[i] = 0;
+ np->tx_skbuff[i] = NULL;
np->stats.tx_dropped++;
}
}
np->tx_ring[entry].frag[0].addr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq (np->tx_skbuff[entry]);
- np->tx_skbuff[entry] = 0;
+ np->tx_skbuff[entry] = NULL;
np->tx_ring[entry].frag[0].addr = 0;
np->tx_ring[entry].frag[0].length = 0;
}
np->tx_ring[entry].frag[0].addr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq (np->tx_skbuff[entry]);
- np->tx_skbuff[entry] = 0;
+ np->tx_skbuff[entry] = NULL;
np->tx_ring[entry].frag[0].addr = 0;
np->tx_ring[entry].frag[0].length = 0;
}
np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
- np->rx_skbuff[i] = 0;
+ np->rx_skbuff[i] = NULL;
}
}
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_ring[i].frag[0].addr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
- np->tx_skbuff[i] = 0;
+ np->tx_skbuff[i] = NULL;
}
}
/* Let the chip settle down a bit, it seems that helps
* for sleep mode on some models
*/
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ/100);
+ msleep(10);
/* Make sure we aren't polling PHY status change. We
* don't currently use that feature though
* dont wait a bit here, looks like the chip takes
* some time to really shut down
*/
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ/100);
+ msleep(10);
}
writel(0, gp->regs + MAC_TXCFG);
lp->tx_new = TX_NEXT(entry);
}
-struct net_device *last_dev = 0;
+struct net_device *last_dev;
static int lance_open(struct net_device *dev)
{
static int __init sparc_lance_probe(void)
{
struct sbus_bus *bus;
- struct sbus_dev *sdev = 0;
- struct sbus_dma *ledma = 0;
+ struct sbus_dev *sdev = NULL;
+ struct sbus_dma *ledma = NULL;
static int called;
int cards = 0, v;
for_each_sbusdev (sdev, bus) {
if (strcmp(sdev->prom_name, "le") == 0) {
cards++;
- if ((v = sparc_lance_init(sdev, 0, 0)))
+ if ((v = sparc_lance_init(sdev, NULL, NULL)))
return v;
continue;
}
cards++;
ledma = find_ledma(sdev);
if ((v = sparc_lance_init(sdev->child,
- ledma, 0)))
+ ledma, NULL)))
return v;
continue;
}
if (strcmp(sdev->prom_name, "lebuffer") == 0){
cards++;
if ((v = sparc_lance_init(sdev->child,
- 0, sdev)))
+ NULL, sdev)))
return v;
continue;
}
{
struct net_device *dev = NULL;
struct sbus_bus *bus;
- struct sbus_dev *sdev = 0;
+ struct sbus_dev *sdev = NULL;
static int called;
int cards = 0, v;
/* restore 5701 hardware bug workaround flag */
tp->tg3_flags = flags_save;
+ /* Unfortunately, we have to delay before the PCI read back.
+ * Some 575X chips even will not respond to a PCI cfg access
+ * when the reset command is given to the chip.
+ *
+ * How do these hardware designers expect things to work
+ * properly if the PCI write is posted for a long period
+ * of time? It is always necessary to have some method by
+ * which a register read back can occur to push the write
+ * out which does the reset.
+ *
+ * For most tg3 variants the trick below was working.
+ * Ho hum...
+ */
+ udelay(120);
+
/* Flush PCI posted writes. The normal MMIO registers
* are inaccessible at this time so this is the only
- * way to make this reliably. I tried to use indirect
+ * way to make this reliably (actually, this is no longer
+ * the case, see above). I tried to use indirect
* register read/write but this upset some 5701 variants.
*/
pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
/* Define this to enable Link beat monitoring */
#undef MONITOR
-/* Turn on debugging. See linux/Documentation/networking/tlan.txt for details */
+/* Turn on debugging. See Documentation/networking/tlan.txt for details */
static int debug;
static int bbuf;
const char *media[] = {
"10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ",
- "100baseTx-FD", "100baseT4", 0
+ "100baseTx-FD", "100baseT4", NULL
};
int media_map[] = { 0x0020, 0x0040, 0x0080, 0x0100, 0x0200,};
config SMCTR
tristate "SMC ISA/MCA adapter support"
- depends on TR && (ISA || MCA_LEGACY)
+ depends on TR && (ISA || MCA_LEGACY) && (BROKEN || !64BIT)
---help---
This is support for the ISA and MCA SMC Token Ring cards,
specifically SMC TokenCard Elite (8115T) and SMC TokenCard Elite/A
spin_lock(&card->lock);
status = inl(card->io_port+CSR5);
-#if DEBUG
+#ifdef DEBUG
print_binary(status);
printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]);
printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]);
xircom_init_ring(dev);
/* Clear the tx ring */
for (i = 0; i < TX_RING_SIZE; i++) {
- tp->tx_skbuff[i] = 0;
+ tp->tx_skbuff[i] = NULL;
tp->tx_ring[i].status = 0;
}
/* The Tx buffer descriptor is filled in as needed, but we
do need to clear the ownership bit. */
for (i = 0; i < TX_RING_SIZE; i++) {
- tp->tx_skbuff[i] = 0;
+ tp->tx_skbuff[i] = NULL;
tp->tx_ring[i].status = 0;
tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
#ifdef CARDBUS
/* Free the original skb. */
dev_kfree_skb_irq(tp->tx_skbuff[entry]);
- tp->tx_skbuff[entry] = 0;
+ tp->tx_skbuff[entry] = NULL;
}
#ifndef final_version
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = tp->rx_skbuff[i];
- tp->rx_skbuff[i] = 0;
+ tp->rx_skbuff[i] = NULL;
tp->rx_ring[i].status = 0; /* Not owned by Xircom chip. */
tp->rx_ring[i].length = 0;
tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
for (i = 0; i < TX_RING_SIZE; i++) {
if (tp->tx_skbuff[i])
dev_kfree_skb(tp->tx_skbuff[i]);
- tp->tx_skbuff[i] = 0;
+ tp->tx_skbuff[i] = NULL;
}
tp->open = 0;
if (entry != 0) {
/* Avoid a chip errata by prefixing a dummy entry. */
- tp->tx_skbuff[entry] = 0;
+ tp->tx_skbuff[entry] = NULL;
tp->tx_ring[entry].length =
(entry == TX_RING_SIZE - 1) ? Tx1RingWrap : 0;
tp->tx_ring[entry].buffer1 = 0;
entry = tp->cur_tx++ % TX_RING_SIZE;
}
- tp->tx_skbuff[entry] = 0;
+ tp->tx_skbuff[entry] = NULL;
/* Put the setup frame on the Tx list. */
if (entry == TX_RING_SIZE - 1)
tx_flags |= Tx1RingWrap; /* Wrap ring. */
rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
next += sizeof(struct rx_desc);
rp->rx_ring[i].next_desc = cpu_to_le32(next);
- rp->rx_skbuff[i] = 0;
+ rp->rx_skbuff[i] = NULL;
}
/* Mark the last entry as wrapping the ring. */
rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(rp->rx_skbuff[i]);
}
- rp->rx_skbuff[i] = 0;
+ rp->rx_skbuff[i] = NULL;
}
}
rp->dirty_tx = rp->cur_tx = 0;
next = rp->tx_ring_dma;
for (i = 0; i < TX_RING_SIZE; i++) {
- rp->tx_skbuff[i] = 0;
+ rp->tx_skbuff[i] = NULL;
rp->tx_ring[i].tx_status = 0;
rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
next += sizeof(struct tx_desc);
}
dev_kfree_skb(rp->tx_skbuff[i]);
}
- rp->tx_skbuff[i] = 0;
- rp->tx_buf[i] = 0;
+ rp->tx_skbuff[i] = NULL;
+ rp->tx_buf[i] = NULL;
}
}
struct velocity_info *vptr = dev->priv;
struct mac_regs * regs = vptr->mac_regs;
unsigned long flags;
- struct mii_ioctl_data *miidata = (struct mii_ioctl_data *) &(ifr->ifr_data);
+ struct mii_ioctl_data *miidata = if_mii(ifr);
int err;
switch (cmd) {
register u32 _crc;
_crc = crc;
- __asm __volatile (
+ __asm__ __volatile__ (
"xorl %%ebx, %%ebx\n"
"movl %2, %%esi\n"
"movl %3, %%ecx\n"
config ARLAN
tristate "Aironet Arlan 655 & IC2200 DS support"
- depends on NET_RADIO && ISA
+ depends on NET_RADIO && ISA && !64BIT
---help---
Aironet makes Arlan, a class of wireless LAN adapters. These use the
www.Telxon.com chip, which is also used on several similar cards.
config AIRO
tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
- depends on NET_RADIO && (ISA || PCI)
+ depends on NET_RADIO && ISA && (PCI || BROKEN)
---help---
This is the standard Linux driver to support Cisco/Aironet ISA and
PCI 802.11 wireless cards.
static void wifi_setup(struct net_device *dev)
{
- dev->hard_header = 0;
- dev->rebuild_header = 0;
- dev->hard_header_cache = 0;
- dev->header_cache_update= 0;
+ dev->hard_header = NULL;
+ dev->rebuild_header = NULL;
+ dev->hard_header_cache = NULL;
+ dev->header_cache_update= NULL;
dev->hard_header_parse = wll_header_parse;
dev->hard_start_xmit = &airo_start_xmit11;
}
ai = dev->priv;
- ai->wifidev = 0;
+ ai->wifidev = NULL;
ai->flags = 0;
if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) {
printk(KERN_DEBUG "airo: Found an MPI350 card\n");
struct net_device *init_airo_card( unsigned short irq, int port, int is_pcmcia )
{
- return _init_airo_card ( irq, port, is_pcmcia, 0);
+ return _init_airo_card ( irq, port, is_pcmcia, NULL);
}
EXPORT_SYMBOL(init_airo_card);
.release = proc_close
};
-static struct proc_dir_entry *airo_entry = 0;
+static struct proc_dir_entry *airo_entry;
struct proc_data {
int release_buffer;
(data->wbuffer[1] == ' ' || data->wbuffer[1] == '\n')) {
index = data->wbuffer[0] - '0';
if (data->wbuffer[1] == '\n') {
- set_wep_key(ai, index, 0, 0, 1, 1);
+ set_wep_key(ai, index, NULL, 0, 1, 1);
return;
}
j = 2;
}
data->writelen = 0;
data->maxwritelen = 0;
- data->wbuffer = 0;
- data->on_close = 0;
+ data->wbuffer = NULL;
+ data->on_close = NULL;
if (file->f_mode & FMODE_WRITE) {
if (!(file->f_mode & FMODE_READ)) {
static struct net_device_list {
struct net_device *dev;
struct net_device_list *next;
-} *airo_devices = 0;
+} *airo_devices;
/* Since the card doesn't automatically switch to the right WEP mode,
we will make it do it. If the card isn't associated, every secs we
break;
case AUTH_SHAREDKEY:
if (apriv->keyindex < auto_wep) {
- set_wep_key(apriv, apriv->keyindex, 0, 0, 0, 0);
+ set_wep_key(apriv, apriv->keyindex, NULL, 0, 0, 0);
apriv->config.authType = AUTH_SHAREDKEY;
apriv->keyindex++;
} else {
/* Drop to ENCRYPT */
apriv->keyindex = 0;
- set_wep_key(apriv, apriv->defindex, 0, 0, 0, 0);
+ set_wep_key(apriv, apriv->defindex, NULL, 0, 0, 0);
apriv->config.authType = AUTH_ENCRYPT;
}
break;
/* Do we want to just set the transmit key index ? */
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
if ((index >= 0) && (index < ((cap_rid.softCap & 0x80)?4:1))) {
- set_wep_key(local, index, 0, 0, 1, 1);
+ set_wep_key(local, index, NULL, 0, 1, 1);
} else
/* Don't complain if only change the mode */
if(!dwrq->flags & IW_ENCODE_MODE) {
if (card->vaddr)
iounmap(card->vaddr);
- card->vaddr = 0;
+ card->vaddr = NULL;
macio_release_resource(mdev, 0);
static char arlan_drive_info[ARLAN_STR_SIZE] = "A655\n\0";
static int arlan_sysctl_info(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp)
+ void __user *buffer, size_t * lenp, loff_t *ppos)
{
int i;
int retv, pos, devnum;
*lenp = pos;
if (!write)
- retv = proc_dostring(ctl, write, filp, buffer, lenp);
+ retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
else
{
*lenp = 0;
static int arlan_sysctl_info161719(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp)
+ void __user *buffer, size_t * lenp, loff_t *ppos)
{
int i;
int retv, pos, devnum;
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp);
+ retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
return retv;
}
static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp)
+ void __user *buffer, size_t * lenp, loff_t *ppos)
{
int i;
int retv, pos, devnum;
SARLBNpln(u_char, txBuffer, 0x800);
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp);
+ retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
return retv;
}
static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp)
+ void __user *buffer, size_t * lenp, loff_t *ppos)
{
int i;
int retv, pos, devnum;
SARLBNpln(u_char, rxBuffer, 0x800);
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp);
+ retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
return retv;
}
static int arlan_sysctl_info18(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp)
+ void __user *buffer, size_t * lenp, loff_t *ppos)
{
int i;
int retv, pos, devnum;
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp);
+ retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
return retv;
}
static char conf_reset_result[200];
static int arlan_configure(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp)
+ void __user *buffer, size_t * lenp, loff_t *ppos)
{
int pos = 0;
int devnum = ctl->procname[6] - '0';
return -1;
*lenp = pos;
- return proc_dostring(ctl, write, filp, buffer, lenp);
+ return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
}
static int arlan_sysctl_reset(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp)
+ void __user *buffer, size_t * lenp, loff_t *ppos)
{
int pos = 0;
int devnum = ctl->procname[5] - '0';
} else
return -1;
*lenp = pos + 3;
- return proc_dostring(ctl, write, filp, buffer, lenp);
+ return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
}
extern int arlan_entry_debug;
extern int arlan_exit_debug;
extern int testMemory;
-extern const char* arlan_version;
extern int arlan_command(struct net_device * dev, int command);
#define SIDUNKNOWN -1
*
*/
-#define __KERNEL_SYSCALLS__
-
#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
{
islpci_private *priv = netdev_priv(ndev);
struct islpci_mgmtframe *response = NULL;
- int ret = -EIO, response_op = PIMFOR_OP_ERROR;
+ int ret = -EIO;
printk("%s: get_oid 0x%08X\n", ndev->name, priv->priv_oid);
data->length = 0;
islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET,
priv->priv_oid, extra, 256,
&response);
- response_op = response->header->operation;
printk("%s: ret: %i\n", ndev->name, ret);
- printk("%s: response_op: %i\n", ndev->name, response_op);
if (ret || !response
|| response->header->operation == PIMFOR_OP_ERROR) {
if (response) {
priv->priv_oid, extra, data->length,
&response);
printk("%s: ret: %i\n", ndev->name, ret);
+ if (ret || !response
+ || response->header->operation == PIMFOR_OP_ERROR) {
+ if (response) {
+ islpci_mgt_release(response);
+ }
+ printk("%s: EIO\n", ndev->name);
+ ret = -EIO;
+ }
if (!ret) {
response_op = response->header->operation;
printk("%s: response_op: %i\n", ndev->name,
response_op);
islpci_mgt_release(response);
}
- if (ret || response_op == PIMFOR_OP_ERROR) {
- printk("%s: EIO\n", ndev->name);
- ret = -EIO;
- }
}
return (ret ? ret : -EINPROGRESS);
mdelay(50);
{
- const struct firmware *fw_entry = 0;
+ const struct firmware *fw_entry = NULL;
long fw_len;
const u32 *fw_ptr;
if (priv->device_base)
iounmap(priv->device_base);
- priv->device_base = 0;
+ priv->device_base = NULL;
/* free consistent DMA area... */
if (priv->driver_mem_address)
priv->device_host_address);
/* clear some dangling pointers */
- priv->driver_mem_address = 0;
+ priv->driver_mem_address = NULL;
priv->device_host_address = 0;
priv->device_psm_buffer = 0;
- priv->control_block = 0;
+ priv->control_block = NULL;
/* clean up mgmt rx buffers */
for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) {
if (priv->data_low_rx[counter])
dev_kfree_skb(priv->data_low_rx[counter]);
- priv->data_low_rx[counter] = 0;
+ priv->data_low_rx[counter] = NULL;
}
/* Free the acces control list and the WPA list */
do_islpci_free_memory:
islpci_free_memory(priv);
do_free_netdev:
- pci_set_drvdata(pdev, 0);
+ pci_set_drvdata(pdev, NULL);
free_netdev(ndev);
- priv = 0;
+ priv = NULL;
return NULL;
}
MODULE_DESCRIPTION("The Prism54 802.11 Wireless LAN adapter");
MODULE_LICENSE("GPL");
+static int init_pcitm = 0;
+module_param(init_pcitm, int, 0);
+
/* In this order: vendor, device, subvendor, subdevice, class, class_mask,
* driver_data
* If you have an update for this please contact prism54-devel@prism54.org
*
* Writing zero to both these two registers will disable both timeouts and
* *can* solve problems caused by devices that are slow to respond.
+ * Make this configurable - MSW
*/
- /* I am taking these out, we should not be poking around in the
- * programmable timers - MSW
- */
-/* Do not zero the programmable timers
- pci_write_config_byte(pdev, 0x40, 0);
- pci_write_config_byte(pdev, 0x41, 0);
-*/
+ if ( init_pcitm >= 0 ) {
+ pci_write_config_byte(pdev, 0x40, (u8)init_pcitm);
+ pci_write_config_byte(pdev, 0x41, (u8)init_pcitm);
+ } else {
+ printk(KERN_INFO "PCI TRDY/RETRY unchanged\n");
+ }
/* request the pci device I/O regions */
rvalue = pci_request_regions(pdev, DRV_NAME);
do_unregister_netdev:
unregister_netdev(ndev);
islpci_free_memory(priv);
- pci_set_drvdata(pdev, 0);
+ pci_set_drvdata(pdev, NULL);
free_netdev(ndev);
- priv = 0;
+ priv = NULL;
do_pci_release_regions:
pci_release_regions(pdev);
do_pci_disable_device:
prism54_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
- islpci_private *priv = ndev ? netdev_priv(ndev) : 0;
+ islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
BUG_ON(!priv);
if (!__in_cleanup_module) {
/* free the PCI memory and unmap the remapped page */
islpci_free_memory(priv);
- pci_set_drvdata(pdev, 0);
+ pci_set_drvdata(pdev, NULL);
free_netdev(ndev);
- priv = 0;
+ priv = NULL;
pci_release_regions(pdev);
prism54_suspend(struct pci_dev *pdev, u32 state)
{
struct net_device *ndev = pci_get_drvdata(pdev);
- islpci_private *priv = ndev ? netdev_priv(ndev) : 0;
+ islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
BUG_ON(!priv);
printk(KERN_NOTICE "%s: got suspend request (state %d)\n",
prism54_resume(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
- islpci_private *priv = ndev ? netdev_priv(ndev) : 0;
+ islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
BUG_ON(!priv);
printk(KERN_NOTICE "%s: got resume request\n", ndev->name);
int err;
DEFINE_WAIT(wait);
+ *recvframe = NULL;
+
if (down_interruptible(&priv->mgmt_sem))
return -ERESTARTSYS;
mgt_set_request(islpci_private *priv, enum oid_num_t n, int extra, void *data)
{
int ret = 0;
- struct islpci_mgmtframe *response;
+ struct islpci_mgmtframe *response = NULL;
int response_op = PIMFOR_OP_ERROR;
int dlen;
void *cache, *_data = data;
DOT11_OID_DEFKEYID,
DOT11_OID_DOT1XENABLE,
OID_INL_DOT11D_CONFORMANCE,
+ /* Do not initialize this - fw < 1.0.4.3 rejects it
OID_INL_OUTPUTPOWER,
+ */
};
/* update the MAC addr. */
static int
mgt_update_addr(islpci_private *priv)
{
- struct islpci_mgmtframe *res;
+ struct islpci_mgmtframe *res = NULL;
int ret;
ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET,
FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
{"Symbios SYM83C885", { 0x07011000, 0xffffffff},
PCI_IOTYPE, YELLOWFIN_SIZE, HasMII | DontUseEeprom },
- {0,},
+ {NULL,},
};
static struct pci_device_id yellowfin_pci_tbl[] = {
#ifdef NO_TXSTATS
/* In this mode the Tx ring needs only a single descriptor. */
for (i = 0; i < TX_RING_SIZE; i++) {
- yp->tx_skbuff[i] = 0;
+ yp->tx_skbuff[i] = NULL;
yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
- yp->tx_skbuff[entry] = 0;
+ yp->tx_skbuff[entry] = NULL;
}
if (yp->tx_full
&& yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
if (yp->rx_skbuff[i]) {
dev_kfree_skb(yp->rx_skbuff[i]);
}
- yp->rx_skbuff[i] = 0;
+ yp->rx_skbuff[i] = NULL;
}
for (i = 0; i < TX_RING_SIZE; i++) {
if (yp->tx_skbuff[i])
dev_kfree_skb(yp->tx_skbuff[i]);
- yp->tx_skbuff[i] = 0;
+ yp->tx_skbuff[i] = NULL;
}
#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset)
{
- size_t len = strlen(str);
-
- if (!count)
- return 0;
-
- if (*offset > len)
- return 0;
-
- if (count > len - *offset)
- count = len - *offset;
-
- if (copy_to_user(buf, str + *offset, count))
- return -EFAULT;
-
- *offset += count;
-
- return count;
+ return simple_read_from_buffer(buf, count, offset, str, strlen(str));
}
ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset)
{
char tmpbuf[TMPBUFSIZE];
- size_t maxlen;
-
- if (!count)
- return 0;
-
- spin_lock(&oprofilefs_lock);
- maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
- spin_unlock(&oprofilefs_lock);
+ size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
if (maxlen > TMPBUFSIZE)
maxlen = TMPBUFSIZE;
-
- if (*offset > maxlen)
- return 0;
-
- if (count > maxlen - *offset)
- count = maxlen - *offset;
-
- if (copy_to_user(buf, tmpbuf + *offset, count))
- return -EFAULT;
-
- *offset += count;
-
- return count;
+ return simple_read_from_buffer(buf, count, offset, tmpbuf, maxlen);
}
#define PARPORT_MAX_SPINTIME_VALUE 1000
static int do_active_device(ctl_table *table, int write, struct file *filp,
- void __user *result, size_t *lenp)
+ void __user *result, size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[256];
if (write) /* can't happen anyway */
return -EACCES;
- if (filp->f_pos) {
+ if (*ppos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
#ifdef CONFIG_PARPORT_1284
static int do_autoprobe(ctl_table *table, int write, struct file *filp,
- void __user *result, size_t *lenp)
+ void __user *result, size_t *lenp, loff_t *ppos)
{
struct parport_device_info *info = table->extra2;
const char *str;
if (write) /* permissions stop this */
return -EACCES;
- if (filp->f_pos) {
+ if (*ppos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
return copy_to_user (result, buffer, len) ? -EFAULT : 0;
}
static int do_hardware_base_addr (ctl_table *table, int write,
struct file *filp, void __user *result,
- size_t *lenp)
+ size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
int len = 0;
- if (filp->f_pos) {
+ if (*ppos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
static int do_hardware_irq (ctl_table *table, int write,
struct file *filp, void __user *result,
- size_t *lenp)
+ size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
int len = 0;
- if (filp->f_pos) {
+ if (*ppos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
static int do_hardware_dma (ctl_table *table, int write,
struct file *filp, void __user *result,
- size_t *lenp)
+ size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[20];
int len = 0;
- if (filp->f_pos) {
+ if (*ppos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
static int do_hardware_modes (ctl_table *table, int write,
struct file *filp, void __user *result,
- size_t *lenp)
+ size_t *lenp, loff_t *ppos)
{
struct parport *port = (struct parport *)table->extra1;
char buffer[40];
int len = 0;
- if (filp->f_pos) {
+ if (*ppos) {
*lenp = 0;
return 0;
}
else
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
return copy_to_user(result, buffer, len) ? -EFAULT : 0;
}
#
# PCI configuration
#
-config PCI_USE_VECTOR
- bool "Vector-based interrupt indexing (MSI)"
+config PCI_MSI
+ bool "Message Signaled Interrupts (MSI and MSI-X)"
depends on (X86_LOCAL_APIC && X86_IO_APIC) || IA64
default n
help
- This replaces the current existing IRQ-based index interrupt scheme
- with the vector-base index scheme. The advantages of vector base
- over IRQ base are listed below:
- 1) Support MSI implementation.
- 2) Support future IOxAPIC hotplug
-
- Note that this allows the device drivers to enable MSI, Message
- Signaled Interrupt, on all MSI capable device functions detected.
- Message Signal Interrupt enables an MSI-capable hardware device to
- send an inbound Memory Write on its PCI bus instead of asserting
- IRQ signal on device IRQ pin.
+ This allows device drivers to enable MSI (Message Signaled
+ Interrupts). Message Signaled Interrupts enable a device to
+ generate an interrupt using an inbound Memory Write on its
+ PCI bus instead of asserting a device IRQ pin.
If you don't know what to do here, say N.
obj-$(CONFIG_PPC64) += setup-bus.o
obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
obj-$(CONFIG_X86_VISWS) += setup-irq.o
-obj-$(CONFIG_PCI_USE_VECTOR) += msi.o
+obj-$(CONFIG_PCI_MSI) += msi.o
# Cardbus & CompactPCI use setup-bus
obj-$(CONFIG_HOTPLUG) += setup-bus.o
case PCI_CAP_ID_MSI:
{
int pos;
- unsigned int mask_bits;
+ u32 mask_bits;
pos = entry->mask_base;
- entry->dev->bus->ops->read(entry->dev->bus, entry->dev->devfn,
- pos, 4, &mask_bits);
+ pci_read_config_dword(entry->dev, pos, &mask_bits);
mask_bits &= ~(1);
mask_bits |= flag;
- entry->dev->bus->ops->write(entry->dev->bus, entry->dev->devfn,
- pos, 4, mask_bits);
+ pci_write_config_dword(entry->dev, pos, mask_bits);
break;
}
case PCI_CAP_ID_MSIX:
if (!(pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI)))
return;
- entry->dev->bus->ops->read(entry->dev->bus, entry->dev->devfn,
- msi_lower_address_reg(pos), 4,
+ pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
&address.lo_address.value);
address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
address.lo_address.value |= (cpu_mask_to_apicid(cpu_mask) <<
MSI_TARGET_CPU_SHIFT);
entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask);
- entry->dev->bus->ops->write(entry->dev->bus, entry->dev->devfn,
- msi_lower_address_reg(pos), 4,
+ pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
address.lo_address.value);
break;
}
static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
{
+ struct msi_desc *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msi_lock, flags);
+ entry = msi_desc[vector];
+ if (!entry || !entry->dev) {
+ spin_unlock_irqrestore(&msi_lock, flags);
+ return 0;
+ }
+ entry->msi_attrib.state = 1; /* Mark it active */
+ spin_unlock_irqrestore(&msi_lock, flags);
+
return 0; /* never anything pending */
}
-static void pci_disable_msi(unsigned int vector);
+static void release_msi(unsigned int vector);
static void shutdown_msi_irq(unsigned int vector)
{
- pci_disable_msi(vector);
+ release_msi(vector);
}
#define shutdown_msi_irq_wo_maskbit shutdown_msi_irq
static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
{
+ struct msi_desc *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msi_lock, flags);
+ entry = msi_desc[vector];
+ if (!entry || !entry->dev) {
+ spin_unlock_irqrestore(&msi_lock, flags);
+ return 0;
+ }
+ entry->msi_attrib.state = 1; /* Mark it active */
+ spin_unlock_irqrestore(&msi_lock, flags);
+
unmask_MSI_irq(vector);
return 0; /* never anything pending */
}
* which implement the MSI-X Capability Structure.
*/
static struct hw_interrupt_type msix_irq_type = {
- .typename = "PCI MSI-X",
+ .typename = "PCI-MSI-X",
.startup = startup_msi_irq_w_maskbit,
.shutdown = shutdown_msi_irq_w_maskbit,
.enable = enable_msi_irq_w_maskbit,
* Mask-and-Pending Bits.
*/
static struct hw_interrupt_type msi_irq_w_maskbit_type = {
- .typename = "PCI MSI",
+ .typename = "PCI-MSI",
.startup = startup_msi_irq_w_maskbit,
.shutdown = shutdown_msi_irq_w_maskbit,
.enable = enable_msi_irq_w_maskbit,
* Mask-and-Pending Bits.
*/
static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
- .typename = "PCI MSI",
+ .typename = "PCI-MSI",
.startup = startup_msi_irq_wo_maskbit,
.shutdown = shutdown_msi_irq_wo_maskbit,
.enable = enable_msi_irq_wo_maskbit,
msi_address->lo_address.value |= (MSI_TARGET_CPU << MSI_TARGET_CPU_SHIFT);
}
+static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
static int assign_msi_vector(void)
{
static int new_vector_avail = 1;
spin_lock_irqsave(&msi_lock, flags);
if (!new_vector_avail) {
+ int free_vector = 0;
+
/*
* vector_irq[] = -1 indicates that this specific vector is:
* - assigned for MSI (since MSI have no associated IRQ) or
for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
if (vector_irq[vector] != 0)
continue;
- vector_irq[vector] = -1;
- nr_released_vectors--;
+ free_vector = vector;
+ if (!msi_desc[vector])
+ break;
+ else
+ continue;
+ }
+ if (!free_vector) {
spin_unlock_irqrestore(&msi_lock, flags);
- return vector;
+ return -EBUSY;
}
+ vector_irq[free_vector] = -1;
+ nr_released_vectors--;
spin_unlock_irqrestore(&msi_lock, flags);
- return -EBUSY;
+ if (msi_desc[free_vector] != NULL) {
+ struct pci_dev *dev;
+ int tail;
+
+ /* free all linked vectors before re-assign */
+ do {
+ spin_lock_irqsave(&msi_lock, flags);
+ dev = msi_desc[free_vector]->dev;
+ tail = msi_desc[free_vector]->link.tail;
+ spin_unlock_irqrestore(&msi_lock, flags);
+ msi_free_vector(dev, tail, 1);
+ } while (free_vector != tail);
+ }
+
+ return free_vector;
}
vector = assign_irq_vector(AUTO_ASSIGN);
last_alloc_vector = vector;
printk(KERN_INFO "WARNING: MSI INIT FAILURE\n");
return status;
}
+ last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
+ if (last_alloc_vector < 0) {
+ pci_msi_enable = 0;
+ printk(KERN_INFO "WARNING: ALL VECTORS ARE BUSY\n");
+ status = -EBUSY;
+ return status;
+ }
+ vector_irq[last_alloc_vector] = 0;
+ nr_released_vectors++;
printk(KERN_INFO "MSI INIT SUCCESS\n");
return status;
static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
- u32 control;
+ u16 control;
- dev->bus->ops->read(dev->bus, dev->devfn,
- msi_control_reg(pos), 2, &control);
+ pci_read_config_word(dev, msi_control_reg(pos), &control);
if (type == PCI_CAP_ID_MSI) {
/* Set enabled bits to single MSI & enable MSI_enable bit */
msi_enable(control, 1);
- dev->bus->ops->write(dev->bus, dev->devfn,
- msi_control_reg(pos), 2, control);
+ pci_write_config_word(dev, msi_control_reg(pos), control);
} else {
msix_enable(control);
- dev->bus->ops->write(dev->bus, dev->devfn,
- msi_control_reg(pos), 2, control);
+ pci_write_config_word(dev, msi_control_reg(pos), control);
}
if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
/* PCI Express Endpoint device detected */
- u32 cmd;
- dev->bus->ops->read(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd);
+ u16 cmd;
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_INTX_DISABLE;
- dev->bus->ops->write(dev->bus, dev->devfn, PCI_COMMAND, 2, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
}
}
static void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
- u32 control;
+ u16 control;
- dev->bus->ops->read(dev->bus, dev->devfn,
- msi_control_reg(pos), 2, &control);
+ pci_read_config_word(dev, msi_control_reg(pos), &control);
if (type == PCI_CAP_ID_MSI) {
/* Set enabled bits to single MSI & enable MSI_enable bit */
msi_disable(control);
- dev->bus->ops->write(dev->bus, dev->devfn,
- msi_control_reg(pos), 2, control);
+ pci_write_config_word(dev, msi_control_reg(pos), control);
} else {
msix_disable(control);
- dev->bus->ops->write(dev->bus, dev->devfn,
- msi_control_reg(pos), 2, control);
+ pci_write_config_word(dev, msi_control_reg(pos), control);
}
if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
/* PCI Express Endpoint device detected */
- u32 cmd;
- dev->bus->ops->read(dev->bus, dev->devfn, PCI_COMMAND, 2, &cmd);
+ u16 cmd;
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
cmd &= ~PCI_COMMAND_INTX_DISABLE;
- dev->bus->ops->write(dev->bus, dev->devfn, PCI_COMMAND, 2, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
}
}
-static int msi_lookup_vector(struct pci_dev *dev)
+static int msi_lookup_vector(struct pci_dev *dev, int type)
{
int vector;
unsigned long flags;
spin_lock_irqsave(&msi_lock, flags);
for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
- msi_desc[vector]->msi_attrib.entry_nr ||
+ msi_desc[vector]->msi_attrib.type != type ||
msi_desc[vector]->msi_attrib.default_vector != dev->irq)
- continue; /* not entry 0, skip */
+ continue;
spin_unlock_irqrestore(&msi_lock, flags);
- /* This pre-assigned entry-0 MSI vector for this device
+ /* This pre-assigned MSI vector for this device
already exits. Override dev->irq with this vector */
dev->irq = vector;
return 0;
if (!dev)
return;
- if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0) {
- nr_reserved_vectors++;
+ if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
nr_msix_devices++;
- } else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
+ else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
nr_reserved_vectors++;
}
struct msg_address address;
struct msg_data data;
int pos, vector;
- u32 control;
+ u16 control;
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (!pos)
- return -EINVAL;
-
- dev->bus->ops->read(dev->bus, dev->devfn, msi_control_reg(pos),
- 2, &control);
- if (control & PCI_MSI_FLAGS_ENABLE)
- return 0;
-
- if (!msi_lookup_vector(dev)) {
- /* Lookup Sucess */
- enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
- return 0;
- }
+ pci_read_config_word(dev, msi_control_reg(pos), &control);
/* MSI Entry Initialization */
if (!(entry = alloc_msi_entry()))
return -ENOMEM;
kmem_cache_free(msi_cachep, entry);
return -EBUSY;
}
+ entry->link.head = vector;
+ entry->link.tail = vector;
entry->msi_attrib.type = PCI_CAP_ID_MSI;
+ entry->msi_attrib.state = 0; /* Mark it not active */
entry->msi_attrib.entry_nr = 0;
entry->msi_attrib.maskbit = is_mask_bit_support(control);
- entry->msi_attrib.default_vector = dev->irq;
- dev->irq = vector; /* save default pre-assigned ioapic vector */
+ entry->msi_attrib.default_vector = dev->irq; /* Save IOAPIC IRQ */
+ dev->irq = vector;
entry->dev = dev;
if (is_mask_bit_support(control)) {
entry->mask_base = msi_mask_bits_reg(pos,
msi_data_init(&data, vector);
entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
- dev->bus->ops->write(dev->bus, dev->devfn, msi_lower_address_reg(pos),
- 4, address.lo_address.value);
+ pci_write_config_dword(dev, msi_lower_address_reg(pos),
+ address.lo_address.value);
if (is_64bit_address(control)) {
- dev->bus->ops->write(dev->bus, dev->devfn,
- msi_upper_address_reg(pos), 4, address.hi_address);
- dev->bus->ops->write(dev->bus, dev->devfn,
- msi_data_reg(pos, 1), 2, *((u32*)&data));
+ pci_write_config_dword(dev,
+ msi_upper_address_reg(pos), address.hi_address);
+ pci_write_config_word(dev,
+ msi_data_reg(pos, 1), *((u32*)&data));
} else
- dev->bus->ops->write(dev->bus, dev->devfn,
- msi_data_reg(pos, 0), 2, *((u32*)&data));
+ pci_write_config_word(dev,
+ msi_data_reg(pos, 0), *((u32*)&data));
if (entry->msi_attrib.maskbit) {
unsigned int maskbits, temp;
/* All MSIs are unmasked by default, Mask them all */
- dev->bus->ops->read(dev->bus, dev->devfn,
- msi_mask_bits_reg(pos, is_64bit_address(control)), 4,
+ pci_read_config_dword(dev,
+ msi_mask_bits_reg(pos, is_64bit_address(control)),
&maskbits);
temp = (1 << multi_msi_capable(control));
temp = ((temp - 1) & ~temp);
maskbits |= temp;
- dev->bus->ops->write(dev->bus, dev->devfn,
- msi_mask_bits_reg(pos, is_64bit_address(control)), 4,
+ pci_write_config_dword(dev,
+ msi_mask_bits_reg(pos, is_64bit_address(control)),
maskbits);
}
attach_msi_entry(entry, vector);
* @dev: pointer to the pci_dev data structure of MSI-X device function
*
* Setup the MSI-X capability structure of device funtion with a
- * single MSI-X vector. A return of zero indicates the successful setup
- * of an entry zero with the new MSI-X vector or non-zero for otherwise.
- * To request for additional MSI-X vectors, the device drivers are
- * required to utilize the following supported APIs:
- * 1) msi_alloc_vectors(...) for requesting one or more MSI-X vectors
- * 2) msi_free_vectors(...) for releasing one or more MSI-X vectors
- * back to PCI subsystem before calling free_irq(...)
+ * single MSI-X vector. A return of zero indicates the successful setup of
+ * requested MSI-X entries with allocated vectors or non-zero for otherwise.
**/
-static int msix_capability_init(struct pci_dev *dev)
+static int msix_capability_init(struct pci_dev *dev,
+ struct msix_entry *entries, int nvec)
{
- struct msi_desc *entry;
+ struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
struct msg_address address;
struct msg_data data;
- int vector = 0, pos, dev_msi_cap, i;
+ int vector, pos, i, j, nr_entries, temp = 0;
u32 phys_addr, table_offset;
- u32 control;
+ u16 control;
u8 bir;
void *base;
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (!pos)
- return -EINVAL;
-
/* Request & Map MSI-X table region */
- dev->bus->ops->read(dev->bus, dev->devfn, msi_control_reg(pos), 2,
- &control);
- if (control & PCI_MSIX_FLAGS_ENABLE)
- return 0;
-
- if (!msi_lookup_vector(dev)) {
- /* Lookup Sucess */
- enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
- return 0;
- }
-
- dev_msi_cap = multi_msix_capable(control);
- dev->bus->ops->read(dev->bus, dev->devfn,
- msix_table_offset_reg(pos), 4, &table_offset);
+ pci_read_config_word(dev, msi_control_reg(pos), &control);
+ nr_entries = multi_msix_capable(control);
+ pci_read_config_dword(dev, msix_table_offset_reg(pos),
+ &table_offset);
bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
phys_addr = pci_resource_start (dev, bir);
phys_addr += (u32)(table_offset & ~PCI_MSIX_FLAGS_BIRMASK);
if (!request_mem_region(phys_addr,
- dev_msi_cap * PCI_MSIX_ENTRY_SIZE,
- "MSI-X iomap Failure"))
+ nr_entries * PCI_MSIX_ENTRY_SIZE,
+ "MSI-X vector table"))
return -ENOMEM;
- base = ioremap_nocache(phys_addr, dev_msi_cap * PCI_MSIX_ENTRY_SIZE);
- if (base == NULL)
- goto free_region;
- /* MSI Entry Initialization */
- entry = alloc_msi_entry();
- if (!entry)
- goto free_iomap;
- if ((vector = get_msi_vector(dev)) < 0)
- goto free_entry;
+ base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
+ if (base == NULL) {
+ release_mem_region(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
+ return -ENOMEM;
+ }
+ /* MSI-X Table Initialization */
+ for (i = 0; i < nvec; i++) {
+ entry = alloc_msi_entry();
+ if (!entry)
+ break;
+ if ((vector = get_msi_vector(dev)) < 0)
+ break;
- entry->msi_attrib.type = PCI_CAP_ID_MSIX;
- entry->msi_attrib.entry_nr = 0;
- entry->msi_attrib.maskbit = 1;
- entry->msi_attrib.default_vector = dev->irq;
- dev->irq = vector; /* save default pre-assigned ioapic vector */
- entry->dev = dev;
- entry->mask_base = (unsigned long)base;
- /* Replace with MSI handler */
- irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
- /* Configure MSI-X capability structure */
- msi_address_init(&address);
- msi_data_init(&data, vector);
- entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
- MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
- writel(address.lo_address.value, base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- writel(address.hi_address, base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- writel(*(u32*)&data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
- /* Initialize all entries from 1 up to 0 */
- for (i = 1; i < dev_msi_cap; i++) {
- writel(0, base + i * PCI_MSIX_ENTRY_SIZE +
+ j = entries[i].entry;
+ entries[i].vector = vector;
+ entry->msi_attrib.type = PCI_CAP_ID_MSIX;
+ entry->msi_attrib.state = 0; /* Mark it not active */
+ entry->msi_attrib.entry_nr = j;
+ entry->msi_attrib.maskbit = 1;
+ entry->msi_attrib.default_vector = dev->irq;
+ entry->dev = dev;
+ entry->mask_base = (unsigned long)base;
+ if (!head) {
+ entry->link.head = vector;
+ entry->link.tail = vector;
+ head = entry;
+ } else {
+ entry->link.head = temp;
+ entry->link.tail = tail->link.tail;
+ tail->link.tail = vector;
+ head->link.head = vector;
+ }
+ temp = vector;
+ tail = entry;
+ /* Replace with MSI-X handler */
+ irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
+ /* Configure MSI-X capability structure */
+ msi_address_init(&address);
+ msi_data_init(&data, vector);
+ entry->msi_attrib.current_cpu =
+ ((address.lo_address.u.dest_id >>
+ MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
+ writel(address.lo_address.value,
+ base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- writel(0, base + i * PCI_MSIX_ENTRY_SIZE +
+ writel(address.hi_address,
+ base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- writel(0, base + i * PCI_MSIX_ENTRY_SIZE +
+ writel(*(u32*)&data,
+ base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_DATA_OFFSET);
+ attach_msi_entry(entry, vector);
}
- attach_msi_entry(entry, vector);
- /* Set MSI enabled bits */
+ if (i != nvec) {
+ i--;
+ for (; i >= 0; i--) {
+ vector = (entries + i)->vector;
+ msi_free_vector(dev, vector, 0);
+ (entries + i)->vector = 0;
+ }
+ return -EBUSY;
+ }
+ /* Set MSI-X enabled bits */
enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
return 0;
-
-free_entry:
- kmem_cache_free(msi_cachep, entry);
-free_iomap:
- iounmap(base);
-free_region:
- release_mem_region(phys_addr, dev_msi_cap * PCI_MSIX_ENTRY_SIZE);
-
- return ((vector < 0) ? -EBUSY : -ENOMEM);
}
/**
- * pci_enable_msi - configure device's MSI(X) capability structure
- * @dev: pointer to the pci_dev data structure of MSI(X) device function
+ * pci_enable_msi - configure device's MSI capability structure
+ * @dev: pointer to the pci_dev data structure of MSI device function
*
- * Setup the MSI/MSI-X capability structure of device function with
- * a single MSI(X) vector upon its software driver call to request for
- * MSI(X) mode enabled on its hardware device function. A return of zero
- * indicates the successful setup of an entry zero with the new MSI(X)
+ * Setup the MSI capability structure of device function with
+ * a single MSI vector upon its software driver call to request for
+ * MSI mode enabled on its hardware device function. A return of zero
+ * indicates the successful setup of an entry zero with the new MSI
* vector or non-zero for otherwise.
**/
int pci_enable_msi(struct pci_dev* dev)
{
- int status = -EINVAL;
+ int pos, temp = dev->irq, status = -EINVAL;
+ u16 control;
if (!pci_msi_enable || !dev)
return status;
- if (msi_init() < 0)
- return -ENOMEM;
+ if ((status = msi_init()) < 0)
+ return status;
- if ((status = msix_capability_init(dev)) == -EINVAL)
- status = msi_capability_init(dev);
- if (!status)
- nr_reserved_vectors--;
+ if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
+ return -EINVAL;
+
+ pci_read_config_word(dev, msi_control_reg(pos), &control);
+ if (control & PCI_MSI_FLAGS_ENABLE)
+ return 0; /* Already in MSI mode */
+
+ if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
+ /* Lookup Success */
+ unsigned long flags;
+
+ spin_lock_irqsave(&msi_lock, flags);
+ if (!vector_irq[dev->irq]) {
+ msi_desc[dev->irq]->msi_attrib.state = 0;
+ vector_irq[dev->irq] = -1;
+ nr_released_vectors--;
+ spin_unlock_irqrestore(&msi_lock, flags);
+ enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
+ return 0;
+ }
+ spin_unlock_irqrestore(&msi_lock, flags);
+ dev->irq = temp;
+ }
+ /* Check whether driver already requested for MSI-X vectors */
+ if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 &&
+ !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
+ printk(KERN_INFO "Can't enable MSI. Device already had MSI-X vectors assigned\n");
+ dev->irq = temp;
+ return -EINVAL;
+ }
+ status = msi_capability_init(dev);
+ if (!status) {
+ if (!pos)
+ nr_reserved_vectors--; /* Only MSI capable */
+ else if (nr_msix_devices > 0)
+ nr_msix_devices--; /* Both MSI and MSI-X capable,
+ but choose enabling MSI */
+ }
return status;
}
-static int msi_free_vector(struct pci_dev* dev, int vector);
-static void pci_disable_msi(unsigned int vector)
+void pci_disable_msi(struct pci_dev* dev)
{
- int head, tail, type, default_vector;
struct msi_desc *entry;
- struct pci_dev *dev;
+ int pos, default_vector;
+ u16 control;
unsigned long flags;
+ if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
+ return;
+
+ pci_read_config_word(dev, msi_control_reg(pos), &control);
+ if (!(control & PCI_MSI_FLAGS_ENABLE))
+ return;
+
spin_lock_irqsave(&msi_lock, flags);
- entry = msi_desc[vector];
- if (!entry || !entry->dev) {
+ entry = msi_desc[dev->irq];
+ if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
spin_unlock_irqrestore(&msi_lock, flags);
return;
}
- dev = entry->dev;
- type = entry->msi_attrib.type;
- head = entry->link.head;
- tail = entry->link.tail;
- default_vector = entry->msi_attrib.default_vector;
- spin_unlock_irqrestore(&msi_lock, flags);
-
- disable_msi_mode(dev, pci_find_capability(dev, type), type);
- /* Restore dev->irq to its default pin-assertion vector */
- dev->irq = default_vector;
- if (type == PCI_CAP_ID_MSIX && head != tail) {
- /* Bad driver, which do not call msi_free_vectors before exit.
- We must do a cleanup here */
- while (1) {
- spin_lock_irqsave(&msi_lock, flags);
- entry = msi_desc[vector];
- head = entry->link.head;
- tail = entry->link.tail;
- spin_unlock_irqrestore(&msi_lock, flags);
- if (tail == head)
- break;
- if (msi_free_vector(dev, entry->link.tail))
- break;
- }
+ if (entry->msi_attrib.state) {
+ spin_unlock_irqrestore(&msi_lock, flags);
+ printk(KERN_DEBUG "Driver[%d:%d:%d] unloaded wo doing free_irq on vector->%d\n",
+ dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
+ dev->irq);
+ BUG_ON(entry->msi_attrib.state > 0);
+ } else {
+ vector_irq[dev->irq] = 0; /* free it */
+ nr_released_vectors++;
+ default_vector = entry->msi_attrib.default_vector;
+ spin_unlock_irqrestore(&msi_lock, flags);
+ /* Restore dev->irq to its default pin-assertion vector */
+ dev->irq = default_vector;
+ disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
+ PCI_CAP_ID_MSI);
}
}
-static int msi_alloc_vector(struct pci_dev* dev, int head)
+static void release_msi(unsigned int vector)
{
struct msi_desc *entry;
- struct msg_address address;
- struct msg_data data;
- int i, offset, pos, dev_msi_cap, vector;
- u32 low_address, control;
- unsigned long base = 0L;
unsigned long flags;
spin_lock_irqsave(&msi_lock, flags);
- entry = msi_desc[dev->irq];
- if (!entry) {
- spin_unlock_irqrestore(&msi_lock, flags);
- return -EINVAL;
- }
- base = entry->mask_base;
+ entry = msi_desc[vector];
+ if (entry && entry->dev)
+ entry->msi_attrib.state = 0; /* Mark it not active */
spin_unlock_irqrestore(&msi_lock, flags);
-
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- dev->bus->ops->read(dev->bus, dev->devfn, msi_control_reg(pos),
- 2, &control);
- dev_msi_cap = multi_msix_capable(control);
- for (i = 1; i < dev_msi_cap; i++) {
- if (!(low_address = readl(base + i * PCI_MSIX_ENTRY_SIZE)))
- break;
- }
- if (i >= dev_msi_cap)
- return -EINVAL;
-
- /* MSI Entry Initialization */
- if (!(entry = alloc_msi_entry()))
- return -ENOMEM;
-
- if ((vector = get_new_vector()) < 0) {
- kmem_cache_free(msi_cachep, entry);
- return vector;
- }
- entry->msi_attrib.type = PCI_CAP_ID_MSIX;
- entry->msi_attrib.entry_nr = i;
- entry->msi_attrib.maskbit = 1;
- entry->dev = dev;
- entry->link.head = head;
- entry->mask_base = base;
- irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
- /* Configure MSI-X capability structure */
- msi_address_init(&address);
- msi_data_init(&data, vector);
- entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
- MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
- offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
- writel(address.lo_address.value, base + offset +
- PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- writel(address.hi_address, base + offset +
- PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- writel(*(u32*)&data, base + offset + PCI_MSIX_ENTRY_DATA_OFFSET);
- writel(1, base + offset + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
- attach_msi_entry(entry, vector);
-
- return vector;
}
-static int msi_free_vector(struct pci_dev* dev, int vector)
+static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
{
struct msi_desc *entry;
- int entry_nr, type;
+ int head, entry_nr, type;
unsigned long base = 0L;
unsigned long flags;
}
type = entry->msi_attrib.type;
entry_nr = entry->msi_attrib.entry_nr;
+ head = entry->link.head;
base = entry->mask_base;
- if (entry->link.tail != entry->link.head) {
- msi_desc[entry->link.head]->link.tail = entry->link.tail;
- if (entry->link.tail)
- msi_desc[entry->link.tail]->link.head = entry->link.head;
- }
+ msi_desc[entry->link.head]->link.tail = entry->link.tail;
+ msi_desc[entry->link.tail]->link.head = entry->link.head;
entry->dev = NULL;
- vector_irq[vector] = 0;
- nr_released_vectors++;
+ if (!reassign) {
+ vector_irq[vector] = 0;
+ nr_released_vectors++;
+ }
msi_desc[vector] = NULL;
spin_unlock_irqrestore(&msi_lock, flags);
kmem_cache_free(msi_cachep, entry);
+
if (type == PCI_CAP_ID_MSIX) {
- int offset;
+ if (!reassign)
+ writel(1, base +
+ entry_nr * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+
+ if (head == vector) {
+ /*
+ * Detect last MSI-X vector to be released.
+ * Release the MSI-X memory-mapped table.
+ */
+ int pos, nr_entries;
+ u32 phys_addr, table_offset;
+ u16 control;
+ u8 bir;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ pci_read_config_word(dev, msi_control_reg(pos),
+ &control);
+ nr_entries = multi_msix_capable(control);
+ pci_read_config_dword(dev, msix_table_offset_reg(pos),
+ &table_offset);
+ bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
+ phys_addr = pci_resource_start (dev, bir);
+ phys_addr += (u32)(table_offset &
+ ~PCI_MSIX_FLAGS_BIRMASK);
+ iounmap((void*)base);
+ release_mem_region(phys_addr,
+ nr_entries * PCI_MSIX_ENTRY_SIZE);
+ }
+ }
- offset = entry_nr * PCI_MSIX_ENTRY_SIZE;
- writel(1, base + offset + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
- writel(0, base + offset + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
+ return 0;
+}
+
+static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
+{
+ int vector = head, tail = 0;
+ int i = 0, j = 0, nr_entries = 0;
+ unsigned long base = 0L;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msi_lock, flags);
+ while (head != tail) {
+ nr_entries++;
+ tail = msi_desc[vector]->link.tail;
+ if (entries[0].entry == msi_desc[vector]->msi_attrib.entry_nr)
+ j = vector;
+ vector = tail;
}
+ if (*nvec > nr_entries) {
+ spin_unlock_irqrestore(&msi_lock, flags);
+ *nvec = nr_entries;
+ return -EINVAL;
+ }
+ vector = ((j > 0) ? j : head);
+ for (i = 0; i < *nvec; i++) {
+ j = msi_desc[vector]->msi_attrib.entry_nr;
+ msi_desc[vector]->msi_attrib.state = 0; /* Mark it not active */
+ vector_irq[vector] = -1; /* Mark it busy */
+ nr_released_vectors--;
+ entries[i].vector = vector;
+ if (j != (entries + i)->entry) {
+ base = msi_desc[vector]->mask_base;
+ msi_desc[vector]->msi_attrib.entry_nr =
+ (entries + i)->entry;
+ writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET), base +
+ (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
+ writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET), base +
+ (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
+ writel( (readl(base + j * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_DATA_OFFSET) & 0xff00) | vector,
+ base + (entries+i)->entry*PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_DATA_OFFSET);
+ }
+ vector = msi_desc[vector]->link.tail;
+ }
+ spin_unlock_irqrestore(&msi_lock, flags);
return 0;
}
/**
- * msi_alloc_vectors - allocate additional MSI-X vectors
+ * pci_enable_msix - configure device's MSI-X capability structure
* @dev: pointer to the pci_dev data structure of MSI-X device function
- * @vector: pointer to an array of new allocated MSI-X vectors
+ * @data: pointer to an array of MSI-X entries
* @nvec: number of MSI-X vectors requested for allocation by device driver
*
- * Allocate additional MSI-X vectors requested by device driver. A
- * return of zero indicates the successful setup of MSI-X capability
- * structure with new allocated MSI-X vectors or non-zero for otherwise.
+ * Setup the MSI-X capability structure of device function with the number
+ * of requested vectors upon its software driver call to request for
+ * MSI-X mode enabled on its hardware device function. A return of zero
+ * indicates the successful configuration of MSI-X capability structure
+ * with new allocated MSI-X vectors. A return of < 0 indicates a failure.
+ * Or a return of > 0 indicates that driver request is exceeding the number
+ * of vectors available. Driver should use the returned value to re-send
+ * its request.
**/
-int msi_alloc_vectors(struct pci_dev* dev, int *vector, int nvec)
+int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
- struct msi_desc *entry;
- int i, head, pos, vec, free_vectors, alloc_vectors;
- int *vectors = (int *)vector;
- u32 control;
+ int status, pos, nr_entries, free_vectors;
+ int i, j, temp;
+ u16 control;
unsigned long flags;
- if (!pci_msi_enable || !dev)
+ if (!pci_msi_enable || !dev || !entries)
return -EINVAL;
+ if ((status = msi_init()) < 0)
+ return status;
+
if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)))
return -EINVAL;
- dev->bus->ops->read(dev->bus, dev->devfn, msi_control_reg(pos), 2, &control);
- if (nvec > multi_msix_capable(control))
+ pci_read_config_word(dev, msi_control_reg(pos), &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE)
+ return -EINVAL; /* Already in MSI-X mode */
+
+ nr_entries = multi_msix_capable(control);
+ if (nvec > nr_entries)
return -EINVAL;
- spin_lock_irqsave(&msi_lock, flags);
- entry = msi_desc[dev->irq];
- if (!entry || entry->dev != dev || /* legal call */
- entry->msi_attrib.type != PCI_CAP_ID_MSIX || /* must be MSI-X */
- entry->link.head != entry->link.tail) { /* already multi */
- spin_unlock_irqrestore(&msi_lock, flags);
+ /* Check for any invalid entries */
+ for (i = 0; i < nvec; i++) {
+ if (entries[i].entry >= nr_entries)
+ return -EINVAL; /* invalid entry */
+ for (j = i + 1; j < nvec; j++) {
+ if (entries[i].entry == entries[j].entry)
+ return -EINVAL; /* duplicate entry */
+ }
+ }
+ temp = dev->irq;
+ if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
+ /* Lookup Success */
+ nr_entries = nvec;
+ /* Reroute MSI-X table */
+ if (reroute_msix_table(dev->irq, entries, &nr_entries)) {
+ /* #requested > #previous-assigned */
+ dev->irq = temp;
+ return nr_entries;
+ }
+ dev->irq = temp;
+ enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
+ return 0;
+ }
+ /* Check whether driver already requested for MSI vector */
+ if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
+ !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
+ printk(KERN_INFO "Can't enable MSI-X. Device already had MSI vector assigned\n");
+ dev->irq = temp;
return -EINVAL;
}
+
+ spin_lock_irqsave(&msi_lock, flags);
/*
* msi_lock is provided to ensure that enough vectors resources are
* available before granting.
free_vectors /= nr_msix_devices;
spin_unlock_irqrestore(&msi_lock, flags);
- if (nvec > free_vectors)
- return -EBUSY;
-
- alloc_vectors = 0;
- head = dev->irq;
- for (i = 0; i < nvec; i++) {
- if ((vec = msi_alloc_vector(dev, head)) < 0)
- break;
- *(vectors + i) = vec;
- head = vec;
- alloc_vectors++;
- }
- if (alloc_vectors != nvec) {
- for (i = 0; i < alloc_vectors; i++) {
- vec = *(vectors + i);
- msi_free_vector(dev, vec);
- }
- spin_lock_irqsave(&msi_lock, flags);
- msi_desc[dev->irq]->link.tail = msi_desc[dev->irq]->link.head;
- spin_unlock_irqrestore(&msi_lock, flags);
- return -EBUSY;
+ if (nvec > free_vectors) {
+ if (free_vectors > 0)
+ return free_vectors;
+ else
+ return -EBUSY;
}
- if (nr_msix_devices > 0)
+
+ status = msix_capability_init(dev, entries, nvec);
+ if (!status && nr_msix_devices > 0)
nr_msix_devices--;
- return 0;
+ return status;
}
-/**
- * msi_free_vectors - reclaim MSI-X vectors to unused state
- * @dev: pointer to the pci_dev data structure of MSI-X device function
- * @vector: pointer to an array of released MSI-X vectors
- * @nvec: number of MSI-X vectors requested for release by device driver
- *
- * Reclaim MSI-X vectors released by device driver to unused state,
- * which may be used later on. A return of zero indicates the
- * success or non-zero for otherwise. Device driver should call this
- * before calling function free_irq.
- **/
-int msi_free_vectors(struct pci_dev* dev, int *vector, int nvec)
+void pci_disable_msix(struct pci_dev* dev)
{
- struct msi_desc *entry;
- int i;
- unsigned long flags;
+ int pos, temp;
+ u16 control;
- if (!pci_msi_enable)
- return -EINVAL;
+ if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)))
+ return;
- spin_lock_irqsave(&msi_lock, flags);
- entry = msi_desc[dev->irq];
- if (!entry || entry->dev != dev ||
- entry->msi_attrib.type != PCI_CAP_ID_MSIX ||
- entry->link.head == entry->link.tail) { /* Nothing to free */
+ pci_read_config_word(dev, msi_control_reg(pos), &control);
+ if (!(control & PCI_MSIX_FLAGS_ENABLE))
+ return;
+
+ temp = dev->irq;
+ if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
+ int state, vector, head, tail = 0, warning = 0;
+ unsigned long flags;
+
+ vector = head = dev->irq;
+ spin_lock_irqsave(&msi_lock, flags);
+ while (head != tail) {
+ state = msi_desc[vector]->msi_attrib.state;
+ if (state)
+ warning = 1;
+ else {
+ vector_irq[vector] = 0; /* free it */
+ nr_released_vectors++;
+ }
+ tail = msi_desc[vector]->link.tail;
+ vector = tail;
+ }
spin_unlock_irqrestore(&msi_lock, flags);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&msi_lock, flags);
+ if (warning) {
+ dev->irq = temp;
+ printk(KERN_DEBUG "Driver[%d:%d:%d] unloaded wo doing free_irq on all vectors\n",
+ dev->bus->number, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+ BUG_ON(warning > 0);
+ } else {
+ dev->irq = temp;
+ disable_msi_mode(dev,
+ pci_find_capability(dev, PCI_CAP_ID_MSIX),
+ PCI_CAP_ID_MSIX);
- for (i = 0; i < nvec; i++) {
- if (*(vector + i) == dev->irq)
- continue;/* Don't free entry 0 if mistaken by driver */
- msi_free_vector(dev, *(vector + i));
+ }
}
-
- return 0;
}
/**
**/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
- struct msi_desc *entry;
- int type, temp;
+ int state, pos, temp;
unsigned long flags;
if (!pci_msi_enable || !dev)
return;
- if (!pci_find_capability(dev, PCI_CAP_ID_MSI)) {
- if (!pci_find_capability(dev, PCI_CAP_ID_MSIX))
- return;
- }
- temp = dev->irq;
- if (msi_lookup_vector(dev))
- return;
-
- spin_lock_irqsave(&msi_lock, flags);
- entry = msi_desc[dev->irq];
- if (!entry || entry->dev != dev) {
+ temp = dev->irq; /* Save IOAPIC IRQ */
+ if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSI)) > 0 &&
+ !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
+ spin_lock_irqsave(&msi_lock, flags);
+ state = msi_desc[dev->irq]->msi_attrib.state;
spin_unlock_irqrestore(&msi_lock, flags);
- return;
+ if (state) {
+ printk(KERN_DEBUG "Driver[%d:%d:%d] unloaded wo doing free_irq on vector->%d\n",
+ dev->bus->number, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn), dev->irq);
+ BUG_ON(state > 0);
+ } else /* Release MSI vector assigned to this device */
+ msi_free_vector(dev, dev->irq, 0);
+ dev->irq = temp; /* Restore IOAPIC IRQ */
}
- type = entry->msi_attrib.type;
- spin_unlock_irqrestore(&msi_lock, flags);
+ if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 &&
+ !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
+ int vector, head, tail = 0, warning = 0;
+ unsigned long base = 0L;
- msi_free_vector(dev, dev->irq);
- if (type == PCI_CAP_ID_MSIX) {
- int i, pos, dev_msi_cap;
- u32 phys_addr, table_offset;
- u32 control;
- u8 bir;
-
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- dev->bus->ops->read(dev->bus, dev->devfn, msi_control_reg(pos), 2, &control);
- dev_msi_cap = multi_msix_capable(control);
- dev->bus->ops->read(dev->bus, dev->devfn,
- msix_table_offset_reg(pos), 4, &table_offset);
- bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
- phys_addr = pci_resource_start (dev, bir);
- phys_addr += (u32)(table_offset & ~PCI_MSIX_FLAGS_BIRMASK);
- for (i = FIRST_DEVICE_VECTOR; i < NR_IRQS; i++) {
+ vector = head = dev->irq;
+ while (head != tail) {
spin_lock_irqsave(&msi_lock, flags);
- if (!msi_desc[i] || msi_desc[i]->dev != dev) {
- spin_unlock_irqrestore(&msi_lock, flags);
- continue;
- }
+ state = msi_desc[vector]->msi_attrib.state;
+ tail = msi_desc[vector]->link.tail;
+ base = msi_desc[vector]->mask_base;
spin_unlock_irqrestore(&msi_lock, flags);
- msi_free_vector(dev, i);
+ if (state)
+ warning = 1;
+ else if (vector != head) /* Release MSI-X vector */
+ msi_free_vector(dev, vector, 0);
+ vector = tail;
+ }
+ msi_free_vector(dev, vector, 0);
+ if (warning) {
+ /* Force to release the MSI-X memory-mapped table */
+ u32 phys_addr, table_offset;
+ u16 control;
+ u8 bir;
+
+ pci_read_config_word(dev, msi_control_reg(pos),
+ &control);
+ pci_read_config_dword(dev, msix_table_offset_reg(pos),
+ &table_offset);
+ bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
+ phys_addr = pci_resource_start (dev, bir);
+ phys_addr += (u32)(table_offset &
+ ~PCI_MSIX_FLAGS_BIRMASK);
+ iounmap((void*)base);
+ release_mem_region(phys_addr, PCI_MSIX_ENTRY_SIZE *
+ multi_msix_capable(control));
+ printk(KERN_DEBUG "Driver[%d:%d:%d] unloaded wo doing free_irq on all vectors\n",
+ dev->bus->number, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+ BUG_ON(warning > 0);
}
- writel(1, entry->mask_base + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
- iounmap((void*)entry->mask_base);
- release_mem_region(phys_addr, dev_msi_cap * PCI_MSIX_ENTRY_SIZE);
+ dev->irq = temp; /* Restore IOAPIC IRQ */
}
- dev->irq = temp;
- nr_reserved_vectors++;
}
EXPORT_SYMBOL(pci_enable_msi);
-EXPORT_SYMBOL(msi_alloc_vectors);
-EXPORT_SYMBOL(msi_free_vectors);
+EXPORT_SYMBOL(pci_disable_msi);
+EXPORT_SYMBOL(pci_enable_msix);
+EXPORT_SYMBOL(pci_disable_msix);
struct {
__u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */
__u8 maskbit : 1; /* mask-pending bit supported ? */
- __u8 reserved: 2; /* reserved */
+ __u8 state : 1; /* {0: free, 1: busy} */
+ __u8 reserved: 1; /* reserved */
__u8 entry_nr; /* specific enabled entry */
__u8 default_vector; /* default pre-assigned vector */
__u8 current_cpu; /* current destination cpu */
case 0x8070: /* P4G8X Deluxe */
asus_hides_smbus = 1;
}
+ if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
+ switch (dev->subsystem_device) {
+ case 0x1751: /* M2N notebook */
+ asus_hides_smbus = 1;
+ }
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
switch(dev->subsystem_device) {
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge },
+ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc },
file under either the MPL or the GPL.
======================================================================*/
-/*
- * Please see linux/Documentation/arm/SA1100/PCMCIA for more information
- * on the low-level kernel interface.
- */
#include <linux/module.h>
#include <linux/init.h>
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_EVENT, 0, PNP_TS1, PNP_DS, 0, 0 ,0 ,0,
- event, sizeof(u16), 0, 0);
+ event, sizeof(u16), NULL, 0);
return status;
}
#endif
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0,
- info, 65536, 0, 0);
+ info, 65536, NULL, 0);
return status;
}
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_PNP_ISA_CONFIG_STRUC, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0,
- data, sizeof(struct pnp_isa_config_struc), 0, 0);
+ data, sizeof(struct pnp_isa_config_struc), NULL, 0);
return status;
}
if (!pnp_bios_present())
return ESCD_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_ESCD_INFO, 0, PNP_TS1, 2, PNP_TS1, 4, PNP_TS1, PNP_DS,
- data, sizeof(struct escd_info_struc), 0, 0);
+ data, sizeof(struct escd_info_struc), NULL, 0);
return status;
}
DBF_EVENT(6, "TCHAR:read\n");
device = (struct tape_device *) filp->private_data;
- /* Check position. */
- if (ppos != &filp->f_pos) {
- /*
- * "A request was outside the capabilities of the device."
- * This check uses internal knowledge about how pread and
- * read work...
- */
- DBF_EVENT(6, "TCHAR:ppos wrong\n");
- return -EOVERFLOW;
- }
/*
* If the tape isn't terminated yet, do it now. And since we then
DBF_EVENT(6, "TCHAR:write\n");
device = (struct tape_device *) filp->private_data;
- /* Check position */
- if (ppos != &filp->f_pos) {
- /* "A request was outside the capabilities of the device." */
- DBF_EVENT(6, "TCHAR:ppos wrong\n");
- return -EOVERFLOW;
- }
/* Find out block size and number of blocks */
if (device->char_data.block_size != 0) {
if (count < device->char_data.block_size) {
rc = tape_open(device);
if (rc == 0) {
filp->private_data = device;
- return 0;
+ return nonseekable_open(inode, filp);
}
tape_put_device(device);
/*
* drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call
- * $Revision: 1.114 $
+ * $Revision: 1.115 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
return -ENOMEM;
memset(chp, 0, sizeof(struct channel_path));
- chps[chpid] = chp;
-
/* fill in status, etc. */
chp->id = chpid;
chp->state = 1;
if (ret) {
printk(KERN_WARNING "%s: could not register %02x\n",
__func__, chpid);
- return ret;
+ goto out_free;
}
ret = device_create_file(&chp->dev, &dev_attr_status);
- if (ret)
+ if (ret) {
device_unregister(&chp->dev);
-
+ goto out_free;
+ } else
+ chps[chpid] = chp;
+ return ret;
+out_free:
+ kfree(chp);
return ret;
}
extern int cio_get_options (struct subchannel *);
extern int cio_modify (struct subchannel *);
/* Use with care. */
+#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
extern void cio_release_console(void);
extern int cio_is_console(int irq);
extern struct subchannel *cio_get_console_subchannel(void);
+#else
+#define cio_is_console(irq) 0
+#define cio_get_console_subchannel() NULL
+#endif
extern int cio_show_msg;
};
if (notify) {
/* Get device online again. */
+ cdev->private->state = DEV_STATE_OFFLINE;
ccw_device_online(cdev);
wake_up(&cdev->private->wait_q);
return;
/*
* drivers/s390/cio/device_ops.c
*
- * $Revision: 1.47 $
+ * $Revision: 1.50 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
+#include <linux/delay.h>
#include <asm/ccwdev.h>
#include <asm/idals.h>
if ((ret == -EBUSY) || (ret == -EACCES)) {
/* Try again later. */
spin_unlock_irq(&sch->lock);
- schedule_timeout(1);
+ msleep(10);
spin_lock_irq(&sch->lock);
continue;
}
break;
/* Try again later. */
spin_unlock_irq(&sch->lock);
- schedule_timeout(1);
+ msleep(10);
spin_lock_irq(&sch->lock);
} while (1);
(irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
ccw_device_path_notoper(cdev);
- if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) {
+ if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
+ (irb->scsw.dstat & DEV_STAT_CHN_END)) {
cdev->private->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
}
#include "ioasm.h"
#include "chsc.h"
-#define VERSION_QDIO_C "$Revision: 1.83 $"
+#define VERSION_QDIO_C "$Revision: 1.84 $"
/****************** MODULE PARAMETER VARIABLES ********************/
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
static debug_info_t *qdio_dbf_sbal;
static debug_info_t *qdio_dbf_trace;
static debug_info_t *qdio_dbf_sense;
-#ifdef QDIO_DBF_LIKE_HELL
+#ifdef CONFIG_QDIO_DEBUG
static debug_info_t *qdio_dbf_slsb_out;
static debug_info_t *qdio_dbf_slsb_in;
-#endif /* QDIO_DBF_LIKE_HELL */
+#endif /* CONFIG_QDIO_DEBUG */
/* iQDIO stuff: */
static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
qdio_is_outbound_q_done(struct qdio_q *q)
{
int no_used;
+#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
+#endif
no_used=atomic_read(&q->number_of_buffers_used);
+#ifdef CONFIG_QDIO_DEBUG
if (no_used) {
sprintf(dbf_text,"oqisnt%02x",no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text);
QDIO_DBF_TEXT4(0,trace,"oqisdone");
}
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+#endif /* CONFIG_QDIO_DEBUG */
return (no_used==0);
}
qdio_kick_outbound_q(struct qdio_q *q)
{
int result;
+#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
QDIO_DBF_TEXT4(0,trace,"kickoutq");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+#endif /* CONFIG_QDIO_DEBUG */
if (!q->siga_out)
return;
switch (result) {
case 0:
- /* went smooth this time, reset timestamp */
+ /* went smooth this time, reset timestamp */
+#ifdef CONFIG_QDIO_DEBUG
QDIO_DBF_TEXT3(0,trace,"cc2reslv");
sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
atomic_read(&q->busy_siga_counter));
QDIO_DBF_TEXT3(0,trace,dbf_text);
q->timing.busy_start=0;
+#endif /* CONFIG_QDIO_DEBUG */
break;
case (2|QDIO_SIGA_ERROR_B_BIT_SET):
/* cc=2 and busy bit: */
- atomic_inc(&q->busy_siga_counter);
+ atomic_inc(&q->busy_siga_counter);
/* if the last siga was successful, save
* timestamp here */
break;
}
QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
+#ifdef CONFIG_QDIO_DEBUG
sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
atomic_read(&q->busy_siga_counter));
QDIO_DBF_TEXT3(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
/* else fallthrough and report error */
default:
/* for plain cc=1, 2 or 3: */
qdio_kick_outbound_handler(struct qdio_q *q)
{
int start, end, real_end, count;
+#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
+#endif
start = q->first_element_to_kick;
/* last_move_ftc was just updated */
count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
(QDIO_MAX_BUFFERS_PER_Q-1);
+#ifdef CONFIG_QDIO_DEBUG
QDIO_DBF_TEXT4(0,trace,"kickouth");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
sprintf(dbf_text,"s=%2xc=%2x",start,count);
QDIO_DBF_TEXT4(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
if (q->state==QDIO_IRQ_STATE_ACTIVE)
q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
int f,f_mod_no;
volatile char *slsb;
int first_not_to_check;
+#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
+#endif /* CONFIG_QDIO_DEBUG */
#ifdef QDIO_USE_PROCESSING_STATE
int last_position=-1;
#endif /* QDIO_USE_PROCESSING_STATE */
/* P_ERROR means frontier is reached, break and report error */
case SLSB_P_INPUT_ERROR:
+#ifdef CONFIG_QDIO_DEBUG
sprintf(dbf_text,"inperr%2x",f_mod_no);
QDIO_DBF_TEXT3(1,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
/* kind of process the buffer */
iqdio_is_inbound_q_done(struct qdio_q *q)
{
int no_used;
+#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
+#endif
no_used=atomic_read(&q->number_of_buffers_used);
/* propagate the change from 82 to 80 through VM */
SYNC_MEMORY;
+#ifdef CONFIG_QDIO_DEBUG
if (no_used) {
sprintf(dbf_text,"iqisnt%02x",no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text);
QDIO_DBF_TEXT4(0,trace,"iniqisdo");
}
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+#endif /* CONFIG_QDIO_DEBUG */
if (!no_used)
return 1;
qdio_is_inbound_q_done(struct qdio_q *q)
{
int no_used;
+#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
+#endif
no_used=atomic_read(&q->number_of_buffers_used);
* has (probably) not moved (see qdio_inbound_processing)
*/
if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
+#ifdef CONFIG_QDIO_DEBUG
QDIO_DBF_TEXT4(0,trace,"inqisdon");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
return 1;
} else {
+#ifdef CONFIG_QDIO_DEBUG
QDIO_DBF_TEXT4(0,trace,"inqisntd");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
return 0;
}
}
qdio_kick_inbound_handler(struct qdio_q *q)
{
int count, start, end, real_end, i;
+#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
+#endif
QDIO_DBF_TEXT4(0,trace,"kickinh");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
}
+#ifdef CONFIG_QDIO_DEBUG
sprintf(dbf_text,"s=%2xc=%2x",start,count);
QDIO_DBF_TEXT4(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
q->handler(q->cdev,
qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
{
int i;
+#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
QDIO_DBF_TEXT5(0,trace,"newstate");
sprintf(dbf_text,"%4x%4x",irq_ptr->irq,state);
QDIO_DBF_TEXT5(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
irq_ptr->state=state;
for (i=0;i<irq_ptr->no_input_qs;i++)
int cstat,dstat;
char dbf_text[15];
+#ifdef CONFIG_QDIO_DEBUG
QDIO_DBF_TEXT4(0, trace, "qint");
sprintf(dbf_text, "%s", cdev->dev.bus_id);
QDIO_DBF_TEXT4(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
if (!intparm) {
QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
qdio_irq_check_sense(irq_ptr->irq, irb);
+#ifdef CONFIG_QDIO_DEBUG
sprintf(dbf_text, "state:%d", irq_ptr->state);
QDIO_DBF_TEXT4(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
cstat = irb->scsw.cstat;
dstat = irb->scsw.dstat;
int cc;
struct qdio_q *q;
struct qdio_irq *irq_ptr;
- char dbf_text[15]="SyncXXXX";
void *ptr;
+#ifdef CONFIG_QDIO_DEBUG
+ char dbf_text[15]="SyncXXXX";
+#endif
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
+#ifdef CONFIG_QDIO_DEBUG
*((int*)(&dbf_text[4])) = irq_ptr->irq;
QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
*((int*)(&dbf_text[0]))=flags;
*((int*)(&dbf_text[4]))=queue_number;
QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
+#endif /* CONFIG_QDIO_DEBUG */
if (flags&QDIO_FLAG_SYNC_INPUT) {
q=irq_ptr->input_qs[queue_number];
unsigned int count,struct qdio_buffer *buffers)
{
struct qdio_irq *irq_ptr;
-
+#ifdef CONFIG_QDIO_DEBUG
char dbf_text[20];
sprintf(dbf_text,"doQD%04x",cdev->private->irq);
- QDIO_DBF_TEXT3(0,trace,dbf_text);
+ QDIO_DBF_TEXT3(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) ||
(count>QDIO_MAX_BUFFERS_PER_Q) ||
if (!irq_ptr)
return -ENODEV;
+#ifdef CONFIG_QDIO_DEBUG
if (callflags&QDIO_FLAG_SYNC_INPUT)
QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
sizeof(void*));
QDIO_DBF_TEXT3(0,trace,dbf_text);
sprintf(dbf_text,"qi%02xct%02x",qidx,count);
QDIO_DBF_TEXT3(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
return -EBUSY;
debug_unregister(qdio_dbf_sense);
if (qdio_dbf_trace)
debug_unregister(qdio_dbf_trace);
-#ifdef QDIO_DBF_LIKE_HELL
+#ifdef CONFIG_QDIO_DEBUG
if (qdio_dbf_slsb_out)
debug_unregister(qdio_dbf_slsb_out);
if (qdio_dbf_slsb_in)
debug_unregister(qdio_dbf_slsb_in);
-#endif /* QDIO_DBF_LIKE_HELL */
+#endif /* CONFIG_QDIO_DEBUG */
}
static int
debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view);
debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL);
-#ifdef QDIO_DBF_LIKE_HELL
+#ifdef CONFIG_QDIO_DEBUG
qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME,
QDIO_DBF_SLSB_OUT_INDEX,
QDIO_DBF_SLSB_OUT_NR_AREAS,
goto oom;
debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view);
debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL);
-#endif /* QDIO_DBF_LIKE_HELL */
+#endif /* CONFIG_QDIO_DEBUG */
return 0;
oom:
QDIO_PRINT_ERR("not enough memory for dbf.\n");
#ifndef _CIO_QDIO_H
#define _CIO_QDIO_H
-#define VERSION_CIO_QDIO_H "$Revision: 1.24 $"
+#define VERSION_CIO_QDIO_H "$Revision: 1.26 $"
-//#define QDIO_DBF_LIKE_HELL
-
-#ifdef QDIO_DBF_LIKE_HELL
+#ifdef CONFIG_QDIO_DEBUG
#define QDIO_VERBOSE_LEVEL 9
-#else /* QDIO_DBF_LIKE_HELL */
+#else /* CONFIG_QDIO_DEBUG */
#define QDIO_VERBOSE_LEVEL 5
-#endif /* QDIO_DBF_LIKE_HELL */
+#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_USE_PROCESSING_STATE
#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len)
#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len)
#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len)
-#ifdef QDIO_DBF_LIKE_HELL
+#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len)
#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len)
#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len)
#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len)
-#else /* QDIO_DBF_LIKE_HELL */
+#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0)
-#endif /* QDIO_DBF_LIKE_HELL */
+#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text)
#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text)
#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text)
-#ifdef QDIO_DBF_LIKE_HELL
+#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text)
#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text)
#define QDIO_DBF_TEXT5(ex,name,text) QDIO_DBF_TEXT(ex,name,5,text)
#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text)
-#else /* QDIO_DBF_LIKE_HELL */
+#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0)
-#endif /* QDIO_DBF_LIKE_HELL */
+#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SETUP_NAME "qdio_setup"
#define QDIO_DBF_SETUP_LEN 8
#define QDIO_DBF_SETUP_INDEX 2
#define QDIO_DBF_SETUP_NR_AREAS 1
-#ifdef QDIO_DBF_LIKE_HELL
+#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_SETUP_LEVEL 6
-#else /* QDIO_DBF_LIKE_HELL */
+#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SETUP_LEVEL 2
-#endif /* QDIO_DBF_LIKE_HELL */
+#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */
#define QDIO_DBF_SBAL_LEN 256
#define QDIO_DBF_SBAL_INDEX 2
#define QDIO_DBF_SBAL_NR_AREAS 2
-#ifdef QDIO_DBF_LIKE_HELL
+#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_SBAL_LEVEL 6
-#else /* QDIO_DBF_LIKE_HELL */
+#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SBAL_LEVEL 2
-#endif /* QDIO_DBF_LIKE_HELL */
+#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TRACE_NAME "qdio_trace"
#define QDIO_DBF_TRACE_LEN 8
#define QDIO_DBF_TRACE_NR_AREAS 2
-#ifdef QDIO_DBF_LIKE_HELL
+#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_TRACE_INDEX 4
#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */
-#else /* QDIO_DBF_LIKE_HELL */
+#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TRACE_INDEX 2
#define QDIO_DBF_TRACE_LEVEL 2
-#endif /* QDIO_DBF_LIKE_HELL */
+#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SENSE_NAME "qdio_sense"
#define QDIO_DBF_SENSE_LEN 64
#define QDIO_DBF_SENSE_INDEX 1
#define QDIO_DBF_SENSE_NR_AREAS 1
-#ifdef QDIO_DBF_LIKE_HELL
+#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_SENSE_LEVEL 6
-#else /* QDIO_DBF_LIKE_HELL */
+#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SENSE_LEVEL 2
-#endif /* QDIO_DBF_LIKE_HELL */
+#endif /* CONFIG_QDIO_DEBUG */
-#ifdef QDIO_DBF_LIKE_HELL
+#ifdef CONFIG_QDIO_DEBUG
#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT
#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out"
#define QDIO_DBF_SLSB_IN_INDEX 8
#define QDIO_DBF_SLSB_IN_NR_AREAS 1
#define QDIO_DBF_SLSB_IN_LEVEL 6
-#endif /* QDIO_DBF_LIKE_HELL */
+#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_PRINTK_HEADER QDIO_NAME ": "
#define QDIO_GET_ADDR(x) ((__u32)(long)x)
#endif /* CONFIG_ARCH_S390X */
-#ifdef QDIO_DBF_LIKE_HELL
+#ifdef CONFIG_QDIO_DEBUG
#define set_slsb(x,y) \
if(q->queue_type==QDIO_TRACE_QTYPE) { \
if(q->is_input_q) { \
QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
} \
}
-#else /* QDIO_DBF_LIKE_HELL */
+#else /* CONFIG_QDIO_DEBUG */
#define set_slsb(x,y) qdio_set_slsb(x,y)
-#endif /* QDIO_DBF_LIKE_HELL */
+#endif /* CONFIG_QDIO_DEBUG */
struct qdio_q {
volatile struct slsb slsb;
/*
*
- * linux/drivers/s390/net/ctcdbug.c ($Revision: 1.1 $)
+ * linux/drivers/s390/net/ctcdbug.c ($Revision: 1.4 $)
*
- * Linux on zSeries OSA Express and HiperSockets support
+ * CTC / ESCON network driver - s390 dbf exploit.
*
* Copyright 2000,2003 IBM Corporation
*
* Author(s): Original Code written by
* Peter Tiedemann (ptiedem@de.ibm.com)
*
- * $Revision: 1.1 $ $Date: 2004/07/02 16:31:22 $
+ * $Revision: 1.4 $ $Date: 2004/08/04 10:11:59 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
/**
* Debug Facility Stuff
*/
-debug_info_t *dbf_setup = NULL;
-debug_info_t *dbf_data = NULL;
-debug_info_t *dbf_trace = NULL;
+debug_info_t *ctc_dbf_setup = NULL;
+debug_info_t *ctc_dbf_data = NULL;
+debug_info_t *ctc_dbf_trace = NULL;
-DEFINE_PER_CPU(char[256], dbf_txt_buf);
+DEFINE_PER_CPU(char[256], ctc_dbf_txt_buf);
void
-unregister_dbf_views(void)
+ctc_unregister_dbf_views(void)
{
- if (dbf_setup)
- debug_unregister(dbf_setup);
- if (dbf_data)
- debug_unregister(dbf_data);
- if (dbf_trace)
- debug_unregister(dbf_trace);
+ if (ctc_dbf_setup)
+ debug_unregister(ctc_dbf_setup);
+ if (ctc_dbf_data)
+ debug_unregister(ctc_dbf_data);
+ if (ctc_dbf_trace)
+ debug_unregister(ctc_dbf_trace);
}
int
-register_dbf_views(void)
+ctc_register_dbf_views(void)
{
- dbf_setup = debug_register(CTC_DBF_SETUP_NAME,
+ ctc_dbf_setup = debug_register(CTC_DBF_SETUP_NAME,
CTC_DBF_SETUP_INDEX,
CTC_DBF_SETUP_NR_AREAS,
CTC_DBF_SETUP_LEN);
- dbf_data = debug_register(CTC_DBF_DATA_NAME,
+ ctc_dbf_data = debug_register(CTC_DBF_DATA_NAME,
CTC_DBF_DATA_INDEX,
CTC_DBF_DATA_NR_AREAS,
CTC_DBF_DATA_LEN);
- dbf_trace = debug_register(CTC_DBF_TRACE_NAME,
+ ctc_dbf_trace = debug_register(CTC_DBF_TRACE_NAME,
CTC_DBF_TRACE_INDEX,
CTC_DBF_TRACE_NR_AREAS,
CTC_DBF_TRACE_LEN);
- if ((dbf_setup == NULL) || (dbf_data == NULL) ||
- (dbf_trace == NULL)) {
- unregister_dbf_views();
+ if ((ctc_dbf_setup == NULL) || (ctc_dbf_data == NULL) ||
+ (ctc_dbf_trace == NULL)) {
+ ctc_unregister_dbf_views();
return -ENOMEM;
}
- debug_register_view(dbf_setup, &debug_hex_ascii_view);
- debug_set_level(dbf_setup, CTC_DBF_SETUP_LEVEL);
+ debug_register_view(ctc_dbf_setup, &debug_hex_ascii_view);
+ debug_set_level(ctc_dbf_setup, CTC_DBF_SETUP_LEVEL);
- debug_register_view(dbf_data, &debug_hex_ascii_view);
- debug_set_level(dbf_data, CTC_DBF_DATA_LEVEL);
+ debug_register_view(ctc_dbf_data, &debug_hex_ascii_view);
+ debug_set_level(ctc_dbf_data, CTC_DBF_DATA_LEVEL);
- debug_register_view(dbf_trace, &debug_hex_ascii_view);
- debug_set_level(dbf_trace, CTC_DBF_TRACE_LEVEL);
+ debug_register_view(ctc_dbf_trace, &debug_hex_ascii_view);
+ debug_set_level(ctc_dbf_trace, CTC_DBF_TRACE_LEVEL);
return 0;
}
/*
*
- * linux/drivers/s390/net/ctcdbug.h ($Revision: 1.1 $)
+ * linux/drivers/s390/net/ctcdbug.h ($Revision: 1.3 $)
*
- * Linux on zSeries OSA Express and HiperSockets support
+ * CTC / ESCON network driver - s390 dbf exploit.
*
* Copyright 2000,2003 IBM Corporation
*
* Author(s): Original Code written by
* Peter Tiedemann (ptiedem@de.ibm.com)
*
- * $Revision: 1.1 $ $Date: 2004/07/02 16:31:22 $
+ * $Revision: 1.3 $ $Date: 2004/07/28 12:27:54 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#define DBF_TEXT(name,level,text) \
do { \
- debug_text_event(dbf_##name,level,text); \
+ debug_text_event(ctc_dbf_##name,level,text); \
} while (0)
#define DBF_HEX(name,level,addr,len) \
do { \
- debug_event(dbf_##name,level,(void*)(addr),len); \
+ debug_event(ctc_dbf_##name,level,(void*)(addr),len); \
} while (0)
-extern DEFINE_PER_CPU(char[256], dbf_txt_buf);
-extern debug_info_t *dbf_setup;
-extern debug_info_t *dbf_data;
-extern debug_info_t *dbf_trace;
+extern DEFINE_PER_CPU(char[256], ctc_dbf_txt_buf);
+extern debug_info_t *ctc_dbf_setup;
+extern debug_info_t *ctc_dbf_data;
+extern debug_info_t *ctc_dbf_trace;
#define DBF_TEXT_(name,level,text...) \
do { \
- char* dbf_txt_buf = get_cpu_var(dbf_txt_buf); \
- sprintf(dbf_txt_buf, text); \
- debug_text_event(dbf_##name,level,dbf_txt_buf); \
- put_cpu_var(dbf_txt_buf); \
+ char* ctc_dbf_txt_buf = get_cpu_var(ctc_dbf_txt_buf); \
+ sprintf(ctc_dbf_txt_buf, text); \
+ debug_text_event(ctc_dbf_##name,level,ctc_dbf_txt_buf); \
+ put_cpu_var(ctc_dbf_txt_buf); \
} while (0)
#define DBF_SPRINTF(name,level,text...) \
do { \
- debug_sprintf_event(dbf_trace, level, ##text ); \
- debug_sprintf_event(dbf_trace, level, text ); \
+ debug_sprintf_event(ctc_dbf_trace, level, ##text ); \
+ debug_sprintf_event(ctc_dbf_trace, level, text ); \
} while (0)
-int register_dbf_views(void);
+int ctc_register_dbf_views(void);
-void unregister_dbf_views(void);
+void ctc_unregister_dbf_views(void);
/**
* some more debug stuff
/*
- * $Id: ctcmain.c,v 1.61 2004/07/02 16:31:22 ptiedem Exp $
+ * $Id: ctcmain.c,v 1.63 2004/07/28 12:27:54 ptiedem Exp $
*
* CTC / ESCON network driver
*
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.61 $
+ * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.63 $
*
*/
\f
print_banner(void)
{
static int printed = 0;
- char vbuf[] = "$Revision: 1.61 $";
+ char vbuf[] = "$Revision: 1.63 $";
char *version = vbuf;
if (printed)
struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
__u16 len = *((__u16 *) pskb->data);
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 4, __FUNCTION__);
skb_put(pskb, 2 + LL_HEADER_LENGTH);
skb_pull(pskb, 2);
pskb->dev = dev;
if (ch->protocol == CTC_PROTO_LINUX_TTY)
ctc_tty_netif_rx(skb);
else
- netif_rx(skb);
+ netif_rx_ni(skb);
/**
* Successful rx; reset logflags
*/
static void inline
ccw_check_return_code(struct channel *ch, int return_code, char *msg)
{
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 5, __FUNCTION__);
switch (return_code) {
case 0:
fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
static void inline
ccw_unit_check(struct channel *ch, unsigned char sense)
{
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 5, __FUNCTION__);
if (sense & SNS0_INTERVENTION_REQ) {
if (sense & 0x01) {
if (ch->protocol != CTC_PROTO_LINUX_TTY)
{
struct sk_buff *skb;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 5, __FUNCTION__);
while ((skb = skb_dequeue(q))) {
atomic_dec(&skb->users);
static __inline__ int
ctc_checkalloc_buffer(struct channel *ch, int warn)
{
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 5, __FUNCTION__);
if ((ch->trans_skb == NULL) ||
(ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
if (ch->trans_skb != NULL)
unsigned long duration;
struct timespec done_stamp = xtime;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 4, __FUNCTION__);
duration =
(done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
{
struct channel *ch = (struct channel *) arg;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 4, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CH_STATE_TXIDLE);
fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
int check_len;
int rc;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 4, __FUNCTION__);
fsm_deltimer(&ch->timer);
if (len < 8) {
ctc_pr_debug("%s: got packet with length %d < 8\n",
struct channel *ch = (struct channel *) arg;
int rc;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 4, __FUNCTION__);
if (fsm_getstate(fi) == CH_STATE_TXIDLE)
ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
__u16 buflen;
int rc;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 4, __FUNCTION__);
fsm_deltimer(&ch->timer);
buflen = *((__u16 *) ch->trans_skb->data);
#ifdef DEBUG
int rc;
unsigned long saveflags;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 4, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
fsm_newstate(fi, CH_STATE_SETUPWAIT);
int rc;
struct net_device *dev;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 4, __FUNCTION__);
if (ch == NULL) {
ctc_pr_warn("ch_action_start ch=NULL\n");
return;
int rc;
int oldstate;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
if (event == CH_EVENT_STOP)
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CH_STATE_STOPPED);
if (ch->trans_skb != NULL) {
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CH_STATE_NOTOP);
if (CHANNEL_DIRECTION(ch->flags) == READ) {
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(setup, 2, __FUNCTION__);
+ DBF_TEXT(setup, 3, __FUNCTION__);
/**
* Special case: Got UC_RCRESET on setmode.
* This means that remote side isn't setup. In this case
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
fsm_deltimer(&ch->timer);
ctc_pr_debug("%s: %s channel restart\n", dev->name,
(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(setup, 2, __FUNCTION__);
+ DBF_TEXT(setup, 3, __FUNCTION__);
if (event == CH_EVENT_TIMER) {
fsm_deltimer(&ch->timer);
ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(setup, 2, __FUNCTION__);
+ DBF_TEXT(setup, 3, __FUNCTION__);
fsm_newstate(fi, CH_STATE_RXERR);
ctc_pr_warn("%s: RX initialization failed\n", dev->name);
ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
struct channel *ch2;
struct net_device *dev = ch->netdev;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
fsm_deltimer(&ch->timer);
ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
dev->name);
struct net_device *dev = ch->netdev;
unsigned long saveflags;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 4, __FUNCTION__);
fsm_deltimer(&ch->timer);
if (ch->retry++ > 3) {
ctc_pr_debug("%s: TX retry failed, restarting channel\n",
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
fsm_deltimer(&ch->timer);
if (CHANNEL_DIRECTION(ch->flags) == READ) {
ctc_pr_debug("%s: RX I/O error\n", dev->name);
struct net_device *dev = ch->netdev;
struct ctc_priv *privptr = dev->priv;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 4, __FUNCTION__);
ch_action_iofatal(fi, event, arg);
fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
}
{
struct channel *ch = channels;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
#ifdef DEBUG
ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
__func__, id, type);
struct net_device *dev;
struct ctc_priv *priv;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 5, __FUNCTION__);
if (__ctc_check_irb_error(cdev, irb))
return;
struct ctc_priv *privptr = dev->priv;
int direction;
- DBF_TEXT(setup, 2, __FUNCTION__);
+ DBF_TEXT(setup, 3, __FUNCTION__);
fsm_deltimer(&privptr->restart_timer);
fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
for (direction = READ; direction <= WRITE; direction++) {
struct ctc_priv *privptr = dev->priv;
int direction;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
for (direction = READ; direction <= WRITE; direction++) {
struct channel *ch = privptr->channel[direction];
struct net_device *dev = (struct net_device *)arg;
struct ctc_priv *privptr = dev->priv;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
ctc_pr_debug("%s: Restarting\n", dev->name);
dev_action_stop(fi, event, arg);
fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
struct net_device *dev = (struct net_device *) arg;
struct ctc_priv *privptr = dev->priv;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
switch (fsm_getstate(fi)) {
case DEV_STATE_STARTWAIT_RXTX:
if (event == DEV_EVENT_RXUP)
struct net_device *dev = (struct net_device *) arg;
struct ctc_priv *privptr = dev->priv;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
switch (fsm_getstate(fi)) {
case DEV_STATE_RUNNING:
if (privptr->protocol == CTC_PROTO_LINUX_TTY)
struct ll_header header;
int rc = 0;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 5, __FUNCTION__);
if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
int l = skb->len + LL_HEADER_LENGTH;
static int
ctc_open(struct net_device * dev)
{
+ DBF_TEXT(trace, 5, __FUNCTION__);
fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
return 0;
}
static int
ctc_close(struct net_device * dev)
{
+ DBF_TEXT(trace, 5, __FUNCTION__);
fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
return 0;
}
int rc = 0;
struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 5, __FUNCTION__);
/**
* Some sanity checks ...
*/
{
struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
if ((new_mtu < 576) || (new_mtu > 65527) ||
(new_mtu > (privptr->channel[READ]->max_bufsize -
LL_HEADER_LENGTH - 2)))
struct net_device *ndev;
int bs1;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 5, __FUNCTION__);
priv = dev->driver_data;
if (!priv)
return -ENODEV;
struct ctc_priv *priv;
int ll1;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 5, __FUNCTION__);
priv = dev->driver_data;
if (!priv)
return -ENODEV;
char *sbuf;
char *p;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 4, __FUNCTION__);
if (!priv)
return;
sbuf = (char *)kmalloc(2048, GFP_KERNEL);
if (!privptr)
return NULL;
- DBF_TEXT(setup, 2, __FUNCTION__);
+ DBF_TEXT(setup, 3, __FUNCTION__);
if (alloc_device) {
dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
if (!dev)
struct ctc_priv *priv;
int value;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
pr_debug("%s() called\n", __FUNCTION__);
priv = dev->driver_data;
int rc;
pr_debug("%s() called\n", __FUNCTION__);
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
if (!get_device(&cgdev->dev))
return -ENODEV;
int ret;
pr_debug("%s() called\n", __FUNCTION__);
- DBF_TEXT(setup, 2, __FUNCTION__);
+ DBF_TEXT(setup, 3, __FUNCTION__);
privptr = cgdev->dev.driver_data;
if (!privptr)
struct ctc_priv *priv;
struct net_device *ndev;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
pr_debug("%s() called\n", __FUNCTION__);
priv = cgdev->dev.driver_data;
struct ctc_priv *priv;
pr_debug("%s() called\n", __FUNCTION__);
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
priv = cgdev->dev.driver_data;
if (!priv)
{
unregister_cu3088_discipline(&ctc_group_driver);
ctc_tty_cleanup();
- unregister_dbf_views();
+ ctc_unregister_dbf_views();
ctc_pr_info("CTC driver unloaded\n");
}
print_banner();
- ret = register_dbf_views();
+ ret = ctc_register_dbf_views();
if (ret){
- ctc_pr_crit("ctc_init failed with register_dbf_views rc = %d\n", ret);
+ ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
return ret;
}
ctc_tty_init();
ret = register_cu3088_discipline(&ctc_group_driver);
if (ret) {
ctc_tty_cleanup();
- unregister_dbf_views();
+ ctc_unregister_dbf_views();
}
return ret;
}
/*
- * $Id: ctctty.c,v 1.21 2004/07/02 16:31:22 ptiedem Exp $
+ * $Id: ctctty.c,v 1.26 2004/08/04 11:06:55 mschwide Exp $
*
* CTC / ESCON network driver, tty interface.
*
#include <linux/tty.h>
#include <linux/serial_reg.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <asm/uaccess.h>
#include <linux/devfs_fs_kernel.h>
#include "ctctty.h"
int len;
struct tty_struct *tty;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 5, __FUNCTION__);
if ((tty = info->tty)) {
if (info->mcr & UART_MCR_RTS) {
c = TTY_FLIPBUF_SIZE - tty->flip.count;
int ret = 1;
struct tty_struct *tty;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 5, __FUNCTION__);
if ((tty = info->tty)) {
if (info->mcr & UART_MCR_RTS) {
int c = TTY_FLIPBUF_SIZE - tty->flip.count;
{
int i;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 4, __FUNCTION__);
if ((!driver) || ctc_tty_shuttingdown)
return;
for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
int i;
ctc_tty_info *info = NULL;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 5, __FUNCTION__);
if (!skb)
return;
if ((!skb->dev) || (!driver) || ctc_tty_shuttingdown) {
int wake = 1;
int rc;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 4, __FUNCTION__);
if (!info->netdev) {
if (skb)
kfree_skb(skb);
int skb_res;
struct sk_buff *skb;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_shuttingdown)
return;
skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
static void
ctc_tty_transmit_status(ctc_tty_info *info)
{
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 5, __FUNCTION__);
if (ctc_tty_shuttingdown)
return;
info->flags |= CTC_ASYNC_TX_LINESTAT;
unsigned int quot;
int i;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
if (!info->tty || !info->tty->termios)
return;
cflag = info->tty->termios->c_cflag;
static int
ctc_tty_startup(ctc_tty_info * info)
{
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
if (info->flags & CTC_ASYNC_INITIALIZED)
return 0;
#ifdef CTC_DEBUG_MODEM_OPEN
static void
ctc_tty_shutdown(ctc_tty_info * info)
{
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 3, __FUNCTION__);
if (!(info->flags & CTC_ASYNC_INITIALIZED))
return;
#ifdef CTC_DEBUG_MODEM_OPEN
int total = 0;
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 5, __FUNCTION__);
if (ctc_tty_shuttingdown)
goto ex;
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write"))
ctc_tty_info *info;
unsigned long flags;
- DBF_TEXT(trace, 2, __FUNCTION__);
+ DBF_TEXT(trace, 4, __FUNCTION__);
if (!tty)
goto ex;
spin_lock_irqsave(&ctc_tty_lock, flags);
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+ DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_shuttingdown)
return;
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_chars"))
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+ DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_throttle"))
return;
info->mcr &= ~UART_MCR_RTS;
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+ DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_unthrottle"))
return;
info->mcr |= UART_MCR_RTS;
uint result;
ulong flags;
+ DBF_TEXT(trace, 4, __FUNCTION__);
spin_lock_irqsave(&ctc_tty_lock, flags);
status = info->lsr;
spin_unlock_irqrestore(&ctc_tty_lock, flags);
uint result;
ulong flags;
+ DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+ DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
int error;
int retval;
+ DBF_TEXT(trace, 4, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
unsigned int cflag = tty->termios->c_cflag;
+
+ DBF_TEXT(trace, 4, __FUNCTION__);
ctc_tty_change_speed(info);
/* Handle transition to B0 */
unsigned long flags;
int retval;
+ DBF_TEXT(trace, 4, __FUNCTION__);
/*
* If the device is in the middle of being closed, then block
* until it's done, and then try again.
int retval,
line;
+ DBF_TEXT(trace, 3, __FUNCTION__);
line = tty->index;
if (line < 0 || line > CTC_TTY_MAX_DEVICES)
return -ENODEV;
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
ulong flags;
ulong timeout;
-
+ DBF_TEXT(trace, 3, __FUNCTION__);
if (!info || ctc_tty_paranoia_check(info, tty->name, "ctc_tty_close"))
return;
spin_lock_irqsave(&ctc_tty_lock, flags);
*/
timeout = jiffies + HZ;
while (!(info->lsr & UART_LSR_TEMT)) {
- set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&ctc_tty_lock, flags);
- schedule_timeout(HZ/2);
+ msleep(500);
spin_lock_irqsave(&ctc_tty_lock, flags);
if (time_after(jiffies,timeout))
break;
{
ctc_tty_info *info = (ctc_tty_info *)tty->driver_data;
unsigned long saveflags;
+ DBF_TEXT(trace, 3, __FUNCTION__);
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_hangup"))
return;
ctc_tty_shutdown(info);
unsigned long saveflags;
int again;
+ DBF_TEXT(trace, 3, __FUNCTION__);
spin_lock_irqsave(&ctc_tty_lock, saveflags);
if ((!ctc_tty_shuttingdown) && info) {
again = ctc_tty_tint(info);
ctc_tty_info *info;
struct tty_driver *device;
+ DBF_TEXT(trace, 2, __FUNCTION__);
driver = kmalloc(sizeof(ctc_tty_driver), GFP_KERNEL);
if (driver == NULL) {
printk(KERN_WARNING "Out of memory in ctc_tty_modem_init\n");
char *err;
char *p;
+ DBF_TEXT(trace, 2, __FUNCTION__);
if ((!dev) || (!dev->name)) {
printk(KERN_WARNING
"ctc_tty_register_netdev called "
unsigned long saveflags;
ctc_tty_info *info = NULL;
+ DBF_TEXT(trace, 2, __FUNCTION__);
spin_lock_irqsave(&ctc_tty_lock, saveflags);
for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
if (driver->info[i].netdev == dev) {
ctc_tty_cleanup(void) {
unsigned long saveflags;
+ DBF_TEXT(trace, 2, __FUNCTION__);
spin_lock_irqsave(&ctc_tty_lock, saveflags);
ctc_tty_shuttingdown = 1;
spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
/*
- * $Id: iucv.c,v 1.38 2004/07/09 15:59:53 mschwide Exp $
+ * $Id: iucv.c,v 1.40 2004/08/04 12:29:33 cborntra Exp $
*
* IUCV network driver
*
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.38 $
+ * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.40 $
*
*/
\f
#include <asm/io.h>
#include <asm/s390_ext.h>
#include <asm/ebcdic.h>
+#include <asm/smp.h>
#include <asm/ccwdev.h> //for root device stuff
/* FLAGS:
static void
iucv_banner(void)
{
- char vbuf[] = "$Revision: 1.38 $";
+ char vbuf[] = "$Revision: 1.40 $";
char *version = vbuf;
if ((version = strchr(version, ':'))) {
iucv_remove_handler(new_handler);
kfree(new_handler);
switch(rc) {
- case -ENODEV:
- err = "No CPU can be reserved";
- break;
case 0x03:
err = "Directory error";
break;
*/
#include <linux/types.h>
+#include <asm/debug.h>
+
+/**
+ * Debug Facility stuff
+ */
+#define IUCV_DBF_SETUP_NAME "iucv_setup"
+#define IUCV_DBF_SETUP_LEN 32
+#define IUCV_DBF_SETUP_INDEX 1
+#define IUCV_DBF_SETUP_NR_AREAS 1
+#define IUCV_DBF_SETUP_LEVEL 3
+
+#define IUCV_DBF_DATA_NAME "iucv_data"
+#define IUCV_DBF_DATA_LEN 128
+#define IUCV_DBF_DATA_INDEX 1
+#define IUCV_DBF_DATA_NR_AREAS 1
+#define IUCV_DBF_DATA_LEVEL 2
+
+#define IUCV_DBF_TRACE_NAME "iucv_trace"
+#define IUCV_DBF_TRACE_LEN 16
+#define IUCV_DBF_TRACE_INDEX 2
+#define IUCV_DBF_TRACE_NR_AREAS 1
+#define IUCV_DBF_TRACE_LEVEL 3
+
+#define IUCV_DBF_TEXT(name,level,text) \
+ do { \
+ debug_text_event(iucv_dbf_##name,level,text); \
+ } while (0)
+
+#define IUCV_DBF_HEX(name,level,addr,len) \
+ do { \
+ debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
+ } while (0)
+
+extern DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
+
+#define IUCV_DBF_TEXT_(name,level,text...) \
+ do { \
+ char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
+ sprintf(iucv_dbf_txt_buf, text); \
+ debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
+ put_cpu_var(iucv_dbf_txt_buf); \
+ } while (0)
+
+/*
+ * IUCV_DBF_SPRINTF - formatted entry into the iucv_dbf_trace debug area.
+ * The 'name' argument is accepted for symmetry with IUCV_DBF_TEXT but is
+ * ignored: output always goes to iucv_dbf_trace.  The event is recorded
+ * exactly once (an earlier version emitted the same event twice).
+ */
+#define IUCV_DBF_SPRINTF(name,level,text...) \
+	do { \
+		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
+	} while (0)
+
+/**
+ * some more debug stuff
+ */
+#define IUCV_HEXDUMP16(importance,header,ptr) \
+PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
+ *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
+ *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
+ *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
+ *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
+ *(((char*)ptr)+12),*(((char*)ptr)+13), \
+ *(((char*)ptr)+14),*(((char*)ptr)+15)); \
+PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
+ *(((char*)ptr)+16),*(((char*)ptr)+17), \
+ *(((char*)ptr)+18),*(((char*)ptr)+19), \
+ *(((char*)ptr)+20),*(((char*)ptr)+21), \
+ *(((char*)ptr)+22),*(((char*)ptr)+23), \
+ *(((char*)ptr)+24),*(((char*)ptr)+25), \
+ *(((char*)ptr)+26),*(((char*)ptr)+27), \
+ *(((char*)ptr)+28),*(((char*)ptr)+29), \
+ *(((char*)ptr)+30),*(((char*)ptr)+31));
+
+/*
+ * iucv_hex_dump() - dump a buffer to the kernel log as hex bytes,
+ * 16 bytes per line.
+ * @buf: bytes to dump (not checked for NULL by this helper).
+ * @len: number of bytes to print; len == 0 prints just a newline.
+ *
+ * Uses printk() without an explicit log level, so output appears at
+ * the default message level.  A trailing newline is always emitted.
+ */
+static inline void
+iucv_hex_dump(unsigned char *buf, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; i++) {
+		if (i && !(i % 16))
+			printk("\n");
+		printk("%02x ", *(buf + i));
+	}
+	printk("\n");
+}
+/**
+ * end of debug stuff
+ */
+
#define uchar unsigned char
#define ushort unsigned short
#define ulong unsigned long
* Frank Pavlic (pavlic@de.ibm.com) and
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*
- * $Revision: 1.83 $ $Date: 2004/06/30 12:48:14 $
+ * $Revision: 1.85 $ $Date: 2004/08/04 11:05:43 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/igmp.h>
+#include <linux/delay.h>
#include <net/arp.h>
#include <net/ip.h>
/**
* initialization string for output
*/
-#define VERSION_LCS_C "$Revision: 1.83 $"
+#define VERSION_LCS_C "$Revision: 1.85 $"
static char version[] __initdata = "LCS driver ("VERSION_LCS_C "/" VERSION_LCS_H ")";
static char debug_buffer[255];
card->dev->name);
return 0;
}
- schedule_timeout(3 * HZ);
+ msleep(3000);
}
PRINT_ERR("Error in Reseting LCS card!\n");
return -EIO;
/*
- * $Id: netiucv.c,v 1.57 2004/06/30 09:26:40 braunu Exp $
+ * $Id: netiucv.c,v 1.63 2004/07/27 13:36:05 mschwide Exp $
*
* IUCV network driver
*
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * RELEASE-TAG: IUCV network driver $Revision: 1.57 $
+ * RELEASE-TAG: IUCV network driver $Revision: 1.63 $
*
*/
\f
MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
\f
+#define PRINTK_HEADER " iucv: " /* for debugging */
+
+/*
+ * Bare driver object for the IUCV bus; assigned to each netiucv
+ * device (dev->driver) in netiucv_register_device() further down.
+ * NOTE(review): it is never driver_register()ed in this hunk --
+ * confirm that plain assignment is sufficient here.
+ */
+static struct device_driver netiucv_driver = {
+	.name = "netiucv",
+	.bus  = &iucv_bus,
+};
+
/**
* Per connection profiling data
*/
/**
* Linked list of all connection structs.
*/
-static struct iucv_connection *connections;
+static struct iucv_connection *iucv_connections;
/**
* Representation of event-data for the
* match exactly as specified in order to give connection_pending()
* control.
*/
-static __u8 mask[] = {
+static __u8 netiucv_mask[] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
\f
+/**
+ * Debug Facility Stuff
+ */
+static debug_info_t *iucv_dbf_setup = NULL;
+static debug_info_t *iucv_dbf_data = NULL;
+static debug_info_t *iucv_dbf_trace = NULL;
+
+DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
+
+/*
+ * Tear down whichever of the three s390 debug-feature areas (setup,
+ * data, trace) were successfully created by iucv_register_dbf_views().
+ * Each pointer is checked individually so this is safe to call after a
+ * partial registration failure.  The pointers are NOT reset to NULL
+ * here -- NOTE(review): confirm no caller can invoke this twice, or a
+ * stale pointer would be passed to debug_unregister() again.
+ */
+static void
+iucv_unregister_dbf_views(void)
+{
+	if (iucv_dbf_setup)
+		debug_unregister(iucv_dbf_setup);
+	if (iucv_dbf_data)
+		debug_unregister(iucv_dbf_data);
+	if (iucv_dbf_trace)
+		debug_unregister(iucv_dbf_trace);
+}
+/*
+ * Create the three s390 debug-feature areas used by the IUCV_DBF_*
+ * macros, attach a hex/ascii view to each and set its initial level
+ * from the IUCV_DBF_*_LEVEL defines.
+ *
+ * Returns 0 on success, -ENOMEM if any debug_register() failed; on
+ * failure all areas created so far are released again via
+ * iucv_unregister_dbf_views().
+ *
+ * NOTE(review): the debug_register_view() return values are ignored,
+ * so a failed view registration goes unnoticed -- the area would exist
+ * but be unreadable through the hex/ascii view.
+ */
+static int
+iucv_register_dbf_views(void)
+{
+	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
+					IUCV_DBF_SETUP_INDEX,
+					IUCV_DBF_SETUP_NR_AREAS,
+					IUCV_DBF_SETUP_LEN);
+	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
+				       IUCV_DBF_DATA_INDEX,
+				       IUCV_DBF_DATA_NR_AREAS,
+				       IUCV_DBF_DATA_LEN);
+	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
+					IUCV_DBF_TRACE_INDEX,
+					IUCV_DBF_TRACE_NR_AREAS,
+					IUCV_DBF_TRACE_LEN);
+
+	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
+	    (iucv_dbf_trace == NULL)) {
+		iucv_unregister_dbf_views();
+		return -ENOMEM;
+	}
+	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
+	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
+
+	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
+	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
+
+	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
+	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
+
+	return 0;
+}
+
/**
* Callback-wrappers, called from lowlevel iucv layer.
*****************************************************************************/
struct sk_buff *skb;
ll_header *header = (ll_header *)pskb->data;
- if (header->next == 0)
+ if (!header->next)
break;
skb_pull(pskb, NETIUCV_HDRLEN);
offset += header->next;
header->next -= NETIUCV_HDRLEN;
if (skb_tailroom(pskb) < header->next) {
- printk(KERN_WARNING
- "%s: Illegal next field in iucv header: "
+ PRINT_WARN("%s: Illegal next field in iucv header: "
"%d > %d\n",
dev->name, header->next, skb_tailroom(pskb));
+ IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
+ header->next, skb_tailroom(pskb));
return;
}
skb_put(pskb, header->next);
pskb->mac.raw = pskb->data;
skb = dev_alloc_skb(pskb->len);
if (!skb) {
- printk(KERN_WARNING
- "%s Out of memory in netiucv_unpack_skb\n",
+ PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
dev->name);
+ IUCV_DBF_TEXT(data, 2,
+ "Out of memory in netiucv_unpack_skb\n");
privptr->stats.rx_dropped++;
return;
}
struct iucv_event *ev = (struct iucv_event *)arg;
struct iucv_connection *conn = ev->conn;
iucv_MessagePending *eib = (iucv_MessagePending *)ev->data;
- struct netiucv_priv *privptr = (struct netiucv_priv *)conn->netdev->priv;
+ struct netiucv_priv *privptr =(struct netiucv_priv *)conn->netdev->priv;
__u32 msglen = eib->ln1msg2.ipbfln1f;
int rc;
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
if (!conn->netdev) {
/* FRITZ: How to tell iucv LL to drop the msg? */
- printk(KERN_WARNING
- "Received data for unlinked connection\n");
+ PRINT_WARN("Received data for unlinked connection\n");
+ IUCV_DBF_TEXT(data, 2,
+ "Received data for unlinked connection\n");
return;
}
if (msglen > conn->max_buffsize) {
/* FRITZ: How to tell iucv LL to drop the msg? */
privptr->stats.rx_dropped++;
+ PRINT_WARN("msglen %d > max_buffsize %d\n",
+ msglen, conn->max_buffsize);
+ IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
+ msglen, conn->max_buffsize);
return;
}
conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head;
conn->rx_buff->len = 0;
rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls,
conn->rx_buff->data, msglen, NULL, NULL, NULL);
- if (rc != 0 || msglen < 5) {
+ if (rc || msglen < 5) {
privptr->stats.rx_errors++;
- printk(KERN_INFO "iucv_receive returned %08x\n", rc);
+ PRINT_WARN("iucv_receive returned %08x\n", rc);
+ IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
return;
}
netiucv_unpack_skb(conn, conn->rx_buff);
unsigned long saveflags;
ll_header header;
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
if (conn && conn->netdev && conn->netdev->priv)
privptr = (struct netiucv_priv *)conn->netdev->priv;
conn->prof.tx_pending++;
if (conn->prof.tx_pending > conn->prof.tx_max_pending)
conn->prof.tx_max_pending = conn->prof.tx_pending;
- if (rc != 0) {
+ if (rc) {
conn->prof.tx_pending--;
fsm_newstate(fi, CONN_STATE_IDLE);
if (privptr)
privptr->stats.tx_errors += txpackets;
- printk(KERN_INFO "iucv_send returned %08x\n",
- rc);
+ PRINT_WARN("iucv_send returned %08x\n", rc);
+ IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
} else {
if (privptr) {
privptr->stats.tx_packets += txpackets;
__u16 msglimit;
__u8 udata[16];
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0,
conn->handle, conn, NULL, &msglimit);
- if (rc != 0) {
- printk(KERN_WARNING
- "%s: IUCV accept failed with error %d\n",
+ if (rc) {
+ PRINT_WARN("%s: IUCV accept failed with error %d\n",
netdev->name, rc);
+ IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
return;
}
fsm_newstate(fi, CONN_STATE_IDLE);
iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
__u8 udata[16];
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
iucv_sever(eib->ippathid, udata);
if (eib->ippathid != conn->pathid) {
- printk(KERN_INFO
- "%s: IR Connection Pending; pathid %d does not match original pathid %d\n",
+ PRINT_INFO("%s: IR Connection Pending; "
+ "pathid %d does not match original pathid %d\n",
netdev->name, eib->ippathid, conn->pathid);
+ IUCV_DBF_TEXT_(data, 2,
+ "connreject: IR pathid %d, conn. pathid %d\n",
+ eib->ippathid, conn->pathid);
iucv_sever(conn->pathid, udata);
}
}
struct net_device *netdev = conn->netdev;
struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
fsm_deltimer(&conn->timer);
fsm_newstate(fi, CONN_STATE_IDLE);
if (eib->ippathid != conn->pathid) {
- printk(KERN_INFO
- "%s: IR Connection Complete; pathid %d does not match original pathid %d\n",
+ PRINT_INFO("%s: IR Connection Complete; "
+ "pathid %d does not match original pathid %d\n",
netdev->name, eib->ippathid, conn->pathid);
+ IUCV_DBF_TEXT_(data, 2,
+ "connack: IR pathid %d, conn. pathid %d\n",
+ eib->ippathid, conn->pathid);
conn->pathid = eib->ippathid;
}
netdev->tx_queue_len = eib->ipmsglim;
struct iucv_connection *conn = (struct iucv_connection *)arg;
__u8 udata[16];
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
fsm_deltimer(&conn->timer);
iucv_sever(conn->pathid, udata);
struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
__u8 udata[16];
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
fsm_deltimer(&conn->timer);
iucv_sever(conn->pathid, udata);
- printk(KERN_INFO "%s: Remote dropped connection\n",
- netdev->name);
+ PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
+ IUCV_DBF_TEXT(data, 2,
+ "conn_action_connsever: Remote dropped connection\n");
fsm_newstate(fi, CONN_STATE_STARTWAIT);
fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
__u16 msglimit;
int rc;
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
- if (conn->handle == 0) {
+ if (!conn->handle) {
+ IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n");
conn->handle =
- iucv_register_program(iucvMagic, conn->userid, mask,
+ iucv_register_program(iucvMagic, conn->userid,
+ netiucv_mask,
&netiucv_ops, conn);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
- if (conn->handle <= 0) {
+ if (!conn->handle) {
fsm_newstate(fi, CONN_STATE_REGERR);
- conn->handle = 0;
+ conn->handle = NULL;
+ IUCV_DBF_TEXT(setup, 2,
+ "NULL from iucv_register_program\n");
return;
}
- pr_debug("%s('%s'): registered successfully\n",
+ PRINT_DEBUG("%s('%s'): registered successfully\n",
conn->netdev->name, conn->userid);
}
- pr_debug("%s('%s'): connecting ...\n",
+ PRINT_DEBUG("%s('%s'): connecting ...\n",
conn->netdev->name, conn->userid);
/* We must set the state before calling iucv_connect because the callback
fsm_newstate(fi, CONN_STATE_SETUPWAIT);
rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic,
- conn->userid, iucv_host, 0, NULL, &msglimit, conn->handle,
- conn);
+ conn->userid, iucv_host, 0, NULL, &msglimit,
+ conn->handle, conn);
switch (rc) {
case 0:
conn->netdev->tx_queue_len = msglimit;
CONN_EVENT_TIMER, conn);
return;
case 11:
- printk(KERN_NOTICE
- "%s: User %s is currently not available.\n",
+ PRINT_INFO("%s: User %s is currently not available.\n",
conn->netdev->name,
netiucv_printname(conn->userid));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
return;
case 12:
- printk(KERN_NOTICE
- "%s: User %s is currently not ready.\n",
+ PRINT_INFO("%s: User %s is currently not ready.\n",
conn->netdev->name,
netiucv_printname(conn->userid));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
return;
case 13:
- printk(KERN_WARNING
- "%s: Too many IUCV connections.\n",
+ PRINT_WARN("%s: Too many IUCV connections.\n",
conn->netdev->name);
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
case 14:
- printk(KERN_WARNING
+ PRINT_WARN(
"%s: User %s has too many IUCV connections.\n",
conn->netdev->name,
netiucv_printname(conn->userid));
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
case 15:
- printk(KERN_WARNING
+ PRINT_WARN(
"%s: No IUCV authorization in CP directory.\n",
conn->netdev->name);
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
default:
- printk(KERN_WARNING
- "%s: iucv_connect returned error %d\n",
+ PRINT_WARN("%s: iucv_connect returned error %d\n",
conn->netdev->name, rc);
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
}
+ IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
+ IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
iucv_unregister_program(conn->handle);
- conn->handle = 0;
+ conn->handle = NULL;
}
static void
struct net_device *netdev = conn->netdev;
struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
fsm_deltimer(&conn->timer);
fsm_newstate(fi, CONN_STATE_STOPPED);
netiucv_purge_skb_queue(&conn->collect_queue);
-	if (conn->handle)
+	/* Brace the conditional: the inserted trace line must not push
+	 * iucv_unregister_program() out from under the NULL-handle guard,
+	 * or it would run unconditionally with a possibly NULL handle. */
+	if (conn->handle) {
+		IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
 		iucv_unregister_program(conn->handle);
-	conn->handle = 0;
+	}
+	conn->handle = NULL;
netiucv_purge_skb_queue(&conn->commit_queue);
fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
struct iucv_connection *conn = ev->conn;
struct net_device *netdev = conn->netdev;
- printk(KERN_WARNING
- "%s: Cannot connect without username\n",
+ PRINT_WARN("%s: Cannot connect without username\n",
netdev->name);
+ IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
}
static const fsm_node conn_fsm[] = {
struct netiucv_priv *privptr = dev->priv;
struct iucv_event ev;
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
ev.conn = privptr->conn;
fsm_newstate(fi, DEV_STATE_STARTWAIT);
struct netiucv_priv *privptr = dev->priv;
struct iucv_event ev;
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
ev.conn = privptr->conn;
struct net_device *dev = (struct net_device *)arg;
struct netiucv_priv *privptr = dev->priv;
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
switch (fsm_getstate(fi)) {
case DEV_STATE_STARTWAIT:
fsm_newstate(fi, DEV_STATE_RUNNING);
- printk(KERN_INFO
- "%s: connected with remote side %s\n",
+ PRINT_INFO("%s: connected with remote side %s\n",
dev->name, privptr->conn->userid);
+ IUCV_DBF_TEXT(setup, 3,
+ "connection is up and running\n");
break;
case DEV_STATE_STOPWAIT:
- printk(KERN_INFO
- "%s: got connection UP event during shutdown!!\n",
+ PRINT_INFO(
+ "%s: got connection UP event during shutdown!\n",
dev->name);
+ IUCV_DBF_TEXT(data, 2,
+ "dev_action_connup: in DEV_STATE_STOPWAIT\n");
break;
}
}
static void
dev_action_conndown(fsm_instance *fi, int event, void *arg)
{
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
switch (fsm_getstate(fi)) {
case DEV_STATE_RUNNING:
break;
case DEV_STATE_STOPWAIT:
fsm_newstate(fi, DEV_STATE_STOPPED);
+ IUCV_DBF_TEXT(setup, 3, "connection is down\n");
break;
}
}
spin_lock_irqsave(&conn->collect_lock, saveflags);
if (conn->collect_len + l >
- (conn->max_buffsize - NETIUCV_HDRLEN))
+ (conn->max_buffsize - NETIUCV_HDRLEN)) {
rc = -EBUSY;
- else {
+ IUCV_DBF_TEXT(data, 2,
+ "EBUSY from netiucv_transmit_skb\n");
+ } else {
atomic_inc(&skb->users);
skb_queue_tail(&conn->collect_queue, skb);
conn->collect_len += l;
nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
if (!nskb) {
- printk(KERN_WARNING
- "%s: Could not allocate tx_skb\n",
+ PRINT_WARN("%s: Could not allocate tx_skb\n",
conn->netdev->name);
+ IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
rc = -ENOMEM;
return rc;
} else {
conn->prof.tx_pending++;
if (conn->prof.tx_pending > conn->prof.tx_max_pending)
conn->prof.tx_max_pending = conn->prof.tx_pending;
- if (rc != 0) {
+ if (rc) {
struct netiucv_priv *privptr;
fsm_newstate(conn->fsm, CONN_STATE_IDLE);
conn->prof.tx_pending--;
skb_pull(skb, NETIUCV_HDRLEN);
skb_trim(skb, skb->len - NETIUCV_HDRLEN);
}
- printk(KERN_INFO "iucv_send returned %08x\n",
- rc);
+ PRINT_WARN("iucv_send returned %08x\n", rc);
+ IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
} else {
if (copied)
dev_kfree_skb(skb);
*/
static int
netiucv_open(struct net_device *dev) {
- fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START, dev);
+ fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev);
return 0;
}
int rc = 0;
struct netiucv_priv *privptr = dev->priv;
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
/**
* Some sanity checks ...
*/
if (skb == NULL) {
- printk(KERN_WARNING "%s: NULL sk_buff passed\n", dev->name);
+ PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
+ IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
privptr->stats.tx_dropped++;
return 0;
}
- if (skb_headroom(skb) < (NETIUCV_HDRLEN)) {
- printk(KERN_WARNING
- "%s: Got sk_buff with head room < %ld bytes\n",
+ if (skb_headroom(skb) < NETIUCV_HDRLEN) {
+ PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
dev->name, NETIUCV_HDRLEN);
+ IUCV_DBF_TEXT(data, 2,
+ "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
dev_kfree_skb(skb);
privptr->stats.tx_dropped++;
return 0;
return 0;
}
- if (netiucv_test_and_set_busy(dev))
+ if (netiucv_test_and_set_busy(dev)) {
+ IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
return -EBUSY;
-
+ }
dev->trans_start = jiffies;
- if (netiucv_transmit_skb(privptr->conn, skb) != 0)
+ if (netiucv_transmit_skb(privptr->conn, skb))
rc = 1;
netiucv_clear_busy(dev);
return rc;
static struct net_device_stats *
netiucv_stats (struct net_device * dev)
{
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return &((struct netiucv_priv *)dev->priv)->stats;
}
static int
netiucv_change_mtu (struct net_device * dev, int new_mtu)
{
- if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX))
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) {
+ IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
return -EINVAL;
+ }
dev->mtu = new_mtu;
return 0;
}
{
struct netiucv_priv *priv = dev->driver_data;
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
}
char username[10];
int i;
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (count>9) {
- printk(KERN_WARNING
- "netiucv: username too long (%d)!\n", (int)count);
+ PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
+ IUCV_DBF_TEXT_(setup, 2,
+ "%d is length of username\n", (int)count);
return -EINVAL;
}
/* trailing lf, grr */
break;
} else {
- printk(KERN_WARNING
- "netiucv: Invalid character in username!\n");
+ PRINT_WARN("netiucv: Invalid char %c in username!\n",
+ *p);
+ IUCV_DBF_TEXT_(setup, 2,
+ "username: invalid character %c\n",
+ *p);
return -EINVAL;
}
}
username[i++] = ' ';
username[9] = '\0';
- if (memcmp(username, priv->conn->userid, 8) != 0) {
+ if (memcmp(username, priv->conn->userid, 8)) {
/* username changed */
if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
- printk(KERN_WARNING
+ PRINT_WARN(
"netiucv: device %s active, connected to %s\n",
dev->bus_id, priv->conn->userid);
- printk(KERN_WARNING
- "netiucv: user cannot be updated\n");
+ PRINT_WARN("netiucv: user cannot be updated\n");
+ IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
return -EBUSY;
}
}
{
struct netiucv_priv *priv = dev->driver_data;
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%d\n", priv->conn->max_buffsize);
}
char *e;
int bs1;
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (count >= 39)
return -EINVAL;
bs1 = simple_strtoul(buf, &e, 0);
if (e && (!isspace(*e))) {
- printk(KERN_WARNING
- "netiucv: Invalid character in buffer!\n");
+ PRINT_WARN("netiucv: Invalid character in buffer!\n");
+ IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
return -EINVAL;
}
if (bs1 > NETIUCV_BUFSIZE_MAX) {
- printk(KERN_WARNING
- "netiucv: Given buffer size %d too large.\n",
+ PRINT_WARN("netiucv: Given buffer size %d too large.\n",
+ bs1);
+ IUCV_DBF_TEXT_(setup, 2,
+ "buffer_write: buffer size %d too large\n",
bs1);
-
return -EINVAL;
}
if ((ndev->flags & IFF_RUNNING) &&
- (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2)))
+ (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
+ PRINT_WARN("netiucv: Given buffer size %d too small.\n",
+ bs1);
+ IUCV_DBF_TEXT_(setup, 2,
+ "buffer_write: buffer size %d too small\n",
+ bs1);
return -EINVAL;
- if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN))
+ }
+ if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
+ PRINT_WARN("netiucv: Given buffer size %d too small.\n",
+ bs1);
+ IUCV_DBF_TEXT_(setup, 2,
+ "buffer_write: buffer size %d too small\n",
+ bs1);
return -EINVAL;
+ }
priv->conn->max_buffsize = bs1;
if (!(ndev->flags & IFF_RUNNING))
dev_fsm_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
}
conn_fsm_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
}
maxmulti_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
}
maxmulti_write (struct device *dev, const char *buf, size_t count)
{
struct netiucv_priv *priv = dev->driver_data;
-
+
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.maxmulti = 0;
return count;
}
maxcq_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
}
{
struct netiucv_priv *priv = dev->driver_data;
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.maxcqueue = 0;
return count;
}
sdoio_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
}
{
struct netiucv_priv *priv = dev->driver_data;
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.doios_single = 0;
return count;
}
mdoio_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
}
{
struct netiucv_priv *priv = dev->driver_data;
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
priv->conn->prof.doios_multi = 0;
return count;
}
txlen_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}
{
struct netiucv_priv *priv = dev->driver_data;
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.txlen = 0;
return count;
}
txtime_show (struct device *dev, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
-
+
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
}
{
struct netiucv_priv *priv = dev->driver_data;
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.tx_time = 0;
return count;
}
{
struct netiucv_priv *priv = dev->driver_data;
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}
{
struct netiucv_priv *priv = dev->driver_data;
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.tx_pending = 0;
return count;
}
{
struct netiucv_priv *priv = dev->driver_data;
+ IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}
{
struct netiucv_priv *priv = dev->driver_data;
+ IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
priv->conn->prof.tx_max_pending = 0;
return count;
}
{
int ret;
- pr_debug("%s() called\n", __FUNCTION__);
-
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
if (ret)
return ret;
static inline void
netiucv_remove_files(struct device *dev)
{
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
}
int ret;
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (dev) {
memset(dev, 0, sizeof(struct device));
* but legitime ...).
*/
dev->release = (void (*)(struct device *))kfree;
+ dev->driver = &netiucv_driver;
} else
return -ENOMEM;
ret = netiucv_add_files(dev);
if (ret)
goto out_unreg;
- dev->driver_data = priv;
priv->dev = dev;
+ dev->driver_data = priv;
return 0;
out_unreg:
static void
netiucv_unregister_device(struct device *dev)
{
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
netiucv_remove_files(dev);
device_unregister(dev);
}
/**
* Allocate and initialize a new connection structure.
- * Add it to the list of connections;
+ * Add it to the list of netiucv connections;
*/
static struct iucv_connection *
netiucv_new_connection(struct net_device *dev, char *username)
{
- struct iucv_connection **clist = &connections;
+ struct iucv_connection **clist = &iucv_connections;
struct iucv_connection *conn =
(struct iucv_connection *)
kmalloc(sizeof(struct iucv_connection), GFP_KERNEL);
/**
* Release a connection structure and remove it from the
- * list of connections.
+ * list of netiucv connections.
*/
static void
netiucv_remove_connection(struct iucv_connection *conn)
{
- struct iucv_connection **clist = &connections;
-
- pr_debug("%s() called\n", __FUNCTION__);
+ struct iucv_connection **clist = &iucv_connections;
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (conn == NULL)
return;
while (*clist) {
if (*clist == conn) {
*clist = conn->next;
- if (conn->handle != 0) {
+ if (conn->handle) {
iucv_unregister_program(conn->handle);
- conn->handle = 0;
+ conn->handle = NULL;
}
fsm_deltimer(&conn->timer);
kfree_fsm(conn->fsm);
{
struct netiucv_priv *privptr;
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (!dev)
return;
netiucv_remove_connection(privptr->conn);
if (privptr->fsm)
kfree_fsm(privptr->fsm);
- privptr->conn = 0; privptr->fsm = 0;
+ privptr->conn = NULL; privptr->fsm = NULL;
/* privptr gets freed by free_netdev() */
}
free_netdev(dev);
netiucv_setup_netdevice);
if (!dev)
return NULL;
+ if (dev_alloc_name(dev, dev->name) < 0) {
+ free_netdev(dev);
+ return NULL;
+ }
- privptr = (struct netiucv_priv *)dev->priv;
+ privptr = (struct netiucv_priv *)dev->priv;
privptr->fsm = init_fsm("netiucvdev", dev_state_names,
dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
- if (privptr->fsm == NULL) {
+ if (!privptr->fsm) {
free_netdev(dev);
return NULL;
}
if (!privptr->conn) {
kfree_fsm(privptr->fsm);
free_netdev(dev);
+ IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
return NULL;
}
fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
int i, ret;
struct net_device *dev;
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (count>9) {
- printk(KERN_WARNING
- "netiucv: username too long (%d)!\n", (int)count);
+ PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
+ IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
return -EINVAL;
}
/* trailing lf, grr */
break;
} else {
- printk(KERN_WARNING
- "netiucv: Invalid character in username!\n");
+ PRINT_WARN("netiucv: Invalid character in username!\n");
+ IUCV_DBF_TEXT_(setup, 2,
+ "conn_write: invalid character %c\n", *p);
return -EINVAL;
}
}
username[9] = '\0';
dev = netiucv_init_netdevice(username);
if (!dev) {
- printk(KERN_WARNING
+ PRINT_WARN(
"netiucv: Could not allocate network device structure "
"for user '%s'\n", netiucv_printname(username));
+ IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
return -ENODEV;
}
-
- if ((ret = register_netdev(dev))) {
- goto out_free_ndev;
- }
if ((ret = netiucv_register_device(dev))) {
- unregister_netdev(dev);
+ IUCV_DBF_TEXT_(setup, 2,
+ "ret %d from netiucv_register_device\n", ret);
goto out_free_ndev;
}
/* sysfs magic */
- SET_NETDEV_DEV(dev, (struct device*)((struct netiucv_priv*)dev->priv)->dev);
- printk(KERN_INFO "%s: '%s'\n", dev->name, netiucv_printname(username));
+ SET_NETDEV_DEV(dev,
+ (struct device*)((struct netiucv_priv*)dev->priv)->dev);
+
+ if ((ret = register_netdev(dev))) {
+ netiucv_unregister_device((struct device*)
+ ((struct netiucv_priv*)dev->priv)->dev);
+ goto out_free_ndev;
+ }
+
+ PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
return count;
out_free_ndev:
- printk(KERN_WARNING
- "netiucv: Could not register '%s'\n", dev->name);
+ PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
+ IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
netiucv_free_netdevice(dev);
return ret;
}
static ssize_t
remove_write (struct device_driver *drv, const char *buf, size_t count)
{
- struct iucv_connection **clist = &connections;
+ struct iucv_connection **clist = &iucv_connections;
struct net_device *ndev;
struct netiucv_priv *priv;
struct device *dev;
char *p;
int i;
- pr_debug("%s() called\n", __FUNCTION__);
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (count >= IFNAMSIZ)
count = IFNAMSIZ-1;
continue;
}
if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
- printk(KERN_WARNING
+ PRINT_WARN(
"netiucv: net device %s active with peer %s\n",
ndev->name, priv->conn->userid);
- printk(KERN_WARNING
- "netiucv: %s cannot be removed\n",
+ PRINT_WARN("netiucv: %s cannot be removed\n",
ndev->name);
+ IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
return -EBUSY;
}
unregister_netdev(ndev);
netiucv_unregister_device(dev);
return count;
}
- printk(KERN_WARNING
- "netiucv: net device %s unknown\n", name);
+ PRINT_WARN("netiucv: net device %s unknown\n", name);
+ IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
return -EINVAL;
}
DRIVER_ATTR(remove, 0200, NULL, remove_write);
-static struct device_driver netiucv_driver = {
- .name = "netiucv",
- .bus = &iucv_bus,
-};
-
static void
netiucv_banner(void)
{
- char vbuf[] = "$Revision: 1.57 $";
+ char vbuf[] = "$Revision: 1.63 $";
char *version = vbuf;
if ((version = strchr(version, ':'))) {
*p = '\0';
} else
version = " ??? ";
- printk(KERN_INFO "NETIUCV driver Version%s initialized\n", version);
+ PRINT_INFO("NETIUCV driver Version%s initialized\n", version);
}
static void __exit
netiucv_exit(void)
{
- while (connections) {
- struct net_device *ndev = connections->netdev;
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+ while (iucv_connections) {
+ struct net_device *ndev = iucv_connections->netdev;
struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv;
struct device *dev = priv->dev;
driver_remove_file(&netiucv_driver, &driver_attr_connection);
driver_remove_file(&netiucv_driver, &driver_attr_remove);
driver_unregister(&netiucv_driver);
+ iucv_unregister_dbf_views();
- printk(KERN_INFO "NETIUCV driver unloaded\n");
+ PRINT_INFO("NETIUCV driver unloaded\n");
return;
}
{
int ret;
+ ret = iucv_register_dbf_views();
+ if (ret) {
+ PRINT_WARN("netiucv_init failed, "
+ "iucv_register_dbf_views rc = %d\n", ret);
+ return ret;
+ }
+ IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
ret = driver_register(&netiucv_driver);
- if (ret != 0) {
- printk(KERN_ERR "NETIUCV: failed to register driver.\n");
+ if (ret) {
+ PRINT_ERR("NETIUCV: failed to register driver.\n");
+ IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret);
+ iucv_unregister_dbf_views();
return ret;
}
/* Add entry for specifying connections. */
ret = driver_create_file(&netiucv_driver, &driver_attr_connection);
- if (ret == 0) {
+ if (!ret) {
ret = driver_create_file(&netiucv_driver, &driver_attr_remove);
netiucv_banner();
} else {
- printk(KERN_ERR "NETIUCV: failed to add driver attribute.\n");
+ PRINT_ERR("NETIUCV: failed to add driver attribute.\n");
+ IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret);
driver_unregister(&netiucv_driver);
+ iucv_unregister_dbf_views();
}
return ret;
}
#include "qeth_mpc.h"
-#define VERSION_QETH_H "$Revision: 1.111 $"
+#define VERSION_QETH_H "$Revision: 1.113 $"
#ifdef CONFIG_QETH_IPV6
#define QETH_VERSION_IPV6 ":IPv6"
#define SENSE_RESETTING_EVENT_BYTE 1
#define SENSE_RESETTING_EVENT_FLAG 0x80
+#define atomic_swap(a,b) xchg((int *)a.counter, b)
+
/*
* Common IO related definitions
*/
struct qeth_card;
+enum qeth_out_q_states {
+ QETH_OUT_Q_UNLOCKED,
+ QETH_OUT_Q_LOCKED,
+ QETH_OUT_Q_LOCKED_FLUSH,
+};
+
struct qeth_qdio_out_q {
struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
int queue_no;
struct qeth_card *card;
- spinlock_t lock;
+ atomic_t state;
volatile int do_pack;
/*
* index of buffer to be filled by driver; state EMPTY or PACKING
#ifndef __QETH_FS_H__
#define __QETH_FS_H__
-#define VERSION_QETH_FS_H "$Revision: 1.8 $"
+#define VERSION_QETH_FS_H "$Revision: 1.9 $"
extern const char *VERSION_QETH_PROC_C;
extern const char *VERSION_QETH_SYS_C;
return "HSTR";
case QETH_LINK_TYPE_GBIT_ETH:
return "OSD_1000";
+ case QETH_LINK_TYPE_10GBIT_ETH:
+ return "OSD_10GIG";
case QETH_LINK_TYPE_LANE_ETH100:
return "OSD_FE_LANE";
case QETH_LINK_TYPE_LANE_TR:
/*
*
- * linux/drivers/s390/net/qeth_main.c ($Revision: 1.125 $)
+ * linux/drivers/s390/net/qeth_main.c ($Revision: 1.130 $)
*
* Linux on zSeries OSA Express and HiperSockets support
*
* Frank Pavlic (pavlic@de.ibm.com) and
* Thomas Spatzier <tspat@de.ibm.com>
*
- * $Revision: 1.125 $ $Date: 2004/06/29 17:28:24 $
+ * $Revision: 1.130 $ $Date: 2004/08/05 11:21:50 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include "qeth_mpc.h"
#include "qeth_fs.h"
-#define VERSION_QETH_C "$Revision: 1.125 $"
+#define VERSION_QETH_C "$Revision: 1.130 $"
static const char *version = "qeth S/390 OSA-Express driver";
/**
}
add_timer(&timer);
wait_event(reply->wait_q, reply->received);
- del_timer(&timer);
+ del_timer_sync(&timer);
rc = reply->rc;
qeth_put_reply(reply);
return rc;
QETH_DBF_TEXT(qerr,2,"unexeob");
QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card));
QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer));
- dev_kfree_skb_irq(skb);
+ dev_kfree_skb_any(skb);
card->stats.rx_errors++;
return NULL;
}
qeth_rebuild_skb(card, skb, hdr);
/* is device UP ? */
if (!(card->dev->flags & IFF_UP)){
- dev_kfree_skb_irq(skb);
+ dev_kfree_skb_any(skb);
continue;
}
skb->dev = card->dev;
static inline struct qeth_buffer_pool_entry *
qeth_get_buffer_pool_entry(struct qeth_card *card)
{
- struct qeth_buffer_pool_entry *entry, *tmp;
+ struct qeth_buffer_pool_entry *entry;
QETH_DBF_TEXT(trace, 6, "gtbfplen");
- entry = NULL;
- list_for_each_entry_safe(entry, tmp,
- &card->qdio.in_buf_pool.entry_list, list){
+ if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
+ entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
+ struct qeth_buffer_pool_entry, list);
list_del_init(&entry->list);
- break;
+ return entry;
}
- return entry;
+ return NULL;
}
static inline void
buf->buffer->element[i].flags = 0;
while ((skb = skb_dequeue(&buf->skb_list))){
atomic_dec(&skb->users);
- dev_kfree_skb_irq(skb);
+ dev_kfree_skb_any(skb);
}
}
buf->next_element_to_fill = 0;
QETH_DBF_TEXT(trace, 2, "flushbuf");
QETH_DBF_TEXT_(trace, 2, " err%d", rc);
queue->card->stats.tx_errors += count;
- /* ok, since do_QDIO went wrong the buffers have not been given
- * to the hardware. they still belong to us, so we can clear
- * them and reuse then, i.e. set back next_buf_to_fill*/
- for (i = index; i < index + count; ++i) {
- buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
- qeth_clear_output_buffer(queue, buf);
- }
- queue->next_buf_to_fill = index;
+ /* this must not happen under normal circumstances. if it
+ * happens something is really wrong -> recover */
+ qeth_schedule_recovery(queue->card);
return;
}
atomic_add(count, &queue->used_buffers);
}
/*
- * switches between PACKING and non-PACKING state if needed.
- * has to be called holding queue->lock
+ * Switched to packing state if the number of used buffers on a queue
+ * reaches a certain limit.
*/
-static inline int
-qeth_switch_packing_state(struct qeth_qdio_out_q *queue)
+static inline void
+qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
- struct qeth_qdio_out_buffer *buffer;
- int flush_count = 0;
-
- QETH_DBF_TEXT(trace, 6, "swipack");
if (!queue->do_pack) {
if (atomic_read(&queue->used_buffers)
>= QETH_HIGH_WATERMARK_PACK){
#endif
queue->do_pack = 1;
}
- } else {
+ }
+}
+
+/*
+ * Switches from packing to non-packing mode. If there is a packing
+ * buffer on the queue this buffer will be prepared to be flushed.
+ * In that case 1 is returned to inform the caller. If no buffer
+ * has to be flushed, zero is returned.
+ */
+static inline int
+qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
+{
+ struct qeth_qdio_out_buffer *buffer;
+ int flush_count = 0;
+
+ if (queue->do_pack) {
if (atomic_read(&queue->used_buffers)
<= QETH_LOW_WATERMARK_PACK) {
/* switch PACKING -> non-PACKING */
return flush_count;
}
-static inline void
-qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue, int under_int)
+/*
+ * Called to flush a packing buffer if no more pci flags are on the queue.
+ * Checks if there is a packing buffer and prepares it to be flushed.
+ * In that case returns 1, otherwise zero.
+ */
+static inline int
+qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
{
struct qeth_qdio_out_buffer *buffer;
- int index;
- index = queue->next_buf_to_fill;
- buffer = &queue->bufs[index];
+ buffer = &queue->bufs[queue->next_buf_to_fill];
if((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
(buffer->next_element_to_fill > 0)){
/* it's a packing buffer */
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
queue->next_buf_to_fill =
(queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
- qeth_flush_buffers(queue, under_int, index, 1);
+ return 1;
+ }
+ return 0;
+}
+
+static inline void
+qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
+{
+ int index;
+ int flush_cnt = 0;
+
+ /*
+	 * check if we have to switch to non-packing mode or if
+ * we have to get a pci flag out on the queue
+ */
+ if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
+ !atomic_read(&queue->set_pci_flags_count)){
+ if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
+ QETH_OUT_Q_UNLOCKED) {
+ /*
+ * If we get in here, there was no action in
+ * do_send_packet. So, we check if there is a
+ * packing buffer to be flushed here.
+ */
+ /* TODO: try if we get a performance improvement
+ * by calling netif_stop_queue here */
+ /* save start index for flushing */
+ index = queue->next_buf_to_fill;
+ flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
+ if (!flush_cnt &&
+ !atomic_read(&queue->set_pci_flags_count))
+ flush_cnt +=
+ qeth_flush_buffers_on_no_pci(queue);
+		/* we're done with updating critical queue members */
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ /* flushing can be done outside the lock */
+ if (flush_cnt)
+ qeth_flush_buffers(queue, 1, index, flush_cnt);
+ }
}
}
qeth_clear_output_buffer(queue, buffer);
}
atomic_sub(count, &queue->used_buffers);
+ /* check if we need to do something on this outbound queue */
+ qeth_check_outbound_queue(queue);
netif_wake_queue(card->dev);
#ifdef CONFIG_QETH_PERF_STATS
card->qdio.out_qs[i]->do_pack = 0;
atomic_set(&card->qdio.out_qs[i]->used_buffers,0);
atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
- spin_lock_init(&card->qdio.out_qs[i]->lock);
+ atomic_set(&card->qdio.out_qs[i]->state,
+ QETH_OUT_Q_UNLOCKED);
}
return 0;
}
card->perf_stats.outbound_start_time = qeth_get_micros();
#endif
/*
- * dev_queue_xmit should ensure that we are called packet
- * after packet
+ * We only call netif_stop_queue in case of errors. Since we've
+ * got our own synchronization on queues we can keep the stack's
+ * queue running.
*/
- netif_stop_queue(dev);
- if (!(rc = qeth_send_packet(card, skb)))
- netif_wake_queue(dev);
+ if ((rc = qeth_send_packet(card, skb)))
+ netif_stop_queue(dev);
#ifdef CONFIG_QETH_PERF_STATS
card->perf_stats.outbound_time += qeth_get_micros() -
QETH_DBF_TEXT(trace, 6, "dosndpfa");
- spin_lock(&queue->lock);
+ /* spin until we get the queue ... */
+ while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
+ QETH_OUT_Q_LOCKED,
+ &queue->state));
+ /* ... now we've got the queue */
index = queue->next_buf_to_fill;
buffer = &queue->bufs[queue->next_buf_to_fill];
/*
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
card->stats.tx_dropped++;
- spin_unlock(&queue->lock);
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
return -EBUSY;
}
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
qeth_fill_buffer(queue, buffer, (char *)hdr, skb);
qeth_flush_buffers(queue, 0, index, 1);
- spin_unlock(&queue->lock);
return 0;
}
QETH_DBF_TEXT(trace, 6, "dosndpkt");
- spin_lock(&queue->lock);
+ /* spin until we get the queue ... */
+ while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
+ QETH_OUT_Q_LOCKED,
+ &queue->state));
start_index = queue->next_buf_to_fill;
buffer = &queue->bufs[queue->next_buf_to_fill];
/*
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
card->stats.tx_dropped++;
- spin_unlock(&queue->lock);
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
return -EBUSY;
}
+ /* check if we need to switch packing state of this queue */
+ qeth_switch_to_packing_if_needed(queue);
if (queue->do_pack){
/* does packet fit in current buffer? */
if((QETH_MAX_BUFFER_ELEMENTS(card) -
/* we did a step forward, so check buffer state again */
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
card->stats.tx_dropped++;
- qeth_flush_buffers(queue, 0, start_index, 1);
- spin_unlock(&queue->lock);
/* return EBUSY because we sent old packet, not
* the current one */
- return -EBUSY;
+ rc = -EBUSY;
+ goto out;
}
}
}
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
}
- /* check if we need to switch packing state of this queue */
- flush_count += qeth_switch_packing_state(queue);
-
+ /*
+ * queue->state will go from LOCKED -> UNLOCKED or from
+ * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
+ * (switch packing state or flush buffer to get another pci flag out).
+ * In that case we will enter this loop
+ */
+ while (atomic_dec_return(&queue->state)){
+ /* check if we can go back to non-packing state */
+ flush_count += qeth_switch_to_nonpacking_if_needed(queue);
+ /*
+ * check if we need to flush a packing buffer to get a pci
+ * flag out on the queue
+ */
+ if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
+ flush_count += qeth_flush_buffers_on_no_pci(queue);
+ }
+ /* at this point the queue is UNLOCKED again */
+out:
if (flush_count)
qeth_flush_buffers(queue, 0, start_index, flush_count);
- if (!atomic_read(&queue->set_pci_flags_count))
- qeth_flush_buffers_on_no_pci(queue, 0);
-
- spin_unlock(&queue->lock);
return rc;
}
switch(regnum){
case MII_BMCR: /* Basic mode control register */
rc = BMCR_FULLDPLX;
- if(card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)
+ if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
+ (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
rc |= BMCR_SPEED100;
break;
case MII_BMSR: /* Basic mode status register */
/*
*
- * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.32 $)
+ * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.33 $)
*
* Linux on zSeries OSA Express and HiperSockets support
* This file contains code related to sysfs.
#include "qeth_mpc.h"
#include "qeth_fs.h"
-const char *VERSION_QETH_SYS_C = "$Revision: 1.32 $";
+const char *VERSION_QETH_SYS_C = "$Revision: 1.33 $";
/*****************************************************************************/
/* */
(card->state != CARD_STATE_RECOVER))
return -EPERM;
- i = simple_strtoul(buf, &tmp, 16);
+ i = simple_strtoul(buf, &tmp, 10);
if ((i < 0) || (i > MAX_ADD_HHLEN)) {
PRINT_WARN("add_hhlen out of range\n");
return -EINVAL;
*/
/* this drivers version (do not edit !!! generated and updated by cvs) */
-#define ZFCP_AUX_REVISION "$Revision: 1.114 $"
+#define ZFCP_AUX_REVISION "$Revision: 1.115 $"
#include "zfcp_ext.h"
/* written against the module interface */
static int __init zfcp_module_init(void);
-int zfcp_reboot_handler(struct notifier_block *, unsigned long, void *);
-
/* FCP related */
static void zfcp_ns_gid_pn_handler(unsigned long);
/* initialise configuration rw lock */
rwlock_init(&zfcp_data.config_lock);
- zfcp_data.reboot_notifier.notifier_call = zfcp_reboot_handler;
- register_reboot_notifier(&zfcp_data.reboot_notifier);
-
/* save address of data structure managing the driver module */
zfcp_data.scsi_host_template.module = THIS_MODULE;
goto out;
out_ccw_register:
- unregister_reboot_notifier(&zfcp_data.reboot_notifier);
misc_deregister(&zfcp_cfdc_misc);
out_misc_register:
#ifdef CONFIG_S390_SUPPORT
return retval;
}
-/*
- * This function is called automatically by the kernel whenever a reboot or a
- * shut-down is initiated and zfcp is still loaded
- *
- * locks: zfcp_data.config_sema is taken prior to shutting down the module
- * and removing all structures
- * returns: NOTIFY_DONE in all cases
- */
-int
-zfcp_reboot_handler(struct notifier_block *notifier, unsigned long code,
- void *ptr)
-{
- zfcp_ccw_unregister();
- return NOTIFY_DONE;
-}
-
-
/*
* function: zfcp_cfdc_dev_ioctl
*
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#define ZFCP_CCW_C_REVISION "$Revision: 1.55 $"
+#define ZFCP_CCW_C_REVISION "$Revision: 1.56 $"
#include "zfcp_ext.h"
static int zfcp_ccw_set_online(struct ccw_device *);
static int zfcp_ccw_set_offline(struct ccw_device *);
static int zfcp_ccw_notify(struct ccw_device *, int);
+static void zfcp_ccw_shutdown(struct device *);
static struct ccw_device_id zfcp_ccw_device_id[] = {
{CCW_DEVICE_DEVTYPE(ZFCP_CONTROL_UNIT_TYPE,
.set_online = zfcp_ccw_set_online,
.set_offline = zfcp_ccw_set_offline,
.notify = zfcp_ccw_notify,
+ .driver = {
+ .shutdown = zfcp_ccw_shutdown,
+ },
};
MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
ccw_driver_unregister(&zfcp_ccw_driver);
}
+/**
+ * zfcp_ccw_shutdown - gets called on reboot/shutdown
+ *
+ * Makes sure that QDIO queues are down when the system gets stopped.
+ */
+static void
+zfcp_ccw_shutdown(struct device *dev)
+{
+ struct zfcp_adapter *adapter;
+
+ adapter = dev_get_drvdata(dev);
+ zfcp_erp_adapter_shutdown(adapter, 0);
+ zfcp_erp_wait(adapter);
+}
+
#undef ZFCP_LOG_AREA
#define ZFCP_DEF_H
/* this drivers version (do not edit !!! generated and updated by cvs) */
-#define ZFCP_DEF_REVISION "$Revision: 1.75 $"
+#define ZFCP_DEF_REVISION "$Revision: 1.81 $"
/*************************** INCLUDES *****************************************/
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/blkdev.h>
+#include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
#include <asm/qdio.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
-#include <linux/reboot.h>
#include <linux/mempool.h>
#include <linux/syscalls.h>
#include <linux/ioctl.h>
/********************* GENERAL DEFINES *********************************/
/* zfcp version number, it consists of major, minor, and patch-level number */
-#define ZFCP_VERSION "4.0.0"
+#define ZFCP_VERSION "4.1.3"
static inline void *
zfcp_sg_to_address(struct scatterlist *list)
lists */
struct semaphore config_sema; /* serialises configuration
changes */
- struct notifier_block reboot_notifier; /* used to register cleanup
- functions */
atomic_t loglevel; /* current loglevel */
char init_busid[BUS_ID_SIZE];
wwn_t init_wwpn;
if (ZFCP_LOG_CHECK(level)) { \
_zfcp_hex_dump(addr, count); \
}
-/*
- * Not yet optimal but useful:
- * Waits until the condition is met or the timeout occurs.
- * The condition may be a function call. This allows to
- * execute some additional instructions in addition
- * to a simple condition check.
- * The timeout is modified on exit and holds the remaining time.
- * Thus it is zero if a timeout ocurred, i.e. the condition was
- * not met in the specified interval.
- */
-#define __ZFCP_WAIT_EVENT_TIMEOUT(timeout, condition) \
-do { \
- set_current_state(TASK_UNINTERRUPTIBLE); \
- while (!(condition) && timeout) \
- timeout = schedule_timeout(timeout); \
- current->state = TASK_RUNNING; \
-} while (0);
-
-#define ZFCP_WAIT_EVENT_TIMEOUT(waitqueue, timeout, condition) \
-do { \
- wait_queue_t entry; \
- init_waitqueue_entry(&entry, current); \
- add_wait_queue(&waitqueue, &entry); \
- __ZFCP_WAIT_EVENT_TIMEOUT(timeout, condition) \
- remove_wait_queue(&waitqueue, &entry); \
-} while (0);
#define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id)
#define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter))
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
/* this drivers version (do not edit !!! generated and updated by cvs) */
-#define ZFCP_ERP_REVISION "$Revision: 1.56 $"
+#define ZFCP_ERP_REVISION "$Revision: 1.61 $"
#include "zfcp_ext.h"
int retval = 0;
if (send_els->status != 0) {
- ZFCP_LOG_NORMAL("ELS request timed out, physical port reopen "
- "of port 0x%016Lx on adapter %s failed\n",
+ ZFCP_LOG_NORMAL("ELS request timed out, force physical port "
+ "reopen of port 0x%016Lx on adapter %s\n",
port->wwpn, zfcp_get_busid_by_port(port));
debug_text_event(port->adapter->erp_dbf, 3, "forcreop");
retval = zfcp_erp_port_forced_reopen(port, 0);
zfcp_erp_adapter_strategy(struct zfcp_erp_action *erp_action)
{
int retval;
- unsigned long timeout;
struct zfcp_adapter *adapter = erp_action->adapter;
retval = zfcp_erp_adapter_strategy_close(erp_action);
ZFCP_LOG_INFO("Waiting to allow the adapter %s "
"to recover itself\n",
zfcp_get_busid_by_adapter(adapter));
- /*
- * SUGGESTION: substitute by
- * timeout = ZFCP_TYPE2_RECOVERY_TIME;
- * __ZFCP_WAIT_EVENT_TIMEOUT(timeout, 0);
- */
- timeout = ZFCP_TYPE2_RECOVERY_TIME;
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(timeout);
+ msleep(jiffies_to_msecs(ZFCP_TYPE2_RECOVERY_TIME));
}
return retval;
failed_qdio_activate:
debug_text_event(adapter->erp_dbf, 3, "qdio_down1a");
while (qdio_shutdown(adapter->ccw_device,
- QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ);
- }
+ QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
+ msleep(1000);
debug_text_event(adapter->erp_dbf, 3, "qdio_down1b");
failed_qdio_establish:
debug_text_event(adapter->erp_dbf, 3, "qdio_down2a");
while (qdio_shutdown(adapter->ccw_device,
- QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ);
- }
+ QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
+ msleep(1000);
debug_text_event(adapter->erp_dbf, 3, "qdio_down2b");
/*
ZFCP_LOG_DEBUG("host connection still initialising... "
"waiting and retrying...\n");
/* sleep a little bit before retry */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(ZFCP_EXCHANGE_CONFIG_DATA_SLEEP);
+ msleep(jiffies_to_msecs(ZFCP_EXCHANGE_CONFIG_DATA_SLEEP));
}
} while ((retries--) &&
atomic_test_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
*/
/* this drivers version (do not edit !!! generated and updated by cvs) */
-#define ZFCP_FSF_C_REVISION "$Revision: 1.49 $"
+#define ZFCP_FSF_C_REVISION "$Revision: 1.55 $"
#include "zfcp_ext.h"
ZFCP_LOG_DEBUG("fsf req list of adapter %s not yet empty\n",
zfcp_get_busid_by_adapter(adapter));
/* wait for woken intiators to clean up their requests */
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(ZFCP_FSFREQ_CLEANUP_TIMEOUT);
+ msleep(jiffies_to_msecs(ZFCP_FSFREQ_CLEANUP_TIMEOUT));
}
/* consistency check */
{
int retval = 0;
unsigned long lock_flags;
+ volatile struct qdio_buffer_element *sbale;
/* setup new FSF request */
retval = zfcp_fsf_req_create(erp_action->adapter,
goto out;
}
+ sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
+ erp_action->fsf_req->sbal_curr, 0);
+ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
/* mark port as being closed */
atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
&erp_action->port->status);
unsigned long *lock_flags)
{
int condition;
- unsigned long timeout = ZFCP_SBAL_TIMEOUT;
struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
if (unlikely(req_flags & ZFCP_WAIT_FOR_SBAL)) {
- ZFCP_WAIT_EVENT_TIMEOUT(adapter->request_wq, timeout,
- (condition =
- (zfcp_fsf_req_create_sbal_check)
- (lock_flags, req_queue, 1)));
+ wait_event_interruptible_timeout(adapter->request_wq,
+ (condition =
+ zfcp_fsf_req_create_sbal_check
+ (lock_flags, req_queue, 1)),
+ ZFCP_SBAL_TIMEOUT);
if (!condition) {
return -EIO;
}
if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
write_unlock_irqrestore(&req_queue->queue_lock, *lock_flags);
+ ret = -EIO;
goto failed_sbals;
}
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#define ZFCP_SYSFS_PORT_C_REVISION "$Revision: 1.40 $"
+#define ZFCP_SYSFS_PORT_C_REVISION "$Revision: 1.41 $"
#include "zfcp_ext.h"
struct zfcp_unit *unit;
fcp_lun_t fcp_lun;
char *endp;
- int retval = -EINVAL;
+ int retval = 0;
down(&zfcp_data.config_sema);
}
fcp_lun = simple_strtoull(buf, &endp, 0);
- if ((endp + 1) < (buf + count))
+ if ((endp + 1) < (buf + count)) {
+ retval = -EINVAL;
goto out;
+ }
write_lock_irq(&zfcp_data.config_lock);
unit = zfcp_get_unit_by_lun(port, fcp_lun);
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <asm/oplib.h>
#include <asm/ebus.h>
#define __KERNEL_SYSCALLS__
read_unlock(&tasklist_lock);
if (!found)
break;
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(HZ);
- current->state = TASK_RUNNING;
+ msleep(1000);
}
kenvctrld_task = NULL;
}
wd_dev.initialized = 1;
}
- return(0);
+ return(nonseekable_open(inode, f));
}
static int wd_release(struct inode *inode, struct file *file)
return(-EINVAL);
}
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (count) {
wd_pingtimer(pTimer);
return 1;
if (!found)
break;
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(HZ);
+ msleep(1000);
}
kenvctrld_task = NULL;
}
static int riowd_open(struct inode *inode, struct file *filp)
{
+ nonseekable_open(inode, filp);
return 0;
}
static ssize_t riowd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (count) {
riowd_pingtimer();
return 1;
void __init sun4_dvma_init(void)
{
struct sbus_dma *dma;
- struct sbus_dma *dchain;
struct resource r;
if(sun4_dma_physaddr) {
u32 current_time_ms;
TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
int retval = TW_IOCTL_ERROR_OS_EFAULT;
+ void __user *argp = (void __user *)arg;
/* Only let one of these through at a time */
if (down_interruptible(&tw_dev->ioctl_sem)) {
}
/* First copy down the driver command */
- if (copy_from_user(&driver_command, (void *)arg, sizeof(TW_Ioctl_Driver_Command)))
+ if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
goto out2;
/* Check data buffer size */
tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
/* Now copy down the entire ioctl */
- if (copy_from_user(tw_ioctl, (void *)arg, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
+ if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
goto out3;
/* See which ioctl we are doing */
twa_get_request_id(tw_dev, &request_id);
/* Flag internal command */
- tw_dev->srb[request_id] = 0;
+ tw_dev->srb[request_id] = NULL;
/* Flag chrdev ioctl */
tw_dev->chrdev_request_id = request_id;
}
/* Now copy the entire response to userspace */
- if (copy_to_user((void *)arg, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
retval = 0;
out3:
/* Now free ioctl buf memory */
/* clear all the negotiated parameters */
__shost_for_each_device(SDp, host)
- SDp->hostdata = 0;
+ SDp->hostdata = NULL;
/* clear all the slots and their pending commands */
for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
config SCSI_BUSLOGIC
tristate "BusLogic SCSI support"
- depends on (PCI || ISA || MCA) && SCSI
+ depends on (PCI || ISA || MCA) && SCSI && (BROKEN || !SPARC64)
---help---
This is support for BusLogic MultiMaster and FlashPoint SCSI Host
Adapters. Consult the SCSI-HOWTO, available from
config SCSI_EATA
tristate "EATA ISA/EISA/PCI (DPT and generic EATA/DMA-compliant boards) support"
- depends on (ISA || EISA || PCI) && SCSI
+ depends on (ISA || EISA || PCI) && SCSI && (BROKEN || !SPARC64)
---help---
This driver supports all EATA/DMA-compliant SCSI host adapters. DPT
ISA and all EISA I/O addresses are probed looking for the "EATA"
config SCSI_GDTH
tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support"
- depends on (ISA || EISA || PCI) && SCSI
+ depends on (ISA || EISA || PCI) && SCSI && (BROKEN || !SPARC64)
---help---
Formerly called GDT SCSI Disk Array Controller Support.
* and see if we can do an information transfer,
* with failures we will restart.
*/
- hostdata->selecting = 0;
+ hostdata->selecting = NULL;
/* RvC: have to preset this to indicate a new command is being performed */
if (!NCR5380_select(instance, tmp,
to go to sleep */
}
- hostdata->selecting = 0; /* clear this pointer, because we passed the
+ hostdata->selecting = NULL;/* clear this pointer, because we passed the
waiting period */
if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
* only used for debugging.
*/
-#if DBG
+#ifdef DBG
#define FIB_COUNTER_INCREMENT(counter) (counter)++
#else
#define FIB_COUNTER_INCREMENT(counter)
#endif /* !PCMCIA */
static int registered_count=0;
-static struct Scsi_Host *aha152x_host[2] = {0, 0};
+static struct Scsi_Host *aha152x_host[2];
static Scsi_Host_Template aha152x_driver_template;
/*
void (*end)(struct Scsi_Host *);
int spio;
} states[] = {
- { "idle", 0, 0, 0, 0},
- { "unknown", 0, 0, 0, 0},
- { "seldo", 0, seldo_run, 0, 0},
- { "seldi", 0, seldi_run, 0, 0},
- { "selto", 0, selto_run, 0, 0},
- { "busfree", 0, busfree_run, 0, 0},
+ { "idle", NULL, NULL, NULL, 0},
+ { "unknown", NULL, NULL, NULL, 0},
+ { "seldo", NULL, seldo_run, NULL, 0},
+ { "seldi", NULL, seldi_run, NULL, 0},
+ { "selto", NULL, selto_run, NULL, 0},
+ { "busfree", NULL, busfree_run, NULL, 0},
{ "msgo", msgo_init, msgo_run, msgo_end, 1},
{ "cmd", cmd_init, cmd_run, cmd_end, 1},
- { "msgi", 0, msgi_run, msgi_end, 1},
- { "status", 0, status_run, 0, 1},
+ { "msgi", NULL, msgi_run, msgi_end, 1},
+ { "status", NULL, status_run, NULL, 1},
{ "datai", datai_init, datai_run, datai_end, 0},
{ "datao", datao_init, datao_run, datao_end, 0},
- { "parerr", 0, parerr_run, 0, 0},
- { "rsti", 0, rsti_run, 0, 0},
+ { "parerr", NULL, parerr_run, NULL, 0},
+ { "rsti", NULL, rsti_run, NULL, 0},
};
/* setup & interrupt */
if(aha152x_host[i] && aha152x_host[i]->irq==irqno)
return aha152x_host[i];
- return 0;
+ return NULL;
}
static irqreturn_t swintr(int irqno, void *dev_id, struct pt_regs *regs)
goto out_host_put;
}
- if( scsi_add_host(shpnt, 0) ) {
+ if( scsi_add_host(shpnt, NULL) ) {
free_irq(shpnt->irq, shpnt);
printk(KERN_ERR "aha152x%d: failed to add host.\n", shpnt->host_no);
goto out_host_put;
return shpnt;
out_host_put:
- aha152x_host[registered_count]=0;
+ aha152x_host[registered_count]=NULL;
scsi_host_put(shpnt);
- return 0;
+ return NULL;
}
void aha152x_release(struct Scsi_Host *shpnt)
}
}
- SCNEXT(SCpnt) = 0;
+ SCNEXT(SCpnt) = NULL;
SCSEM(SCpnt) = sem;
/* setup scratch area
}
#endif
- return aha152x_internal_queue(SCpnt, 0, 0, done);
+ return aha152x_internal_queue(SCpnt, NULL, 0, done);
}
DO_UNLOCK(flags);
kfree(SCpnt->host_scribble);
- SCpnt->host_scribble=0;
+ SCpnt->host_scribble=NULL;
return SUCCESS;
}
SCpnt->cmd_len = 0;
SCpnt->use_sg = 0;
- SCpnt->request_buffer = 0;
+ SCpnt->request_buffer = NULL;
SCpnt->request_bufflen = 0;
init_timer(&timer);
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0);
kfree(SCpnt->host_scribble);
- SCpnt->host_scribble=0;
+ SCpnt->host_scribble=NULL;
ret = SUCCESS;
} else {
next = SCNEXT(ptr);
} else {
printk(DEBUG_LEAD "queue corrupted at %p\n", CMDINFO(ptr), ptr);
- next = 0;
+ next = NULL;
}
if (!ptr->device->soft_reset) {
remove_SC(SCs, ptr);
HOSTDATA(shpnt)->commands--;
kfree(ptr->host_scribble);
- ptr->host_scribble=0;
+ ptr->host_scribble=NULL;
}
ptr = next;
"aha152x: unable to verify geometry for disk with >1GB.\n"
" Using default translation. Please verify yourself.\n"
" Perhaps you need to enable extended translation in the driver.\n"
- " See /usr/src/linux/Documentation/scsi/aha152x.txt for details.\n");
+ " See Documentation/scsi/aha152x.txt for details.\n");
}
} else {
info_array[0] = info[0];
printk(ERR_LEAD "there's already a completed command %p - will cause abort\n", CMDINFO(CURRENT_SC), DONE_SC);
DONE_SC = CURRENT_SC;
- CURRENT_SC = 0;
+ CURRENT_SC = NULL;
DONE_SC->result = error;
} else
printk(KERN_ERR "aha152x: done() called outside of command\n");
#endif
append_SC(&DISCONNECTED_SC, CURRENT_SC);
CURRENT_SC->SCp.phase |= 1 << 16;
- CURRENT_SC = 0;
+ CURRENT_SC = NULL;
} else {
done(shpnt, DID_ERROR << 16);
if(!(DONE_SC->SCp.Status & not_issued)) {
Scsi_Cmnd *ptr = DONE_SC;
- DONE_SC=0;
+ DONE_SC=NULL;
#if 0
DPRINTK(debug_eh, ERR_LEAD "requesting sense\n", CMDINFO(ptr));
#endif
ptr->request_bufflen = sizeof(ptr->sense_buffer);
DO_UNLOCK(flags);
- aha152x_internal_queue(ptr, 0, check_condition, ptr->scsi_done);
+ aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done);
DO_LOCK(flags);
#if 0
} else {
int lun=DONE_SC->device->lun & 0x7;
#endif
Scsi_Cmnd *ptr = DONE_SC;
- DONE_SC=0;
+ DONE_SC=NULL;
/* turn led off, when no commands are in the driver */
HOSTDATA(shpnt)->commands--;
if(ptr->scsi_done != reset_done) {
kfree(ptr->host_scribble);
- ptr->host_scribble=0;
+ ptr->host_scribble=NULL;
}
DO_UNLOCK(flags);
DO_LOCK(flags);
}
- DONE_SC=0;
+ DONE_SC=NULL;
#if defined(AHA152X_STAT)
} else {
HOSTDATA(shpnt)->busfree_without_done_command++;
append_SC(&ISSUE_SC, CURRENT_SC);
DO_UNLOCK(flags);
- CURRENT_SC = 0;
+ CURRENT_SC = NULL;
}
if(!DISCONNECTED_SC) {
remove_SC(&DISCONNECTED_SC, ptr);
kfree(ptr->host_scribble);
- ptr->host_scribble=0;
+ ptr->host_scribble=NULL;
ptr->result = DID_RESET << 16;
ptr->scsi_done(ptr);
printk(KERN_DEBUG "none\n");
printk(KERN_DEBUG "disconnected_SC:\n");
- for (ptr = DISCONNECTED_SC; ptr; ptr = SCDATA(ptr) ? SCNEXT(ptr) : 0)
+ for (ptr = DISCONNECTED_SC; ptr; ptr = SCDATA(ptr) ? SCNEXT(ptr) : NULL)
show_command(ptr);
disp_ports(shpnt);
if(thislength<0) {
DPRINTK(debug_procinfo, KERN_DEBUG "aha152x_proc_info: output too short\n");
- *start = 0;
+ *start = NULL;
return 0;
}
aha152x_config conf;
#endif
#ifdef __ISAPNP__
- struct pnp_dev *dev=0, *pnpdev[2] = {0, 0};
+ struct pnp_dev *dev=NULL, *pnpdev[2] = {NULL, NULL};
#endif
if ( setup_count ) {
#if defined(__ISAPNP__)
} else if( pnpdev[i] ) {
HOSTDATA(shpnt)->pnpdev=pnpdev[i];
- pnpdev[i]=0;
+ pnpdev[i]=NULL;
#endif
}
} else {
for(i=0; i<ARRAY_SIZE(setup); i++) {
aha152x_release(aha152x_host[i]);
- aha152x_host[i]=0;
+ aha152x_host[i]=NULL;
}
}
my_done = SCtmp->scsi_done;
if (SCtmp->host_scribble) {
kfree(SCtmp->host_scribble);
- SCtmp->host_scribble = 0;
+ SCtmp->host_scribble = NULL;
}
/* Fetch the sense data, and tuck it away, in the required slot. The
Adaptec automatically fetches it, and there is no guarantee that
struct ahd_devinfo *devinfo,
u_int lun, cam_status status,
char *message, int verbose_level);
-#if AHD_TARGET_MODE
+#ifdef AHD_TARGET_MODE
static void ahd_setup_target_msgin(struct ahd_softc *ahd,
struct ahd_devinfo *devinfo,
struct scb *scb);
ahd->msgin_index = 0;
}
}
-#if AHD_TARGET_MODE
+#ifdef AHD_TARGET_MODE
else {
if (bus_phase == P_MESGOUT) {
ahd->msg_type =
tstate = ahd->enabled_targets[i];
if (tstate != NULL) {
-#if AHD_TARGET_MODE
+#ifdef AHD_TARGET_MODE
int j;
for (j = 0; j < AHD_NUM_LUNS; j++) {
free(tstate, M_DEVBUF);
}
}
-#if AHD_TARGET_MODE
+#ifdef AHD_TARGET_MODE
if (ahd->black_hole != NULL) {
xpt_free_path(ahd->black_hole->path);
free(ahd->black_hole, M_DEVBUF);
ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR);
ahd_outb(ahd, CLRINT, CLRSCSIINT);
-#if NEEDS_MORE_TESTING
+#ifdef NEEDS_MORE_TESTING
/*
* Always enable abort on incoming L_Qs if this feature is
* supported. We use this to catch invalid SCB references.
if (match != 0)
match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
if (match != 0) {
-#if AHD_TARGET_MODE
+#ifdef AHD_TARGET_MODE
int group;
group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
/* Make sure the sequencer is in a safe location. */
ahd_clear_critical_section(ahd);
-#if AHD_TARGET_MODE
+#ifdef AHD_TARGET_MODE
if ((ahd->flags & AHD_TARGETROLE) != 0) {
ahd_run_tqinfifo(ahd, /*paused*/TRUE);
}
}
break;
-#if AIC7XXX_NOT_YET
+#ifdef AIC7XXX_NOT_YET
case TRACEPOINT2:
{
printk(INFO_LEAD "Tracepoint #2 reached.\n", p->host_no,
printk(KERN_INFO "aic7xxx: MMAPed I/O failed, reverting to "
"Programmed I/O.\n");
iounmap((void *) (((unsigned long) temp_p->maddr) & PAGE_MASK));
- temp_p->maddr = 0;
+ temp_p->maddr = NULL;
if(temp_p->base == 0)
{
printk("aic7xxx: <%s> at PCI %d/%d/%d\n",
temp_p->pause = hcntrl | PAUSE | INTEN;
temp_p->base = base;
temp_p->mbase = 0;
- temp_p->maddr = 0;
+ temp_p->maddr = NULL;
temp_p->pci_bus = 0;
temp_p->pci_device_fn = slot;
aic_outb(temp_p, hcntrl | PAUSE, HCNTRL);
u32 reply_size = 0;
u32 __user *user_msg = arg;
u32 __user * user_reply = NULL;
- ulong sg_list[pHba->sg_tablesize];
+ void *sg_list[pHba->sg_tablesize];
u32 sg_offset = 0;
u32 sg_count = 0;
int sg_index = 0;
u32 i = 0;
u32 rcode = 0;
- ulong p = 0;
+ void *p = NULL;
ulong flags = 0;
memset(&msg, 0, MAX_MESSAGE_SIZE*4);
}
sg_size = sg[i].flag_count & 0xffffff;
/* Allocate memory for the transfer */
- p = (ulong)kmalloc(sg_size, GFP_KERNEL|ADDR32);
- if(p == 0) {
+ p = kmalloc(sg_size, GFP_KERNEL|ADDR32);
+ if(!p) {
printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
pHba->name,sg_size,i,sg_count);
rcode = -ENOMEM;
/* Copy in the user's SG buffer if necessary */
if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
// TODO 64bit fix
- if (copy_from_user((void*)p,(void*)sg[i].addr_bus, sg_size)) {
+ if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) {
printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
rcode = -EFAULT;
goto cleanup;
}
}
//TODO 64bit fix
- sg[i].addr_bus = (u32)virt_to_bus((void*)p);
+ sg[i].addr_bus = (u32)virt_to_bus(p);
}
}
if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
sg_size = sg[j].flag_count & 0xffffff;
// TODO 64bit fix
- if (copy_to_user((void*)sg[j].addr_bus,(void*)sg_list[j], sg_size)) {
- printk(KERN_WARNING"%s: Could not copy %lx TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
+ if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) {
+ printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
rcode = -EFAULT;
goto cleanup;
}
while(sg_index) {
if(sg_list[--sg_index]) {
if (rcode != -ETIME && rcode != -EINTR)
- kfree((void*)(sg_list[sg_index]));
+ kfree(sg_list[sg_index]);
}
}
return rcode;
u32 base;
int i;
-#if CHECKPAL
+#ifdef CHECKPAL
u8 pal1, pal2, pal3;
#endif
if (EISAbases[i]) { /* Still a possibility ? */
base = 0x1c88 + (i * 0x1000);
-#if CHECKPAL
+#ifdef CHECKPAL
pal1 = inb((u16) base - 8);
pal2 = inb((u16) base - 7);
pal3 = inb((u16) base - 6);
}
/* Nothing found here so we take it from the list */
EISAbases[i] = 0;
-#if CHECKPAL
+#ifdef CHECKPAL
}
#endif
}
EISAbases[x] = 0;
}
}
-#if CHECK_BLINK
+#ifdef CHECK_BLINK
else if (check_blink_state(base)) {
printk("eata_pio: HBA is in BLINK state.\n" "Consult your HBAs manual to correct this.\n");
}
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
-static inline void dma_clear(struct NCR_ESP *esp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 vaddr, int length);
* via PIO.
*/
+static inline void dma_clear(struct NCR_ESP *esp)
+{
+ struct fastlane_dma_registers *dregs =
+ (struct fastlane_dma_registers *) (esp->dregs);
+ unsigned long *t;
+
+ ctrl_data = (ctrl_data & FASTLANE_DMA_MASK);
+ dregs->ctrl_reg = ctrl_data;
+
+ t = (unsigned long *)(esp->edev);
+
+ dregs->clear_strobe = 0;
+ *t = 0 ;
+}
+
/***************************************************************** Detection */
int __init fastlane_esp_detect(Scsi_Host_Template *tpnt)
{
dregs->ctrl_reg = ctrl_data;
}
-static inline void dma_clear(struct NCR_ESP *esp)
-{
- struct fastlane_dma_registers *dregs =
- (struct fastlane_dma_registers *) (esp->dregs);
- unsigned long *t;
-
- ctrl_data = (ctrl_data & FASTLANE_DMA_MASK);
- dregs->ctrl_reg = ctrl_data;
-
- t = (unsigned long *)(esp->edev);
-
- dregs->clear_strobe = 0;
- *t = 0 ;
-}
-
static void dma_ints_off(struct NCR_ESP *esp)
{
static int fdomain_isa_detect( int *irq, int *iobase )
{
+#ifndef PCMCIA
int i, j;
int base = 0xdeadbeef;
int flag = 0;
*iobase = base;
return 1; /* success */
+#else
+ return 0;
+#endif
}
/* PCI detection function: int fdomain_pci_bios_detect(int* irq, int*
if (!(overrides[current_override].NCR5380_map_name))
continue;
- ports = 0;
+ ports = NULL;
switch (overrides[current_override].board) {
case BOARD_NCR5380:
flags = FLAG_NO_PSEUDO_DMA;
.drives = LIST_HEAD_INIT(idescsi_driver.drives),
};
+static int ide_scsi_warned;
+
static int idescsi_ide_open(struct inode *inode, struct file *filp)
{
ide_drive_t *drive = inode->i_bdev->bd_disk->private_data;
drive->usage++;
+ if (!ide_scsi_warned++) {
+ printk(KERN_WARNING "ide-scsi: Warning this device driver is only intended for specialist devices.\n");
+ printk(KERN_WARNING "ide-scsi: Do not use for cd burning, use /dev/hdc directly instead.\n");
+ }
return 0;
}
return -ENODEV;
}
+static int imm_adjust_queue(struct scsi_device *device)
+{
+ blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
+ return 0;
+}
+
static struct scsi_host_template imm_template = {
.module = THIS_MODULE,
.proc_name = "imm",
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
.can_queue = 1,
+ .slave_alloc = imm_adjust_queue,
};
/***************************************************************************
static char *setup_args[] = { "", "", "", "", "", "", "", "", "" };
/* filled in by 'insmod' */
-static char *setup_strings = 0;
+static char *setup_strings;
-#ifdef MODULE_PARM
MODULE_PARM(setup_strings, "s");
-#endif
static inline uchar read_3393(struct IN2000_hostdata *hostdata, uchar reg_num)
{
*/
cmd = (Scsi_Cmnd *) hostdata->input_Q;
- prev = 0;
+ prev = NULL;
while (cmd) {
if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)))
break;
*/
tmp = (Scsi_Cmnd *) hostdata->input_Q;
- prev = 0;
+ prev = NULL;
while (tmp) {
if (tmp == cmd) {
if (prev)
*/
if (!done_setup && setup_strings)
- in2000_setup(setup_strings, 0);
+ in2000_setup(setup_strings, NULL);
detect_count = 0;
for (bios = 0; bios_tab[bios]; bios++) {
case ATA_PROT_DMA:
case ATA_PROT_ATAPI_DMA:
+ case ATA_PROT_ATAPI:
/* check status of DMA engine */
host_stat = ata_bmdma_status(ap);
VPRINTK("BUS_DMA (host_stat 0x%X)\n", host_stat);
*/
if (cp == tp->nego_cp)
- tp->nego_cp = 0;
+ tp->nego_cp = NULL;
/*
** If auto-sense performed, change scsi status.
if (cp == lp->held_ccb) {
xpt_que_splice(&lp->skip_ccbq, &lp->wait_ccbq);
xpt_que_init(&lp->skip_ccbq);
- lp->held_ccb = 0;
+ lp->held_ccb = NULL;
}
}
} else {
script_ofs = dsp;
script_size = 0;
- script_base = 0;
+ script_base = NULL;
script_name = "mem";
}
if (!(cmd & 6)) {
cp = np->header.cp;
if (CCB_PHYS(cp, phys) != dsa)
- cp = 0;
+ cp = NULL;
} else {
cp = np->ccb;
while (cp && (CCB_PHYS (cp, phys) != dsa))
** try to find the interrupted script command,
** and the address at which to continue.
*/
- vdsp = 0;
+ vdsp = NULL;
nxtdsp = 0;
if (dsp > np->p_script &&
dsp <= np->p_script + sizeof(struct script)) {
u_char scntl3;
u_char chg, ofs, per, fak, wide;
u_char num = INB (nc_dsps);
- struct ccb *cp=0;
+ struct ccb *cp=NULL;
u_long dsa = INL (nc_dsa);
u_char target = INB (nc_sdid) & 0x0f;
struct tcb *tp = &np->target[target];
if (cp->magic) {
PRINT_LUN(np, tn, ln);
printk ("ccb free list corrupted (@%p)\n", cp);
- cp = 0;
+ cp = NULL;
}
else {
xpt_insque_tail(qp, &lp->wait_ccbq);
{
struct tcb *tp = &np->target[tn];
struct lcb *lp = tp->lp[ln];
- struct ccb *cp = 0;
+ struct ccb *cp = NULL;
/*
** Allocate memory for this CCB.
NCR_LOCK_NCB(np, flags);
ncr_exception(np);
done_list = np->done_list;
- np->done_list = 0;
+ np->done_list = NULL;
NCR_UNLOCK_NCB(np, flags);
if (DEBUG_FLAGS & DEBUG_TINY) printk ("]\n");
NCR_LOCK_NCB(np, flags);
ncr_timeout(np);
done_list = np->done_list;
- np->done_list = 0;
+ np->done_list = NULL;
NCR_UNLOCK_NCB(np, flags);
if (done_list) {
sts = ncr_reset_bus(np, cmd, 1);
done_list = np->done_list;
- np->done_list = 0;
+ np->done_list = NULL;
NCR_UNLOCK_NCB(np, flags);
ncr_flush_done_cmds(done_list);
sts = ncr_abort_command(np, cmd);
out:
done_list = np->done_list;
- np->done_list = 0;
+ np->done_list = NULL;
NCR_UNLOCK_NCB(np, flags);
ncr_flush_done_cmds(done_list);
#ifdef DEBUG_WAITING_LIST
printk("%s: cmd %lx inserted into waiting list\n", ncr_name(np), (u_long) cmd);
#endif
- cmd->next_wcmd = 0;
+ cmd->next_wcmd = NULL;
if (!(wcmd = np->waiting_list)) np->waiting_list = cmd;
else {
while ((wcmd->next_wcmd) != 0)
if (cmd == *pcmd) {
if (to_remove) {
*pcmd = (struct scsi_cmnd *) cmd->next_wcmd;
- cmd->next_wcmd = 0;
+ cmd->next_wcmd = NULL;
}
#ifdef DEBUG_WAITING_LIST
printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
}
pcmd = (struct scsi_cmnd **) &(*pcmd)->next_wcmd;
}
- return 0;
+ return NULL;
}
static void process_waiting_list(struct ncb *np, int sts)
struct scsi_cmnd *waiting_list, *wcmd;
waiting_list = np->waiting_list;
- np->waiting_list = 0;
+ np->waiting_list = NULL;
#ifdef DEBUG_WAITING_LIST
if (waiting_list) printk("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts);
#endif
while ((wcmd = waiting_list) != 0) {
waiting_list = (struct scsi_cmnd *) wcmd->next_wcmd;
- wcmd->next_wcmd = 0;
+ wcmd->next_wcmd = NULL;
if (sts == DID_OK) {
#ifdef DEBUG_WAITING_LIST
printk("%s: cmd %lx trying to requeue\n", ncr_name(np), (u_long) wcmd);
int length, int func)
{
struct host_data *host_data;
- struct ncb *ncb = 0;
+ struct ncb *ncb = NULL;
int retv;
#ifdef DEBUG_PROC_INFO
**==========================================================
*/
#ifdef MODULE
-char *ncr53c8xx = 0; /* command line passed by insmod */
+char *ncr53c8xx; /* command line passed by insmod */
MODULE_PARM(ncr53c8xx, "s");
#endif
int unit, struct ncr_device *device)
{
struct host_data *host_data;
- struct ncb *np = 0;
- struct Scsi_Host *instance = 0;
+ struct ncb *np = NULL;
+ struct Scsi_Host *instance = NULL;
u_long flags = 0;
int i;
thislength = pos - (buffer + offset);
if(thislength < 0) {
- *start = 0;
+ *start = NULL;
return 0;
}
goto out;
}
- if (ppos != &filp->f_pos) {
- /* "A request was outside the capabilities of the device." */
- retval = (-ENXIO);
- goto out;
- }
-
if (STp->ready != ST_READY) {
if (STp->ready == ST_NO_TAPE)
retval = (-ENOMEDIUM);
goto out;
}
- if (ppos != &filp->f_pos) {
- /* "A request was outside the capabilities of the device." */
- retval = (-ENXIO);
- goto out;
- }
-
if (STp->ready != ST_READY) {
if (STp->ready == ST_NO_TAPE)
retval = (-ENOMEDIUM);
int dev = TAPE_NR(inode);
int mode = TAPE_MODE(inode);
+ nonseekable_open(inode, filp);
write_lock(&os_scsi_tapes_lock);
if (dev >= osst_max_dev || os_scsi_tapes == NULL ||
(STp = os_scsi_tapes[dev]) == NULL || !STp->device) {
qla1280_req_pkt(struct scsi_qla_host *ha)
{
struct device_reg *reg = ha->iobase;
- request_t *pkt = 0;
+ request_t *pkt = NULL;
int cnt;
uint32_t timer;
{
struct device_reg *reg = ha->iobase;
struct response *pkt;
- struct srb *sp = 0;
+ struct srb *sp = NULL;
uint16_t mailbox[MAILBOX_REGISTER_COUNT];
uint16_t *wptr;
uint32_t index;
if (index < MAX_OUTSTANDING_COMMANDS)
sp = ha->outstanding_cmds[index];
else
- sp = 0;
+ sp = NULL;
if (sp) {
/* Free outstanding command slot. */
- ha->outstanding_cmds[index] = 0;
+ ha->outstanding_cmds[index] = NULL;
/* Save ISP completion status */
CMD_RESULT(sp->cmd) = 0;
}
/* Free outstanding command slot. */
- ha->outstanding_cmds[handle] = 0;
+ ha->outstanding_cmds[handle] = NULL;
cmd = sp->cmd;
if (handle < MAX_OUTSTANDING_COMMANDS)
sp = ha->outstanding_cmds[handle];
else
- sp = 0;
+ sp = NULL;
if (sp) {
/* Free outstanding command slot. */
- ha->outstanding_cmds[handle] = 0;
+ ha->outstanding_cmds[handle] = NULL;
/* Bad payload or header */
if (pkt->entry_status & (BIT_3 + BIT_2)) {
sp = ha->outstanding_cmds[index];
if (sp) {
/* Free outstanding command slot. */
- ha->outstanding_cmds[index] = 0;
+ ha->outstanding_cmds[index] = NULL;
if (ha->actthreads)
ha->actthreads--;
/* Validate handle. */
if (pkt->handle < MAX_OUTSTANDING_COMMANDS) {
sp = ha->outstanding_cmds[pkt->handle];
- ha->outstanding_cmds[pkt->handle] = 0;
+ ha->outstanding_cmds[pkt->handle] = NULL;
} else
sp = NULL;
if (sp) {
/* Free outstanding command slot. */
- ha->outstanding_cmds[pkt->handle] = 0;
+ ha->outstanding_cmds[pkt->handle] = NULL;
if (ha->actthreads)
ha->actthreads--;
sp->lun_queue->out_cnt--;
CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;
/* Free outstanding command slot. */
- ha->outstanding_cmds[pkt->handle1] = 0;
+ ha->outstanding_cmds[pkt->handle1] = NULL;
add_to_done_queue(ha, sp);
}
int res;
int retval;
+ nonseekable_open(inode, filp);
SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
sdp = sg_get_dev(dev);
if ((!sdp) || (!sdp->device))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
sdp->disk->disk_name, (int) count));
- if (ppos != &filp->f_pos) ; /* FIXME: Hmm. Seek to the right place, or fail? */
if ((k = verify_area(VERIFY_WRITE, buf, count)))
return k;
if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
if (!((filp->f_flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device)))
return -ENXIO;
- if (ppos != &filp->f_pos) ; /* FIXME: Hmm. Seek to the right place, or fail? */
if ((k = verify_area(VERIFY_READ, buf, count)))
return k; /* protects following copy_from_user()s + get_user()s */
""
};
- /* Set read only initially */
- set_disk_ro(cd->disk, 1);
-
/* allocate a request for the TEST_UNIT_READY */
SRpnt = scsi_allocate_request(cd->device, GFP_KERNEL);
if (!SRpnt) {
if ((cd->cdi.mask & (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM)) !=
(CDC_DVD_RAM | CDC_MRW_W | CDC_RAM)) {
cd->device->writeable = 1;
- set_disk_ro(cd->disk, 0);
}
scsi_release_request(SRpnt);
int dev = TAPE_NR(inode);
char *name;
+ nonseekable_open(inode, filp);
write_lock(&st_dev_arr_lock);
if (dev >= st_dev_max || scsi_tapes == NULL ||
((STp = scsi_tapes[dev]) == NULL)) {
}
\f
/* The checks common to both reading and writing */
-static ssize_t rw_checks(Scsi_Tape *STp, struct file *filp, size_t count, loff_t *ppos)
+static ssize_t rw_checks(Scsi_Tape *STp, struct file *filp, size_t count)
{
ssize_t retval = 0;
goto out;
}
- if (ppos != &filp->f_pos) {
- /* "A request was outside the capabilities of the device." */
- retval = (-ENXIO);
- goto out;
- }
-
if (STp->ready != ST_READY) {
if (STp->ready == ST_NO_TAPE)
retval = (-ENOMEDIUM);
if (down_interruptible(&STp->lock))
return -ERESTARTSYS;
- retval = rw_checks(STp, filp, count, ppos);
+ retval = rw_checks(STp, filp, count);
if (retval || count == 0)
goto out;
if (down_interruptible(&STp->lock))
return -ERESTARTSYS;
- retval = rw_checks(STp, filp, count, ppos);
+ retval = rw_checks(STp, filp, count);
if (retval || count == 0)
goto out;
for (i = 0; i < st_dev_max; i++) {
tpnt = scsi_tapes[i];
if (tpnt != NULL && tpnt->device == SDp) {
- scsi_tapes[i] = 0;
+ scsi_tapes[i] = NULL;
st_nr_dev--;
write_unlock(&st_dev_arr_lock);
devfs_unregister_tape(tpnt->disk->number);
return &sym_fw1;
#endif
else
- return 0;
+ return NULL;
}
/*
}
/* Revert everything */
- SYM_UCMD_PTR(cmd)->eh_wait = 0;
+ SYM_UCMD_PTR(cmd)->eh_wait = NULL;
cmd->scsi_done = ep->old_done;
/* Wake up the eh thread if it wants to sleep */
/* On error, restore everything and cross fingers :) */
if (sts) {
- SYM_UCMD_PTR(cmd)->eh_wait = 0;
+ SYM_UCMD_PTR(cmd)->eh_wait = NULL;
cmd->scsi_done = ep->old_done;
to_do = SYM_EH_DO_IGNORE;
}
char **start, off_t offset, int length, int func)
{
struct host_data *host_data;
- struct sym_hcb *np = 0;
+ struct sym_hcb *np = NULL;
int retv;
host_data = (struct host_data *) host->hostdata;
static struct sym_driver_setup
sym_driver_safe_setup __initdata = SYM_LINUX_DRIVER_SAFE_SETUP;
#ifdef MODULE
-char *sym53c8xx = 0; /* command line passed by insmod */
+char *sym53c8xx; /* command line passed by insmod */
MODULE_PARM(sym53c8xx, "s");
#endif
static __inline m_addr_t sym_m_get_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
{
- void *vaddr = 0;
+ void *vaddr = NULL;
dma_addr_t baddr = 0;
vaddr = pci_alloc_consistent(mp->dev_dmat,SYM_MEM_CLUSTER_SIZE, &baddr);
} else {
script_ofs = dsp;
script_size = 0;
- script_base = 0;
+ script_base = NULL;
script_name = "mem";
}
return chip;
}
- return 0;
+ return NULL;
}
#if SYM_CONF_DMA_ADDRESSING_MODE == 2
* try to find the interrupted script command,
* and the address at which to continue.
*/
- vdsp = 0;
+ vdsp = NULL;
nxtdsp = 0;
if (dsp > np->scripta_ba &&
dsp <= np->scripta_ba + np->scripta_sz) {
* we are not in race.
*/
i = 0;
- cp = 0;
+ cp = NULL;
FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
if (cp->host_status != HS_BUSY &&
* abort for this target.
*/
i = 0;
- cp = 0;
+ cp = NULL;
FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
if (cp->host_status != HS_DISCONNECT)
else if (dp_scr == SCRIPTA_BA (np, pm1_data))
pm = &cp->phys.pm1;
else
- pm = 0;
+ pm = NULL;
if (pm) {
dp_scr = scr_to_cpu(pm->ret);
* used for negotiation, clear this info in the tcb.
*/
if (cp == tp->nego_cp)
- tp->nego_cp = 0;
+ tp->nego_cp = NULL;
#ifdef SYM_CONF_IARB_SUPPORT
/*
/*
* Make this CCB available.
*/
- cp->cam_ccb = 0;
+ cp->cam_ccb = NULL;
cp->host_status = HS_IDLE;
sym_remque(&cp->link_ccbq);
sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
*/
static ccb_p sym_alloc_ccb(hcb_p np)
{
- ccb_p cp = 0;
+ ccb_p cp = NULL;
int hcode;
/*
* queue to the controller.
*/
if (np->actccbs >= SYM_CONF_MAX_START)
- return 0;
+ return NULL;
/*
* Allocate memory for this CCB.
sym_mfree_dma(cp->sns_bbuf,SYM_SNS_BBUF_LEN,"SNS_BBUF");
sym_mfree_dma(cp, sizeof(*cp), "CCB");
}
- return 0;
+ return NULL;
}
/*
* allocation for not probed LUNs.
*/
if (!sym_is_bit(tp->lun_map, ln))
- return 0;
+ return NULL;
/*
* Initialize the target control block if not yet.
lp->cb_tags = sym_calloc(SYM_CONF_MAX_TASK, "CB_TAGS");
if (!lp->cb_tags) {
sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
- lp->itlq_tbl = 0;
+ lp->itlq_tbl = NULL;
goto fail;
}
/*
* Look up our CCB control block.
*/
- cp = 0;
+ cp = NULL;
FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
ccb_p cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
if (cp2->cam_ccb == ccb) {
* LUN(s) > 0.
*/
#if SYM_CONF_MAX_LUN <= 1
-#define sym_lp(np, tp, lun) (!lun) ? (tp)->lun0p : 0
+#define sym_lp(np, tp, lun) (!lun) ? (tp)->lun0p : NULL
#else
#define sym_lp(np, tp, lun) \
- (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : 0
+ (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : NULL
#endif
/*
m_link_p h = mp->h;
if (size > SYM_MEM_CLUSTER_SIZE)
- return 0;
+ return NULL;
while (size > s) {
s <<= 1;
if (s == SYM_MEM_CLUSTER_SIZE) {
h[j].next = (m_link_p) M_GET_MEM_CLUSTER();
if (h[j].next)
- h[j].next->next = 0;
+ h[j].next->next = NULL;
break;
}
++j;
j -= 1;
s >>= 1;
h[j].next = (m_link_p) (a+s);
- h[j].next->next = 0;
+ h[j].next->next = NULL;
}
}
#ifdef DEBUG
#ifdef SYM_MEM_FREE_UNUSED
static struct sym_m_pool mp0 =
- {0, ___mp0_get_mem_cluster, ___mp0_free_mem_cluster};
+ {NULL, ___mp0_get_mem_cluster, ___mp0_free_mem_cluster};
#else
static struct sym_m_pool mp0 =
- {0, ___mp0_get_mem_cluster};
+ {NULL, ___mp0_get_mem_cluster};
#endif
/*
/* Create a new memory DMAable pool (when fetch failed) */
static m_pool_p ___cre_dma_pool(m_pool_ident_t dev_dmat)
{
- m_pool_p mp = 0;
+ m_pool_p mp = NULL;
mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL");
if (mp) {
}
if (mp)
__sym_mfree(&mp0, mp, sizeof(*mp), "MPOOL");
- return 0;
+ return NULL;
}
#ifdef SYM_MEM_FREE_UNUSED
void *__sym_calloc_dma_unlocked(m_pool_ident_t dev_dmat, int size, char *name)
{
m_pool_p mp;
- void *m = 0;
+ void *m = NULL;
mp = ___get_dma_pool(dev_dmat);
if (!mp)
{
m_pool_p mp;
int hc = VTOB_HASH_CODE(m);
- m_vtob_p vp = 0;
+ m_vtob_p vp = NULL;
m_addr_t a = ((m_addr_t) m) & ~SYM_MEM_CLUSTER_MASK;
mp = ___get_dma_pool(dev_dmat);
if (elem != head)
__sym_que_del(head, elem->flink);
else
- elem = 0;
+ elem = NULL;
return elem;
}
u_char *gpcntl)
{
OUTB (nc_gpcntl, *gpcntl & 0xfe);
- S24C16_do_bit(np, 0, write_bit, gpreg);
+ S24C16_do_bit(np, NULL, write_bit, gpreg);
OUTB (nc_gpcntl, *gpcntl);
}
int x;
for (x = 0; x < 8; x++)
- S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);
+ S24C16_do_bit(np, NULL, (write_data >> (7 - x)) & 0x01, gpreg);
S24C16_read_ack(np, ack_data, gpreg, gpcntl);
}
if (elem != head)
__xpt_que_del(head, elem->flink);
else
- elem = 0;
+ elem = NULL;
return elem;
}
m_link_s *h = mp->h;
if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
- return 0;
+ return NULL;
while (size > s) {
s <<= 1;
if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
h[j].next = (m_link_s *) M_GETP();
if (h[j].next)
- h[j].next->next = 0;
+ h[j].next->next = NULL;
break;
}
++j;
j -= 1;
s >>= 1;
h[j].next = (m_link_s *) (a+s);
- h[j].next->next = 0;
+ h[j].next->next = NULL;
}
}
#ifdef DEBUG
--mp->nump;
}
-static m_pool_s mp0 = {0, ___mp0_getp, ___mp0_freep};
+static m_pool_s mp0 = {NULL, ___mp0_getp, ___mp0_freep};
/*
* DMAable pools.
{
u_long flags;
struct m_pool *mp;
- void *m = 0;
+ void *m = NULL;
NCR_LOCK_DRIVER(flags);
mp = ___get_dma_pool(bush);
u_long flags;
m_pool_s *mp;
int hc = VTOB_HASH_CODE(m);
- m_vtob_s *vp = 0;
+ m_vtob_s *vp = NULL;
m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
NCR_LOCK_DRIVER(flags);
pdev = pACB->pdev;
pci_read_config_word(pdev, PCI_STATUS, &pstat);
printk ("DC390: Register dump: PCI Status: %04x\n", pstat);
- printk ("DC390: In case of driver trouble read linux/Documentation/scsi/tmscsim.txt\n");
+ printk ("DC390: In case of driver trouble read Documentation/scsi/tmscsim.txt\n");
}
return SCSI_ABORT_NOT_RUNNING;
if (config.mscp[mscp_index].SCint != SCpnt) panic("Bad abort");
- config.mscp[mscp_index].SCint = 0;
+ config.mscp[mscp_index].SCint = NULL;
done = config.mscp[mscp_index].done;
- config.mscp[mscp_index].done = 0;
+ config.mscp[mscp_index].done = NULL;
SCpnt->result = DID_ABORT << 16;
/* Take the host lock to guard against scsi layer re-entry */
{
config.mscp[i].SCint->result = DID_RESET << 16;
config.mscp[i].done(config.mscp[i].SCint);
- config.mscp[i].done = 0;
+ config.mscp[i].done = NULL;
}
- config.mscp[i].SCint = 0;
+ config.mscp[i].SCint = NULL;
}
#endif
if (icm_status == 3) {
void (*done)(Scsi_Cmnd *) = mscp->done;
if (done) {
- mscp->done = 0;
+ mscp->done = NULL;
mscp->SCint->result = DID_ABORT << 16;
done(mscp->SCint);
}
once we call done, we may get another command queued before this
interrupt service routine can return. */
done = mscp->done;
- mscp->done = 0;
+ mscp->done = NULL;
/* Let the higher levels know that we're done */
switch (mscp->adapter_status)
SCtmp->result = status | mscp->target_status;
- SCtmp->host_scribble = 0;
+ SCtmp->host_scribble = NULL;
/* Free up mscp block for next command */
#if ULTRASTOR_MAX_CMDS == 1
static int __devinit pci_xircom_init(struct pci_dev *dev)
{
- __set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ/10);
+ msleep(100);
return 0;
}
# The new 8250/16550 serial drivers
config SERIAL_8250
tristate "8250/16550 and compatible serial support"
+ depends on (BROKEN || !SPARC64)
select SERIAL_CORE
---help---
This selects whether you want to include the driver for the standard
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
-config SERIAL_MPSC
- bool "Marvell MPSC serial port support"
- depends on PPC32 && MV64X60
- select SERIAL_CORE
- help
- Say Y here if you want to use the Marvell MPSC serial controller.
-
-config SERIAL_MPSC_CONSOLE
- bool "Support for console on Marvell MPSC serial port"
- depends on SERIAL_MPSC
- select SERIAL_CORE_CONSOLE
- help
- Say Y here if you want to support a serial console on a Marvell MPSC.
-
config SERIAL_PXA
bool "PXA serial port support"
depends on ARM && ARCH_PXA
help
Select the is option to use SMC2 as a serial port
-endmenu
+config SERIAL_SGI_L1_CONSOLE
+ bool "SGI Altix L1 serial console support"
+ depends on IA64_GENERIC || IA64_SGI_SN2
+ select SERIAL_CORE
+ help
+ If you have an SGI Altix and you would like to use the system
+ controller serial port as your console (you want this!),
+ say Y. Otherwise, say N.
+config SERIAL_MPC52xx
+ tristate "Freescale MPC52xx family PSC serial support"
+ depends on PPC_MPC52xx
+ select SERIAL_CORE
+ help
+	  This driver supports the MPC52xx PSC serial ports. If you would
+	  like to use them, you must answer Y or M to this option. Note that
+	  for use as a console, it must be included in the kernel and not
+	  as a module.
+
+config SERIAL_MPC52xx_CONSOLE
+ bool "Console on a Freescale MPC52xx family PSC serial port"
+ depends on SERIAL_MPC52xx=y
+ select SERIAL_CORE_CONSOLE
+ help
+	  Select this option if you'd like to use one of the PSC serial
+	  ports of the Freescale MPC52xx family as a console.
+
+config SERIAL_MPC52xx_CONSOLE_BAUD
+ int "Freescale MPC52xx family PSC serial port baud"
+ depends on SERIAL_MPC52xx_CONSOLE=y
+ default "9600"
+ help
+ Select the MPC52xx console baud rate.
+	  This value is only used if the bootloader doesn't pass in the
+	  console baudrate.
+
+endmenu
# $Id: Makefile,v 1.8 2002/07/21 21:32:30 rmk Exp $
#
-obj-$(CONFIG_SERIAL_MPSC) += mpsc/
-
serial-8250-y :=
serial-8250-$(CONFIG_SERIAL_8250_ACPI) += 8250_acpi.o
serial-8250-$(CONFIG_GSC) += 8250_gsc.o
obj-$(CONFIG_SERIAL_DZ) += dz.o
obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
obj-$(CONFIG_SERIAL_BAST_SIO) += bast_sio.o
+obj-$(CONFIG_SERIAL_SGI_L1_CONSOLE) += sn_console.o
obj-$(CONFIG_SERIAL_CPM) += cpm_uart/
+obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o
/**************************************************************/
static int cpm_uart_tx_pump(struct uart_port *port);
-static void cpm_uart_init_smc(struct uart_cpm_port *pinfo, int bits, u16 cval);
-static void cpm_uart_init_scc(struct uart_cpm_port *pinfo, int sbits, u16 sval);
+static void cpm_uart_init_smc(struct uart_cpm_port *pinfo);
+static void cpm_uart_init_scc(struct uart_cpm_port *pinfo);
+static void cpm_uart_initbd(struct uart_cpm_port *pinfo);
/**************************************************************/
pr_debug("CPM uart[%d]:start tx\n", port->line);
- /* if in the middle of discarding return */
- if (IS_DISCARDING(pinfo))
- return;
-
if (IS_SMC(pinfo)) {
if (smcp->smc_smcm & SMCM_TX)
return;
static int cpm_uart_startup(struct uart_port *port)
{
int retval;
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
pr_debug("CPM uart[%d]:startup\n", port->line);
if (retval)
return retval;
+ /* Startup rx-int */
+ if (IS_SMC(pinfo)) {
+ pinfo->smcp->smc_smcm |= SMCM_RX;
+ pinfo->smcp->smc_smcmr |= SMCMR_REN;
+ } else {
+ pinfo->sccp->scc_sccm |= UART_SCCM_RX;
+ }
+
return 0;
}
}
/* Shut them really down and reinit buffer descriptors */
- cpm_line_cr_cmd(line, CPM_CR_INIT_TRX);
+ cpm_line_cr_cmd(line, CPM_CR_STOP_TX);
+ cpm_uart_initbd(pinfo);
}
}
{
int baud;
unsigned long flags;
- u16 cval, scval;
+ u16 cval, scval, prev_mode;
int bits, sbits;
struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
- int line = pinfo - cpm_uart_ports;
- volatile cbd_t *bdp;
+ volatile smc_t *smcp = pinfo->smcp;
+ volatile scc_t *sccp = pinfo->sccp;
pr_debug("CPM uart[%d]:set_termios\n", port->line);
- spin_lock_irqsave(&port->lock, flags);
- /* disable uart interrupts */
- if (IS_SMC(pinfo))
- pinfo->smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX);
- else
- pinfo->sccp->scc_sccm &= ~(UART_SCCM_TX | UART_SCCM_RX);
- pinfo->flags |= FLAG_DISCARDING;
- spin_unlock_irqrestore(&port->lock, flags);
-
- /* if previous configuration exists wait for tx to finish */
- if (pinfo->baud != 0 && pinfo->bits != 0) {
-
- /* point to the last txed bd */
- bdp = pinfo->tx_cur;
- if (bdp == pinfo->tx_bd_base)
- bdp = pinfo->tx_bd_base + (pinfo->tx_nrfifos - 1);
- else
- bdp--;
-
- /* wait for it to be transmitted */
- while ((bdp->cbd_sc & BD_SC_READY) != 0)
- schedule();
-
- /* and delay for the hw fifo to drain */
- udelay((3 * 1000000 * pinfo->bits) / pinfo->baud);
- }
-
- spin_lock_irqsave(&port->lock, flags);
-
- /* Send the CPM an initialize command. */
- cpm_line_cr_cmd(line, CPM_CR_STOP_TX);
-
- /* Stop uart */
- if (IS_SMC(pinfo))
- pinfo->smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
- else
- pinfo->sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
-
- /* Send the CPM an initialize command. */
- cpm_line_cr_cmd(line, CPM_CR_INIT_TRX);
-
- spin_unlock_irqrestore(&port->lock, flags);
-
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
/* Character length programmed into the mode register is the
spin_lock_irqsave(&port->lock, flags);
- cpm_set_brg(pinfo->brg - 1, baud);
-
/* Start bit has not been added (so don't, because we would just
* subtract it later), and we need to add one for the number of
* stops bits (there is always at least one).
*/
bits++;
+ if (IS_SMC(pinfo)) {
+ /* Set the mode register. We want to keep a copy of the
+ * enables, because we want to put them back if they were
+ * present.
+ */
+ prev_mode = smcp->smc_smcmr;
+ smcp->smc_smcmr = smcr_mk_clen(bits) | cval | SMCMR_SM_UART;
+ smcp->smc_smcmr |= (prev_mode & (SMCMR_REN | SMCMR_TEN));
+ } else {
+ sccp->scc_psmr = (sbits << 12) | scval;
+ }
- /* re-init */
- if (IS_SMC(pinfo))
- cpm_uart_init_smc(pinfo, bits, cval);
- else
- cpm_uart_init_scc(pinfo, sbits, scval);
-
- pinfo->baud = baud;
- pinfo->bits = bits;
-
- pinfo->flags &= ~FLAG_DISCARDING;
+ cpm_set_brg(pinfo->brg - 1, baud);
spin_unlock_irqrestore(&port->lock, flags);
}
return 1;
}
-static void cpm_uart_init_scc(struct uart_cpm_port *pinfo, int bits, u16 scval)
+/*
+ * init buffer descriptors
+ */
+static void cpm_uart_initbd(struct uart_cpm_port *pinfo)
{
- int line = pinfo - cpm_uart_ports;
- volatile scc_t *scp;
- volatile scc_uart_t *sup;
+ int i;
u8 *mem_addr;
volatile cbd_t *bdp;
- int i;
- pr_debug("CPM uart[%d]:init_scc\n", pinfo->port.line);
-
- scp = pinfo->sccp;
- sup = pinfo->sccup;
+ pr_debug("CPM uart[%d]:initbd\n", pinfo->port.line);
/* Set the physical address of the host memory
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
- pinfo->rx_cur = pinfo->rx_bd_base;
mem_addr = pinfo->mem_addr;
- for (bdp = pinfo->rx_bd_base, i = 0; i < pinfo->rx_nrfifos; i++, bdp++) {
+ bdp = pinfo->rx_cur = pinfo->rx_bd_base;
+ for (i = 0; i < (pinfo->rx_nrfifos - 1); i++, bdp++) {
bdp->cbd_bufaddr = virt_to_bus(mem_addr);
- bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT | (i < (pinfo->rx_nrfifos - 1) ? 0 : BD_SC_WRAP);
+ bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT;
mem_addr += pinfo->rx_fifosize;
}
+
+ bdp->cbd_bufaddr = virt_to_bus(mem_addr);
+ bdp->cbd_sc = BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT;
/* Set the physical address of the host memory
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize);
- pinfo->tx_cur = pinfo->tx_bd_base;
- for (bdp = pinfo->tx_bd_base, i = 0; i < pinfo->tx_nrfifos; i++, bdp++) {
+ bdp = pinfo->tx_cur = pinfo->tx_bd_base;
+ for (i = 0; i < (pinfo->tx_nrfifos - 1); i++, bdp++) {
bdp->cbd_bufaddr = virt_to_bus(mem_addr);
- bdp->cbd_sc = BD_SC_INTRPT | (i < (pinfo->tx_nrfifos - 1) ? 0 : BD_SC_WRAP);
+ bdp->cbd_sc = BD_SC_INTRPT;
mem_addr += pinfo->tx_fifosize;
- bdp++;
}
+
+ bdp->cbd_bufaddr = virt_to_bus(mem_addr);
+ bdp->cbd_sc = BD_SC_WRAP | BD_SC_INTRPT;
+}
+
+static void cpm_uart_init_scc(struct uart_cpm_port *pinfo)
+{
+ int line = pinfo - cpm_uart_ports;
+ volatile scc_t *scp;
+ volatile scc_uart_t *sup;
+
+ pr_debug("CPM uart[%d]:init_scc\n", pinfo->port.line);
+
+ scp = pinfo->sccp;
+ sup = pinfo->sccup;
/* Store address */
pinfo->sccup->scc_genscc.scc_rbase = (unsigned char *)pinfo->rx_bd_base - DPRAM_BASE;
(SCC_GSMRL_MODE_UART | SCC_GSMRL_TDCR_16 | SCC_GSMRL_RDCR_16);
/* Enable rx interrupts and clear all pending events. */
- scp->scc_sccm = UART_SCCM_RX;
+ scp->scc_sccm = 0;
scp->scc_scce = 0xffff;
scp->scc_dsr = 0x7e7e;
- scp->scc_psmr = (bits << 12) | scval;
+ scp->scc_psmr = 0x3000;
scp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}
-static void cpm_uart_init_smc(struct uart_cpm_port *pinfo, int bits, u16 cval)
+static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
{
int line = pinfo - cpm_uart_ports;
volatile smc_t *sp;
volatile smc_uart_t *up;
- volatile u8 *mem_addr;
- volatile cbd_t *bdp;
- int i;
pr_debug("CPM uart[%d]:init_smc\n", pinfo->port.line);
sp = pinfo->smcp;
up = pinfo->smcup;
- /* Set the physical address of the host memory
- * buffers in the buffer descriptors, and the
- * virtual address for us to work with.
- */
- mem_addr = pinfo->mem_addr;
- pinfo->rx_cur = pinfo->rx_bd_base;
- for (bdp = pinfo->rx_bd_base, i = 0; i < pinfo->rx_nrfifos; i++, bdp++) {
- bdp->cbd_bufaddr = virt_to_bus(mem_addr);
- bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT | (i < (pinfo->rx_nrfifos - 1) ? 0 : BD_SC_WRAP);
- mem_addr += pinfo->rx_fifosize;
- }
-
- /* Set the physical address of the host memory
- * buffers in the buffer descriptors, and the
- * virtual address for us to work with.
- */
- mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize);
- pinfo->tx_cur = pinfo->tx_bd_base;
- for (bdp = pinfo->tx_bd_base, i = 0; i < pinfo->tx_nrfifos; i++, bdp++) {
- bdp->cbd_bufaddr = virt_to_bus(mem_addr);
- bdp->cbd_sc = BD_SC_INTRPT | (i < (pinfo->tx_nrfifos - 1) ? 0 : BD_SC_WRAP);
- mem_addr += pinfo->tx_fifosize;
- }
-
/* Store address */
pinfo->smcup->smc_rbase = (u_char *)pinfo->rx_bd_base - DPRAM_BASE;
pinfo->smcup->smc_tbase = (u_char *)pinfo->tx_bd_base - DPRAM_BASE;
cpm_line_cr_cmd(line, CPM_CR_INIT_TRX);
- /* Set UART mode, according to the parameters */
- sp->smc_smcmr = smcr_mk_clen(bits) | cval | SMCMR_SM_UART;
+ /* Set UART mode, 8 bit, no parity, one stop.
+ * Enable receive and transmit.
+ */
+ sp->smc_smcmr = smcr_mk_clen(9) | SMCMR_SM_UART;
/* Enable only rx interrupts clear all pending events. */
- sp->smc_smcm = SMCM_RX;
+ sp->smc_smcm = 0;
sp->smc_smce = 0xff;
sp->smc_smcmr |= (SMCMR_REN | SMCMR_TEN);
if (pinfo->set_lineif)
pinfo->set_lineif(pinfo);
+ if (IS_SMC(pinfo)) {
+ pinfo->smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX);
+ pinfo->smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
+ } else {
+ pinfo->sccp->scc_sccm &= ~(UART_SCCM_TX | UART_SCCM_RX);
+ pinfo->sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+ }
+
ret = cpm_uart_allocbuf(pinfo, 0);
+
if (ret)
return ret;
+ cpm_uart_initbd(pinfo);
+
return 0;
}
volatile cbd_t *bdp, *bdbase;
volatile unsigned char *cp;
- if (IS_DISCARDING(pinfo))
- return;
-
/* Get the address of the host memory buffer.
*/
bdp = pinfo->tx_cur;
if (pinfo->set_lineif)
pinfo->set_lineif(pinfo);
+ if (IS_SMC(pinfo)) {
+ pinfo->smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX);
+ pinfo->smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
+ } else {
+ pinfo->sccp->scc_sccm &= ~(UART_SCCM_TX | UART_SCCM_RX);
+ pinfo->sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+ }
+
ret = cpm_uart_allocbuf(pinfo, 1);
+
if (ret)
return ret;
+ cpm_uart_initbd(pinfo);
+
+ if (IS_SMC(pinfo))
+ cpm_uart_init_smc(pinfo);
+ else
+ cpm_uart_init_scc(pinfo);
+
uart_set_options(port, co, baud, parity, bits, flow);
return 0;
{
int dpmemsz, memsz;
u8 *dp_mem;
- uint dp_addr;
+ uint dp_offset;
u8 *mem_addr;
- dma_addr_t dma_addr;
+ dma_addr_t dma_addr = 0;
pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line);
dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos);
- dp_mem = m8xx_cpm_dpalloc(dpmemsz);
- if (dp_mem == NULL) {
+ dp_offset = cpm_dpalloc(dpmemsz, 8);
+ if (IS_DPERR(dp_offset)) {
printk(KERN_ERR
"cpm_uart_cpm1.c: could not allocate buffer descriptors\n");
return -ENOMEM;
}
- dp_addr = m8xx_cpm_dpram_offset(dp_mem);
+ dp_mem = cpm_dpram_addr(dp_offset);
memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
GFP_KERNEL);
if (mem_addr == NULL) {
- m8xx_cpm_dpfree(dp_mem);
+ cpm_dpfree(dp_offset);
printk(KERN_ERR
"cpm_uart_cpm1.c: could not allocate coherent memory\n");
return -ENOMEM;
}
- pinfo->dp_addr = dp_addr;
+ pinfo->dp_addr = dp_offset;
pinfo->mem_addr = mem_addr;
pinfo->dma_addr = dma_addr;
pinfo->tx_fifosize), pinfo->mem_addr,
pinfo->dma_addr);
- m8xx_cpm_dpfree(m8xx_cpm_dpram_addr(pinfo->dp_addr));
+ cpm_dpfree(pinfo->dp_addr);
}
/* Setup any dynamic params in the uart desc */
static inline void cpm_set_brg(int brg, int baud)
{
- m8xx_cpm_setbrg(brg, baud);
+ cpm_setbrg(brg, baud);
}
static inline void cpm_set_scc_fcr(volatile scc_uart_t * sup)
{
int dpmemsz, memsz;
u8 *dp_mem;
- uint dp_addr;
+ uint dp_offset;
u8 *mem_addr;
dma_addr_t dma_addr = 0;
pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line);
dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos);
- dp_mem = cpm2_dpalloc(dpmemsz, 8);
- if (dp_mem == NULL) {
+ dp_offset = cpm_dpalloc(dpmemsz, 8);
+ if (IS_DPERR(dp_offset)) {
printk(KERN_ERR
- "cpm_uart_cpm1.c: could not allocate buffer descriptors\n");
+ "cpm_uart_cpm.c: could not allocate buffer descriptors\n");
return -ENOMEM;
}
- dp_addr = cpm2_dpram_offset(dp_mem);
+ dp_mem = cpm_dpram_addr(dp_offset);
memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
GFP_KERNEL);
if (mem_addr == NULL) {
- cpm2_dpfree(dp_mem);
+ cpm_dpfree(dp_offset);
printk(KERN_ERR
- "cpm_uart_cpm1.c: could not allocate coherent memory\n");
+ "cpm_uart_cpm.c: could not allocate coherent memory\n");
return -ENOMEM;
}
- pinfo->dp_addr = dp_addr;
+ pinfo->dp_addr = dp_offset;
pinfo->mem_addr = mem_addr;
pinfo->dma_addr = dma_addr;
pinfo->tx_fifosize), pinfo->mem_addr,
pinfo->dma_addr);
- cpm2_dpfree(&pinfo->dp_addr);
+ cpm_dpfree(pinfo->dp_addr);
}
/* Setup any dynamic params in the uart desc */
static inline void cpm_set_brg(int brg, int baud)
{
- cpm2_setbrg(brg, baud);
+ cpm_setbrg(brg, baud);
}
static inline void cpm_set_scc_fcr(volatile scc_uart_t * sup)
if (tty->flip.count >= TTY_FLIPBUF_SIZE)
drop = 1;
if (ZS_IS_ASLEEP(uap))
- return 0;
+ return NULL;
if (!ZS_IS_OPEN(uap))
goto retry;
}
ioremap(np->addrs[np->n_addrs - 1].address, 0x1000);
if (uap->rx_dma_regs == NULL) {
iounmap((void *)uap->tx_dma_regs);
+ uap->tx_dma_regs = NULL;
uap->flags &= ~PMACZILOG_FLAG_HAS_DMA;
goto no_dma;
}
uap->port.ops = &pmz_pops;
uap->port.type = PORT_PMAC_ZILOG;
uap->port.flags = 0;
- spin_lock_init(&uap->port.lock);
/* Setup some valid baud rate information in the register
* shadows so we don't write crap there before baud rate is
{
struct device_node *np;
- iounmap((void *)uap->control_reg);
np = uap->node;
+ iounmap((void *)uap->rx_dma_regs);
+ iounmap((void *)uap->tx_dma_regs);
+ iounmap((void *)uap->control_reg);
uap->node = NULL;
of_node_put(np);
+ memset(uap, 0, sizeof(struct uart_pmac_port));
}
/*
* Register this driver with the serial core
*/
rc = uart_register_driver(&pmz_uart_reg);
- if (rc != 0)
+ if (rc)
return rc;
/*
struct uart_pmac_port *uport = &pmz_ports[i];
/* NULL node may happen on wallstreet */
if (uport->node != NULL)
- uart_add_one_port(&pmz_uart_reg, &uport->port);
+ rc = uart_add_one_port(&pmz_uart_reg, &uport->port);
+ if (rc)
+ goto err_out;
}
return 0;
+err_out:
+ while (i-- > 0) {
+ struct uart_pmac_port *uport = &pmz_ports[i];
+ uart_remove_one_port(&pmz_uart_reg, &uport->port);
+ }
+ uart_unregister_driver(&pmz_uart_reg);
+ return rc;
}
static struct of_match pmz_match[] =
static int __init init_pmz(void)
{
+ int rc, i;
printk(KERN_INFO "%s\n", version);
/*
/*
* Now we register with the serial layer
*/
- pmz_register();
+ rc = pmz_register();
+ if (rc) {
+ printk(KERN_ERR
+ "pmac_zilog: Error registering serial device, disabling pmac_zilog.\n"
+ "pmac_zilog: Did another serial driver already claim the minors?\n");
+ /* effectively "pmz_unprobe()" */
+ for (i=0; i < pmz_ports_count; i++)
+ pmz_dispose_port(&pmz_ports[i]);
+ return rc;
+ }
/*
* Then we register the macio driver itself
cflag = CREAD | HUPCL | CLOCAL;
s = mode;
- baud = simple_strtoul(s, 0, 0);
+ baud = simple_strtoul(s, NULL, 0);
s = strchr(s, ',');
- bits = simple_strtoul(++s, 0, 0);
+ bits = simple_strtoul(++s, NULL, 0);
s = strchr(s, ',');
parity = *(++s);
s = strchr(s, ',');
- stop = simple_strtoul(++s, 0, 0);
+ stop = simple_strtoul(++s, NULL, 0);
s = strchr(s, ',');
/* XXX handshake is not handled here. */
{
unsigned char status1, status2, scratch, scratch2, scratch3;
unsigned char save_lcr, save_mcr;
- struct linux_ebus_device *dev = 0;
+ struct linux_ebus_device *dev = NULL;
struct linux_ebus *ebus;
#ifdef CONFIG_SPARC64
struct sparc_isa_bridge *isa_br;
s->count++;
up(&open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int usb_audio_release_mixdev(struct inode *inode, struct file *file)
unsigned int ptr;
int cnt, err;
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (as->usbin.dma.mapped)
return -ENXIO;
if (!as->usbin.dma.ready && (ret = prog_dmabuf_in(as)))
unsigned int start_thr;
int cnt, err;
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (as->usbout.dma.mapped)
return -ENXIO;
if (!as->usbout.dma.ready && (ret = prog_dmabuf_out(as)))
as->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
s->count++;
up(&open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int usb_audio_release(struct inode *inode, struct file *file)
ssize_t ret;
DECLARE_WAITQUEUE(wait, current);
- if ( ppos != &file->f_pos ) {
- return -ESPIPE;
- }
if ( !access_ok(VERIFY_READ, buffer, count) ) {
return -EFAULT;
}
ssize_t ret;
unsigned long int flags;
- if ( ppos != &file->f_pos ) {
- return -ESPIPE;
- }
if ( !access_ok(VERIFY_READ, buffer, count) ) {
return -EFAULT;
}
printk(KERN_INFO "usb-midi: Open Succeeded. minor= %d.\n", minor);
#endif
- return 0; /** Success. **/
+ return nonseekable_open(inode, file); /** Success. **/
}
return 0;
}
-#define hub_suspend 0
-#define hub_resume 0
+#define hub_suspend NULL
+#define hub_resume NULL
#define remote_wakeup(x) 0
#endif /* CONFIG_USB_SUSPEND */
int rndis_proc_write (struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
+ rndis_params *p = data;
u32 speed = 0;
int i, fl_speed = 0;
for (i = 0; i < count; i++) {
- switch (*buffer) {
+ char c;
+ if (get_user(c, buffer))
+ return -EFAULT;
+ switch (c) {
case '0':
case '1':
case '2':
case '8':
case '9':
fl_speed = 1;
- speed = speed*10 + *buffer - '0';
+ speed = speed*10 + c - '0';
break;
case 'C':
case 'c':
- rndis_signal_connect (((rndis_params *) data)
- ->confignr);
+ rndis_signal_connect (p->confignr);
break;
case 'D':
case 'd':
- rndis_signal_disconnect (((rndis_params *) data)
- ->confignr);
+ rndis_signal_disconnect(p->confignr);
break;
default:
- if (fl_speed) ((rndis_params *) data)->speed = speed;
- else DEBUG ("%c is not valid\n", *buffer);
+ if (fl_speed) p->speed = speed;
+ else DEBUG ("%c is not valid\n", c);
break;
}
size_t nbytes, loff_t *ppos)
{
struct uhci_proc *up = file->private_data;
- unsigned int pos;
- unsigned int size;
-
- pos = *ppos;
- size = up->size;
- if (pos >= size)
- return 0;
- if (nbytes > size - pos)
- nbytes = size - pos;
-
- if (copy_to_user(buf, up->data + pos, nbytes))
- return -EFAULT;
-
- *ppos += nbytes;
-
- return nbytes;
+ return simple_read_from_buffer(buf, nbytes, ppos, up->data, up->size);
}
static int uhci_proc_release(struct inode *inode, struct file *file)
file->f_pos = 0;
file->private_data = s;
- return 0;
+ return nonseekable_open(inode, file);
}
static int dabusb_release (struct inode *inode, struct file *file)
* Based on the Linux CPiA driver written by Peter Pregler,
* Scott J. Bertin and Johannes Erdfelt.
*
- * Please see the file: linux/Documentation/usb/ov511.txt
+ * Please see the file: Documentation/usb/ov511.txt
* and the website at: http://alpha.dyndns.org/ov511
* for more info.
*
DECLARE_WAITQUEUE(wait, current);
int bytes_to_read;
- Trace(TRACE_READ, "video_read(0x%p, %p, %d) called.\n", vdev, buf, count);
+ Trace(TRACE_READ, "video_read(0x%p, %p, %zd) called.\n", vdev, buf, count);
if (vdev == NULL)
return -EFAULT;
pdev = vdev->priv;
/* file IO stuff */
file->f_pos = 0;
file->private_data = ccp;
- return 0;
+ return nonseekable_open(inode, file);
/* Error exit */
ofail: up (&cp->mutex);
dbg(2, "%s: enter", __FUNCTION__);
+ nonseekable_open(inode, file);
subminor = iminor(inode);
down (&disconnect_sem);
*
* Based on dabusb.c, printer.c & scanner.c
*
- * Please see the file: linux/Documentation/usb/SilverLink.txt
+ * Please see the file: Documentation/usb/silverlink.txt
* and the website at: http://lpg.ticalc.org/prj_usb/
* for more info.
*
filp->f_pos = 0;
filp->private_data = s;
- return 0;
+ return nonseekable_open(inode, filp);
}
static int
config FB_CIRRUS
tristate "Cirrus Logic support"
- depends on FB && (AMIGA || PCI)
+ depends on FB && (ZORRO || PCI)
---help---
This enables support for Cirrus Logic GD542x/543x based boards on
Amiga: SD64, Piccolo, Picasso II/II+, Picasso IV, or EGS Spectrum.
config FB_CYBER2000
tristate "CyberPro 2000/2010/5000 support"
- depends on FB && PCI
+ depends on FB && PCI && (BROKEN || !SPARC64)
help
This enables support for the Integraphics CyberPro 20x0 and 5000
VGA chips used in the Rebel.com Netwinder and other machines.
config FB_S3TRIO
bool "S3 Trio display support"
- depends on FB && PPC
+ depends on FB && PPC && BROKEN
help
If you have a S3 Trio say Y. Say N for S3 Virge.
independently validate video mode parameters, you should say Y
here.
+config FB_RIVA_DEBUG
+ bool "Lots of debug output from Riva(nVidia) driver"
+ depends on FB_RIVA
+ default n
+ help
+	  Say Y here if you want the Riva driver to output all sorts
+	  of debugging information to provide to the maintainer when
+	  something goes wrong.
+
config FB_I810
tristate "Intel 810/815 support (EXPERIMENTAL)"
depends on FB && AGP && AGP_INTEL && EXPERIMENTAL && PCI
info->fix.ypanstep = 0;
} else {
info->fix.ywrapstep = 0;
- if (par->vmode &= FB_VMODE_SMOOTH_XPAN)
+ if (par->vmode & FB_VMODE_SMOOTH_XPAN)
info->fix.xpanstep = 1;
else
info->fix.xpanstep = 16<<maxfmode;
*/
{
- u_long tmp = DIVUL(200E9, amiga_eclock);
+ u_long tmp = DIVUL(200000000000ULL, amiga_eclock);
pixclock[TAG_SHRES] = (tmp + 4) / 8; /* SHRES: 35 ns / 28 MHz */
pixclock[TAG_HIRES] = (tmp + 2) / 4; /* HIRES: 70 ns / 14 MHz */
}
-#ifdef __i386__
+#ifdef CONFIG_X86
static void * __devinit aty128_find_mem_vbios(struct aty128fb_par *par)
{
/* I simplified this code as we used to miss the signatures in
}
return rom_base;
}
-#endif /* __i386__ */
+#endif
#endif /* ndef(__sparc__) */
/* fill in known card constants if pll_block is not available */
#ifndef __sparc__
bios = aty128_map_ROM(par, pdev);
-#ifdef __i386__
+#ifdef CONFIG_X86
if (bios == NULL)
bios = aty128_find_mem_vbios(par);
#endif
case FBIO_ATY128_SET_MIRROR:
if (par->chip_gen != rage_M3)
return -EINVAL;
- rc = get_user(value, (__u32*)arg);
+ rc = get_user(value, (__u32 __user *)arg);
if (rc)
return rc;
par->lcd_on = (value & 0x01) != 0;
if (par->chip_gen != rage_M3)
return -EINVAL;
value = (par->crt_on << 1) | par->lcd_on;
- return put_user(value, (__u32*)arg);
+ return put_user(value, (__u32 __user *)arg);
}
#endif
return -EINVAL;
wait_for_idle(par);
aty128fb_set_par(info);
fb_pan_display(info, &info->var);
- fb_set_cmap(&info->cmap, 1, info);
+ fb_set_cmap(&info->cmap, info);
/* Refresh */
fb_set_suspend(info, 0);
fbtyp.fb_cmsize = info->cmap.len;
fbtyp.fb_size = info->fix.smem_len;
if (copy_to_user
- ((struct fbtype *) arg, &fbtyp, sizeof(fbtyp)))
+ ((struct fbtype __user *) arg, &fbtyp, sizeof(fbtyp)))
return -EFAULT;
break;
#endif /* __sparc__ */
case PBOOK_SLEEP_REJECT:
if (par->save_framebuffer) {
vfree(par->save_framebuffer);
- par->save_framebuffer = 0;
+ par->save_framebuffer = NULL;
}
break;
case PBOOK_SLEEP_NOW:
memcpy_toio((void *) info->screen_base,
par->save_framebuffer, nb);
vfree(par->save_framebuffer);
- par->save_framebuffer = 0;
+ par->save_framebuffer = NULL;
}
/* Restore display */
atyfb_set_par(info);
for (m = MIN_M; m <= MAX_M; m++) {
for (n = MIN_N; n <= MAX_N; n++) {
- tempA = (14.31818 * 65536);
+ tempA = 938356; /* 14.31818 * 65536 */
tempA *= (n + 8); /* 43..256 */
tempB = twoToKth * 256;
tempB *= (m + 2); /* 4..32 */
return -ENXIO;
}
-#ifdef __i386__
+#ifdef CONFIG_X86
static int __devinit radeon_find_mem_vbios(struct radeonfb_info *rinfo)
{
/* I simplified this code as we used to miss the signatures in
return 0;
}
-#endif /* __i386__ */
+#endif
#ifdef CONFIG_PPC_OF
/*
printk(KERN_WARNING "radeonfb: Cannot match card to OF node !\n");
return -ENODEV;
}
- val = (u32 *) get_property(dp, "ATY,RefCLK", 0);
+ val = (u32 *) get_property(dp, "ATY,RefCLK", NULL);
if (!val || !*val) {
printk(KERN_WARNING "radeonfb: No ATY,RefCLK property !\n");
return -EINVAL;
rinfo->pll.ref_clk = (*val) / 10;
- val = (u32 *) get_property(dp, "ATY,SCLK", 0);
+ val = (u32 *) get_property(dp, "ATY,SCLK", NULL);
if (val && *val)
rinfo->pll.sclk = (*val) / 10;
- val = (u32 *) get_property(dp, "ATY,MCLK", 0);
+ val = (u32 *) get_property(dp, "ATY,MCLK", NULL);
if (val && *val)
rinfo->pll.mclk = (*val) / 10;
/*
* On x86, the primary display on laptop may have it's BIOS
* ROM elsewhere, try to locate it at the legacy memory hole.
- * We probably need to make sure this is the primary dispay,
+ * We probably need to make sure this is the primary display,
* but that is difficult without some arch support.
*/
-#ifdef __i386__
+#ifdef CONFIG_X86
if (rinfo->bios_seg == NULL)
radeon_find_mem_vbios(rinfo);
-#endif /* __i386__ */
+#endif
/* If both above failed, try the BIOS ROM again for mobility
* chips
/* Restore display & engine */
radeonfb_set_par(info);
fb_pan_display(info, &info->var);
- fb_set_cmap(&info->cmap, 1, info);
+ fb_set_cmap(&info->cmap, info);
/* Refresh */
fb_set_suspend(info, 0);
{
struct cg14_par *par = (struct cg14_par *) info->par;
struct cg14_regs *regs = par->regs;
- struct mdi_cfginfo kmdi, *mdii;
+ struct mdi_cfginfo kmdi, __user *mdii;
unsigned long flags;
int cur_mode, mode, ret = 0;
kmdi.mdi_size = par->ramsize;
spin_unlock_irqrestore(&par->lock, flags);
- mdii = (struct mdi_cfginfo *) arg;
+ mdii = (struct mdi_cfginfo __user *) arg;
if (copy_to_user(mdii, &kmdi, sizeof(kmdi)))
ret = -EFAULT;
break;
case MDI_SET_PIXELMODE:
- if (get_user(mode, (int *) arg)) {
+ if (get_user(mode, (int __user *) arg)) {
ret = -EFAULT;
break;
}
case PBOOK_SLEEP_REJECT:
if (save_framebuffer) {
vfree(save_framebuffer);
- save_framebuffer = 0;
+ save_framebuffer = NULL;
}
break;
case PBOOK_SLEEP_NOW:
if (save_framebuffer) {
memcpy(p->screen_base, save_framebuffer, nb);
vfree(save_framebuffer);
- save_framebuffer = 0;
+ save_framebuffer = NULL;
}
chipsfb_blank(0, p);
break;
*
* Contributors (thanks, all!)
*
- * David Eger:
- * Overhaul for Linux 2.6
+ * David Eger:
+ * Overhaul for Linux 2.6
*
* Jeff Rugen:
* Major contributions; Motorola PowerStack (PPC and PCI) support,
* a run-time table?
*/
static const struct cirrusfb_board_info_rec {
- cirrusfb_board_t btype; /* chipset enum, not strictly necessary, as
- * cirrusfb_board_info[] is directly indexed
- * by this value */
char *name; /* ASCII name of chipset */
long maxclock[5]; /* maximum video clock */
/* for 1/4bpp, 8bpp 15/16bpp, 24bpp, 32bpp - numbers from xorg code */
unsigned char sr1f; /* SR1F VGA initial register value */
} cirrusfb_board_info[] = {
- { BT_NONE, }, /* dummy record */
- { BT_SD64,
- "CL SD64",
- { 140000, 140000, 140000, 140000, 140000, }, /* guess */
- /* the SD64/P4 have a higher max. videoclock */
- TRUE,
- TRUE,
- TRUE,
- 0xF0,
- 0xF0,
- 0, /* unused, does not multiplex */
- 0xF1,
- 0, /* unused, does not multiplex */
- 0x20 },
- { BT_PICCOLO,
- "CL Piccolo",
- { 90000, 90000, 90000, 90000, 90000 }, /* guess */
- TRUE,
- TRUE,
- FALSE,
- 0x80,
- 0x80,
- 0, /* unused, does not multiplex */
- 0x81,
- 0, /* unused, does not multiplex */
- 0x22 },
- { BT_PICASSO,
- "CL Picasso",
- { 90000, 90000, 90000, 90000, 90000, }, /* guess */
- TRUE,
- TRUE,
- FALSE,
- 0x20,
- 0x20,
- 0, /* unused, does not multiplex */
- 0x21,
- 0, /* unused, does not multiplex */
- 0x22 },
- { BT_SPECTRUM,
- "CL Spectrum",
- { 90000, 90000, 90000, 90000, 90000, }, /* guess */
- TRUE,
- TRUE,
- FALSE,
- 0x80,
- 0x80,
- 0, /* unused, does not multiplex */
- 0x81,
- 0, /* unused, does not multiplex */
- 0x22 },
- { BT_PICASSO4,
- "CL Picasso4",
- { 135100, 135100, 85500, 85500, 0 },
- TRUE,
- FALSE,
- TRUE,
- 0x20,
- 0x20,
- 0, /* unused, does not multiplex */
- 0x21,
- 0, /* unused, does not multiplex */
- 0 },
- { BT_ALPINE,
- "CL Alpine",
- { 85500, 85500, 50000, 28500, 0}, /* for the GD5430. GD5446 can do more... */
- TRUE,
- TRUE,
- TRUE,
- 0xA0,
- 0xA1,
- 0xA7,
- 0xA1,
- 0xA7,
- 0x1C },
- { BT_GD5480,
- "CL GD5480",
- { 135100, 200000, 200000, 135100, 135100 },
- TRUE,
- TRUE,
- TRUE,
- 0x10,
- 0x11,
- 0, /* unused, does not multiplex */
- 0x11,
- 0, /* unused, does not multiplex */
- 0x1C },
- { BT_LAGUNA,
- "CL Laguna",
- { 135100, 135100, 135100, 135100, 135100, }, /* guess */
- FALSE,
- FALSE,
- TRUE,
- 0, /* unused */
- 0, /* unused */
- 0, /* unused */
- 0, /* unused */
- 0, /* unused */
- 0 }, /* unused */
+ [BT_SD64] = {
+ .name = "CL SD64",
+ .maxclock = {
+ /* guess */
+ /* the SD64/P4 have a higher max. videoclock */
+ 140000, 140000, 140000, 140000, 140000,
+ },
+ .init_sr07 = TRUE,
+ .init_sr1f = TRUE,
+ .scrn_start_bit19 = TRUE,
+ .sr07 = 0xF0,
+ .sr07_1bpp = 0xF0,
+ .sr07_8bpp = 0xF1,
+ .sr1f = 0x20
+ },
+ [BT_PICCOLO] = {
+ .name = "CL Piccolo",
+ .maxclock = {
+ /* guess */
+ 90000, 90000, 90000, 90000, 90000
+ },
+ .init_sr07 = TRUE,
+ .init_sr1f = TRUE,
+ .scrn_start_bit19 = FALSE,
+ .sr07 = 0x80,
+ .sr07_1bpp = 0x80,
+ .sr07_8bpp = 0x81,
+ .sr1f = 0x22
+ },
+ [BT_PICASSO] = {
+ .name = "CL Picasso",
+ .maxclock = {
+ /* guess */
+ 90000, 90000, 90000, 90000, 90000
+ },
+ .init_sr07 = TRUE,
+ .init_sr1f = TRUE,
+ .scrn_start_bit19 = FALSE,
+ .sr07 = 0x20,
+ .sr07_1bpp = 0x20,
+ .sr07_8bpp = 0x21,
+ .sr1f = 0x22
+ },
+ [BT_SPECTRUM] = {
+ .name = "CL Spectrum",
+ .maxclock = {
+ /* guess */
+ 90000, 90000, 90000, 90000, 90000
+ },
+ .init_sr07 = TRUE,
+ .init_sr1f = TRUE,
+ .scrn_start_bit19 = FALSE,
+ .sr07 = 0x80,
+ .sr07_1bpp = 0x80,
+ .sr07_8bpp = 0x81,
+ .sr1f = 0x22
+ },
+ [BT_PICASSO4] = {
+ .name = "CL Picasso4",
+ .maxclock = {
+ 135100, 135100, 85500, 85500, 0
+ },
+ .init_sr07 = TRUE,
+ .init_sr1f = FALSE,
+ .scrn_start_bit19 = TRUE,
+ .sr07 = 0x20,
+ .sr07_1bpp = 0x20,
+ .sr07_8bpp = 0x21,
+ .sr1f = 0
+ },
+ [BT_ALPINE] = {
+ .name = "CL Alpine",
+ .maxclock = {
+ /* for the GD5430. GD5446 can do more... */
+ 85500, 85500, 50000, 28500, 0
+ },
+ .init_sr07 = TRUE,
+ .init_sr1f = TRUE,
+ .scrn_start_bit19 = TRUE,
+ .sr07 = 0xA0,
+ .sr07_1bpp = 0xA1,
+ .sr07_1bpp_mux = 0xA7,
+ .sr07_8bpp = 0xA1,
+ .sr07_8bpp_mux = 0xA7,
+ .sr1f = 0x1C
+ },
+ [BT_GD5480] = {
+ .name = "CL GD5480",
+ .maxclock = {
+ 135100, 200000, 200000, 135100, 135100
+ },
+ .init_sr07 = TRUE,
+ .init_sr1f = TRUE,
+ .scrn_start_bit19 = TRUE,
+ .sr07 = 0x10,
+ .sr07_1bpp = 0x11,
+ .sr07_8bpp = 0x11,
+ .sr1f = 0x1C
+ },
+ [BT_LAGUNA] = {
+ .name = "CL Laguna",
+ .maxclock = {
+ /* guess */
+ 135100, 135100, 135100, 135100, 135100,
+ },
+ .init_sr07 = FALSE,
+ .init_sr1f = FALSE,
+ .scrn_start_bit19 = TRUE,
+ }
};
{ PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_##id, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (btype) }
static struct pci_device_id cirrusfb_pci_table[] = {
- CHIP( CIRRUS_5436, BT_ALPINE ),
- CHIP( CIRRUS_5434_8, BT_ALPINE ),
- CHIP( CIRRUS_5434_4, BT_ALPINE ),
- CHIP( CIRRUS_5430, BT_ALPINE ), /* GD-5440 has identical id */
- CHIP( CIRRUS_7543, BT_ALPINE ),
- CHIP( CIRRUS_7548, BT_ALPINE ),
+ CHIP( CIRRUS_5436, BT_ALPINE ),
+ CHIP( CIRRUS_5434_8, BT_ALPINE ),
+ CHIP( CIRRUS_5434_4, BT_ALPINE ),
+ CHIP( CIRRUS_5430, BT_ALPINE ), /* GD-5440 has identical id */
+ CHIP( CIRRUS_7543, BT_ALPINE ),
+ CHIP( CIRRUS_7548, BT_ALPINE ),
CHIP( CIRRUS_5480, BT_GD5480 ), /* MacPicasso probably */
CHIP( CIRRUS_5446, BT_PICASSO4 ), /* Picasso 4 is a GD5446 */
CHIP( CIRRUS_5462, BT_LAGUNA ), /* CL Laguna */
#ifdef CONFIG_ZORRO
+static const struct zorro_device_id cirrusfb_zorro_table[] = {
+ {
+ .id = ZORRO_PROD_HELFRICH_SD64_RAM,
+ .driver_data = BT_SD64,
+ }, {
+ .id = ZORRO_PROD_HELFRICH_PICCOLO_RAM,
+ .driver_data = BT_PICCOLO,
+ }, {
+ .id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM,
+ .driver_data = BT_PICASSO,
+ }, {
+ .id = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM,
+ .driver_data = BT_SPECTRUM,
+ }, {
+ .id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z3,
+ .driver_data = BT_PICASSO4,
+ },
+ { 0 }
+};
+
static const struct {
- cirrusfb_board_t btype;
- zorro_id id, id2;
+ zorro_id id2;
unsigned long size;
-} cirrusfb_zorro_probe_list[] __initdata = {
- { BT_SD64,
- ZORRO_PROD_HELFRICH_SD64_RAM,
- ZORRO_PROD_HELFRICH_SD64_REG,
- 0x400000 },
- { BT_PICCOLO,
- ZORRO_PROD_HELFRICH_PICCOLO_RAM,
- ZORRO_PROD_HELFRICH_PICCOLO_REG,
- 0x200000 },
- { BT_PICASSO,
- ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM,
- ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG,
- 0x200000 },
- { BT_SPECTRUM,
- ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM,
- ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_REG,
- 0x200000 },
- { BT_PICASSO4,
- ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z3,
- 0,
- 0x400000 },
+} cirrusfb_zorro_table2[] = {
+ [BT_SD64] = {
+ .id2 = ZORRO_PROD_HELFRICH_SD64_REG,
+ .size = 0x400000
+ },
+ [BT_PICCOLO] = {
+ .id2 = ZORRO_PROD_HELFRICH_PICCOLO_REG,
+ .size = 0x200000
+ },
+ [BT_PICASSO] = {
+ .id2 = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG,
+ .size = 0x200000
+ },
+ [BT_SPECTRUM] = {
+ .id2 = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_REG,
+ .size = 0x200000
+ },
+ [BT_PICASSO4] = {
+ .id2 = 0,
+ .size = 0x400000
+ }
};
#endif /* CONFIG_ZORRO */
struct { u8 red, green, blue, pad; } palette[256];
#ifdef CONFIG_ZORRO
- unsigned long board_addr,
- board_size;
+ struct zorro_dev *zdev;
#endif
-
#ifdef CONFIG_PCI
struct pci_dev *pdev;
#endif
+ void (*unmap)(struct cirrusfb_info *cinfo);
};
static const struct {
const char *name;
struct fb_var_screeninfo var;
-} cirrusfb_predefined[] =
-
-{
- {"Autodetect", /* autodetect mode */
- {0}
- },
-
- {"640x480", /* 640x480, 31.25 kHz, 60 Hz, 25 MHz PixClock */
- {
- 640, 480, 640, 480, 0, 0, 8, 0,
- {0, 8, 0},
- {0, 8, 0},
- {0, 8, 0},
- {0, 0, 0},
- 0, 0, -1, -1, FB_ACCEL_NONE, 40000, 48, 16, 32, 8, 96, 4,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
- }
- },
-
- {"800x600", /* 800x600, 48 kHz, 76 Hz, 50 MHz PixClock */
- {
- 800, 600, 800, 600, 0, 0, 8, 0,
- {0, 8, 0},
- {0, 8, 0},
- {0, 8, 0},
- {0, 0, 0},
- 0, 0, -1, -1, FB_ACCEL_NONE, 20000, 128, 16, 24, 2, 96, 6,
- 0, FB_VMODE_NONINTERLACED
- }
- },
-
- /*
- Modeline from XF86Config:
- Mode "1024x768" 80 1024 1136 1340 1432 768 770 774 805
- */
- {"1024x768", /* 1024x768, 55.8 kHz, 70 Hz, 80 MHz PixClock */
- {
- 1024, 768, 1024, 768, 0, 0, 8, 0,
- {0, 8, 0},
- {0, 8, 0},
- {0, 8, 0},
- {0, 0, 0},
- 0, 0, -1, -1, FB_ACCEL_NONE, 12500, 144, 32, 30, 2, 192, 6,
- 0, FB_VMODE_NONINTERLACED
+} cirrusfb_predefined[] = {
+ {
+ /* autodetect mode */
+ .name = "Autodetect",
+ }, {
+ /* 640x480, 31.25 kHz, 60 Hz, 25 MHz PixClock */
+ .name = "640x480",
+ .var = {
+ .xres = 640,
+ .yres = 480,
+ .xres_virtual = 640,
+ .yres_virtual = 480,
+ .bits_per_pixel = 8,
+ .red = { .length = 8 },
+ .green = { .length = 8 },
+ .blue = { .length = 8 },
+ .width = -1,
+ .height = -1,
+ .pixclock = 40000,
+ .left_margin = 48,
+ .right_margin = 16,
+ .upper_margin = 32,
+ .lower_margin = 8,
+ .hsync_len = 96,
+ .vsync_len = 4,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED
+ }
+ }, {
+ /* 800x600, 48 kHz, 76 Hz, 50 MHz PixClock */
+ .name = "800x600",
+ .var = {
+ .xres = 800,
+ .yres = 600,
+ .xres_virtual = 800,
+ .yres_virtual = 600,
+ .bits_per_pixel = 8,
+ .red = { .length = 8 },
+ .green = { .length = 8 },
+ .blue = { .length = 8 },
+ .width = -1,
+ .height = -1,
+ .pixclock = 20000,
+ .left_margin = 128,
+ .right_margin = 16,
+ .upper_margin = 24,
+ .lower_margin = 2,
+ .hsync_len = 96,
+ .vsync_len = 6,
+ .vmode = FB_VMODE_NONINTERLACED
+ }
+ }, {
+ /*
+ * Modeline from XF86Config:
+ * Mode "1024x768" 80 1024 1136 1340 1432 768 770 774 805
+ */
+ /* 1024x768, 55.8 kHz, 70 Hz, 80 MHz PixClock */
+ .name = "1024x768",
+ .var = {
+ .xres = 1024,
+ .yres = 768,
+ .xres_virtual = 1024,
+ .yres_virtual = 768,
+ .bits_per_pixel = 8,
+ .red = { .length = 8 },
+ .green = { .length = 8 },
+ .blue = { .length = 8 },
+ .width = -1,
+ .height = -1,
+ .pixclock = 12500,
+ .left_margin = 144,
+ .right_margin = 32,
+ .upper_margin = 30,
+ .lower_margin = 2,
+ .hsync_len = 192,
+ .vsync_len = 6,
+ .vmode = FB_VMODE_NONINTERLACED
}
}
};
static struct fb_ops cirrusfb_ops = {
.owner = THIS_MODULE,
.fb_open = cirrusfb_open,
- .fb_release = cirrusfb_release,
+ .fb_release = cirrusfb_release,
.fb_setcolreg = cirrusfb_setcolreg,
.fb_check_var = cirrusfb_check_var,
.fb_set_par = cirrusfb_set_par,
DPRINTK (" (for GD54xx)\n");
vga_wseq (regbase, CL_SEQR7,
regs.multiplexing ?
- bi->sr07_1bpp_mux : bi->sr07_1bpp);
+ bi->sr07_1bpp_mux : bi->sr07_1bpp);
break;
case BT_LAGUNA:
DPRINTK (" (for GD54xx)\n");
vga_wseq (regbase, CL_SEQR7,
regs.multiplexing ?
- bi->sr07_8bpp_mux : bi->sr07_8bpp);
+ bi->sr07_8bpp_mux : bi->sr07_8bpp);
break;
case BT_LAGUNA:
}
-static void __devexit cirrusfb_pci_unmap (struct cirrusfb_info *cinfo)
+static void cirrusfb_pci_unmap (struct cirrusfb_info *cinfo)
{
struct pci_dev *pdev = cinfo->pdev;
framebuffer_release(cinfo->info);
pci_disable_device(pdev);
}
+#endif /* CONFIG_PCI */
+
+
+#ifdef CONFIG_ZORRO
+static void __devexit cirrusfb_zorro_unmap (struct cirrusfb_info *cinfo)
+{
+ zorro_release_device(cinfo->zdev);
+
+ if (cinfo->btype == BT_PICASSO4) {
+ cinfo->regbase -= 0x600000;
+ iounmap ((void *)cinfo->regbase);
+ iounmap ((void *)cinfo->fbmem);
+ } else {
+ if (zorro_resource_start(cinfo->zdev) > 0x01000000)
+ iounmap ((void *)cinfo->fbmem);
+ }
+ framebuffer_release(cinfo->info);
+}
+#endif /* CONFIG_ZORRO */
+
+static int cirrusfb_set_fbinfo(struct cirrusfb_info *cinfo)
+{
+ struct fb_info *info = cinfo->info;
+ struct fb_var_screeninfo *var = &info->var;
+
+ info->currcon = -1;
+ info->par = cinfo;
+ info->pseudo_palette = cinfo->pseudo_palette;
+ info->flags = FBINFO_DEFAULT
+ | FBINFO_HWACCEL_XPAN
+ | FBINFO_HWACCEL_YPAN
+ | FBINFO_HWACCEL_FILLRECT
+ | FBINFO_HWACCEL_COPYAREA;
+ if (noaccel)
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ info->fbops = &cirrusfb_ops;
+ info->screen_base = cinfo->fbmem;
+ if (cinfo->btype == BT_GD5480) {
+ if (var->bits_per_pixel == 16)
+ info->screen_base += 1 * MB_;
+ if (var->bits_per_pixel == 24 || var->bits_per_pixel == 32)
+ info->screen_base += 2 * MB_;
+ }
+
+ /* Fill fix common fields */
+ strlcpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
+ sizeof(info->fix.id));
+
+ /* monochrome: only 1 memory plane */
+ /* 8 bit and above: Use whole memory area */
+ info->fix.smem_start = cinfo->fbmem_phys;
+ info->fix.smem_len = (var->bits_per_pixel == 1) ? cinfo->size / 4 : cinfo->size;
+ info->fix.type = cinfo->currentmode.type;
+ info->fix.type_aux = 0;
+ info->fix.visual = cinfo->currentmode.visual;
+ info->fix.xpanstep = 1;
+ info->fix.ypanstep = 1;
+ info->fix.ywrapstep = 0;
+ info->fix.line_length = cinfo->currentmode.line_length;
+
+ /* FIXME: map region at 0xB8000 if available, fill in here */
+ info->fix.mmio_start = cinfo->fbregs_phys;
+ info->fix.mmio_len = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+
+ fb_alloc_cmap(&info->cmap, 256, 0);
+
+ return 0;
+}
+static int cirrusfb_register(struct cirrusfb_info *cinfo)
+{
+ struct fb_info *info;
+ int err;
+ cirrusfb_board_t btype;
+
+ DPRINTK ("ENTER\n");
-static struct cirrusfb_info *cirrusfb_pci_setup (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ printk (KERN_INFO "cirrusfb: Driver for Cirrus Logic based graphic boards, v" CIRRUSFB_VERSION "\n");
+
+ info = cinfo->info;
+ btype = cinfo->btype;
+
+ /* sanity checks */
+ assert (btype != BT_NONE);
+
+ DPRINTK ("cirrusfb: (RAM start set to: 0x%p)\n", cinfo->fbmem);
+
+ /* Make pretend we've set the var so our structures are in a "good" */
+ /* state, even though we haven't written the mode to the hw yet... */
+ info->var = cirrusfb_predefined[cirrusfb_def_mode].var;
+ info->var.activate = FB_ACTIVATE_NOW;
+
+ err = cirrusfb_decode_var(&info->var, &cinfo->currentmode, info);
+ if (err < 0) {
+ /* should never happen */
+ DPRINTK("choking on default var... umm, no good.\n");
+ goto err_unmap_cirrusfb;
+ }
+
+ /* set all the vital stuff */
+ cirrusfb_set_fbinfo(cinfo);
+
+ err = register_framebuffer(info);
+ if (err < 0) {
+ printk (KERN_ERR "cirrusfb: could not register fb device; err = %d!\n", err);
+ goto err_dealloc_cmap;
+ }
+
+ DPRINTK ("EXIT, returning 0\n");
+ return 0;
+
+err_dealloc_cmap:
+ fb_dealloc_cmap(&info->cmap);
+err_unmap_cirrusfb:
+ cinfo->unmap(cinfo);
+ return err;
+}
+
+static void __devexit cirrusfb_cleanup (struct fb_info *info)
+{
+ struct cirrusfb_info *cinfo = info->par;
+ DPRINTK ("ENTER\n");
+
+ switch_monitor (cinfo, 0);
+
+ unregister_framebuffer (info);
+ fb_dealloc_cmap (&info->cmap);
+ printk ("Framebuffer unregistered\n");
+ cinfo->unmap(cinfo);
+
+ DPRINTK ("EXIT\n");
+}
+
+
+#ifdef CONFIG_PCI
+static int cirrusfb_pci_register (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct cirrusfb_info *cinfo;
struct fb_info *info;
cinfo->fbmem_phys = board_addr;
cinfo->size = board_size;
+ cinfo->unmap = cirrusfb_pci_unmap;
printk (" RAM (%lu kB) at 0xx%lx, ", cinfo->size / KB_, board_addr);
printk ("Cirrus Logic chipset on PCI bus\n");
+ pci_set_drvdata(pdev, info);
- return cinfo;
+ return cirrusfb_register(cinfo);
err_release_legacy:
if (release_io_ports)
err_disable:
pci_disable_device(pdev);
err_out:
- return ERR_PTR(ret);
+ return ret;
}
-#endif /* CONFIG_PCI */
-
-
-
-#ifdef CONFIG_ZORRO
-static int cirrusfb_zorro_find (struct zorro_dev **z_o,
- struct zorro_dev **z2_o,
- cirrusfb_board_t *btype, unsigned long *size)
+void __devexit cirrusfb_pci_unregister (struct pci_dev *pdev)
{
- struct zorro_dev *z = NULL;
- int i;
-
- assert (z_o != NULL);
- assert (btype != NULL);
-
- for (i = 0; i < ARRAY_SIZE(cirrusfb_zorro_probe_list); i++)
- if ((z = zorro_find_device(cirrusfb_zorro_probe_list[i].id, NULL)))
- break;
-
- if (z) {
- *z_o = z;
- if (cirrusfb_zorro_probe_list[i].id2)
- *z2_o = zorro_find_device(cirrusfb_zorro_probe_list[i].id2, NULL);
- else
- *z2_o = NULL;
-
- *btype = cirrusfb_zorro_probe_list[i].btype;
- *size = cirrusfb_zorro_probe_list[i].size;
-
- printk (KERN_INFO "cirrusfb: %s board detected; ",
- cirrusfb_board_info[*btype].name);
+ struct fb_info *info = pci_get_drvdata(pdev);
+ DPRINTK ("ENTER\n");
- return 0;
- }
+ cirrusfb_cleanup (info);
- printk (KERN_NOTICE "cirrusfb: no supported board found.\n");
- return -ENODEV;
+ DPRINTK ("EXIT\n");
}
-
-static void __devexit cirrusfb_zorro_unmap (struct cirrusfb_info *cinfo)
-{
- release_mem_region(cinfo->board_addr, cinfo->board_size);
-
- if (cinfo->btype == BT_PICASSO4) {
- cinfo->regbase -= 0x600000;
- iounmap ((void *)cinfo->regbase);
- iounmap ((void *)cinfo->fbmem);
- } else {
- if (cinfo->board_addr > 0x01000000)
- iounmap ((void *)cinfo->fbmem);
- }
- framebuffer_release(cinfo->info);
-}
+static struct pci_driver cirrusfb_pci_driver = {
+ .name = "cirrusfb",
+ .id_table = cirrusfb_pci_table,
+ .probe = cirrusfb_pci_register,
+ .remove = __devexit_p(cirrusfb_pci_unregister),
+#ifdef CONFIG_PM
+#if 0
+ .suspend = cirrusfb_pci_suspend,
+ .resume = cirrusfb_pci_resume,
+#endif
+#endif
+};
+#endif /* CONFIG_PCI */
-static struct cirrusfb_info *cirrusfb_zorro_setup(void)
+#ifdef CONFIG_ZORRO
+static int cirrusfb_zorro_register(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
{
struct cirrusfb_info *cinfo;
struct fb_info *info;
cirrusfb_board_t btype;
- struct zorro_dev *z = NULL, *z2 = NULL;
+ struct zorro_dev *z2 = NULL;
unsigned long board_addr, board_size, size;
int ret;
- ret = cirrusfb_zorro_find (&z, &z2, &btype, &size);
- if (ret < 0)
- goto err_out;
+ btype = ent->driver_data;
+ if (cirrusfb_zorro_table2[btype].id2)
+ z2 = zorro_find_device(cirrusfb_zorro_table2[btype].id2, NULL);
+ size = cirrusfb_zorro_table2[btype].size;
+ printk(KERN_INFO "cirrusfb: %s board detected; ",
+ cirrusfb_board_info[btype].name);
info = framebuffer_alloc(sizeof(struct cirrusfb_info), &z->dev);
if (!info) {
assert (z2 >= 0);
assert (btype != BT_NONE);
- cinfo->board_addr = board_addr = z->resource.start;
- cinfo->board_size = board_size = z->resource.end-z->resource.start+1;
+ cinfo->zdev = z;
+ board_addr = zorro_resource_start(z);
+ board_size = zorro_resource_len(z);
cinfo->size = size;
- if (!request_mem_region(board_addr, board_size, "cirrusfb")) {
+ if (!zorro_request_device(z, "cirrusfb")) {
printk(KERN_ERR "cirrusfb: cannot reserve region 0x%lx, abort\n",
board_addr);
ret = -EBUSY;
cinfo->fbregs_phys = board_addr + 0x600000;
cinfo->fbmem_phys = board_addr + 16777216;
- cinfo->fbmem = ioremap (info->fbmem_phys, 16777216);
+ cinfo->fbmem = ioremap (cinfo->fbmem_phys, 16777216);
if (!cinfo->fbmem)
goto err_unmap_regbase;
} else {
DPRINTK ("cirrusfb: Virtual address for board set to: $%p\n", cinfo->regbase);
}
+ cinfo->unmap = cirrusfb_zorro_unmap;
printk (KERN_INFO "Cirrus Logic chipset on Zorro bus\n");
+ zorro_set_drvdata(z, info);
- return 0;
+ return cirrusfb_register(cinfo);
err_unmap_regbase:
/* Parental advisory: explicit hack */
err_release_fb:
framebuffer_release(info);
err_out:
- return ERR_PTR(ret);
+ return ret;
}
-#endif /* CONFIG_ZORRO */
-static int cirrusfb_set_fbinfo(struct cirrusfb_info *cinfo)
+void __devexit cirrusfb_zorro_unregister(struct zorro_dev *z)
{
- struct fb_info *info = cinfo->info;
- struct fb_var_screeninfo *var = &info->var;
-
- info->currcon = -1;
- info->par = cinfo;
- info->pseudo_palette = cinfo->pseudo_palette;
- info->flags = FBINFO_DEFAULT
- | FBINFO_HWACCEL_XPAN
- | FBINFO_HWACCEL_YPAN
- | FBINFO_HWACCEL_FILLRECT
- | FBINFO_HWACCEL_COPYAREA;
- if (noaccel)
- info->flags |= FBINFO_HWACCEL_DISABLED;
- info->fbops = &cirrusfb_ops;
- info->screen_base = cinfo->fbmem;
- if (cinfo->btype == BT_GD5480) {
- if (var->bits_per_pixel == 16)
- info->screen_base += 1 * MB_;
- if (var->bits_per_pixel == 24 || var->bits_per_pixel == 32)
- info->screen_base += 2 * MB_;
- }
-
- /* Fill fix common fields */
- strlcpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
- sizeof(info->fix.id));
-
- /* monochrome: only 1 memory plane */
- /* 8 bit and above: Use whole memory area */
- info->fix.smem_start = cinfo->fbmem_phys;
- info->fix.smem_len = (var->bits_per_pixel == 1) ? cinfo->size / 4 : cinfo->size;
- info->fix.type = cinfo->currentmode.type;
- info->fix.type_aux = 0;
- info->fix.visual = cinfo->currentmode.visual;
- info->fix.xpanstep = 1;
- info->fix.ypanstep = 1;
- info->fix.ywrapstep = 0;
- info->fix.line_length = cinfo->currentmode.line_length;
-
- /* FIXME: map region at 0xB8000 if available, fill in here */
- info->fix.mmio_start = cinfo->fbregs_phys;
- info->fix.mmio_len = 0;
- info->fix.accel = FB_ACCEL_NONE;
-
- fb_alloc_cmap(&info->cmap, 256, 0);
-
- return 0;
-}
-
-#if defined(CONFIG_PCI)
-#define cirrusfb_unmap cirrusfb_pci_unmap
-#define cirrusfb_bus_setup cirrusfb_pci_setup
-#elif defined(CONFIG_ZORRO)
-#define cirrusfb_unmap cirrusfb_zorro_unmap
-#define cirrusfb_bus_setup cirrusfb_zorro_setup
-#endif
-
-
-static int cirrusfb_pci_register (struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct fb_info *info;
- struct cirrusfb_info *cinfo = NULL;
- int err;
- cirrusfb_board_t btype;
-
- DPRINTK ("ENTER\n");
-
- printk (KERN_INFO "cirrusfb: Driver for Cirrus Logic based graphic boards, v" CIRRUSFB_VERSION "\n");
-
- cinfo = cirrusfb_bus_setup(pdev, ent);
-
- if (IS_ERR(cinfo)) {
- err = PTR_ERR(cinfo);
- goto err_out;
- }
-
- info = cinfo->info;
- btype = cinfo->btype;
-
- /* sanity checks */
- assert (btype != BT_NONE);
- assert (btype == cirrusfb_board_info[btype].btype);
-
- DPRINTK ("cirrusfb: (RAM start set to: 0x%p)\n", cinfo->fbmem);
-
- /* Make pretend we've set the var so our structures are in a "good" */
- /* state, even though we haven't written the mode to the hw yet... */
- info->var = cirrusfb_predefined[cirrusfb_def_mode].var;
- info->var.activate = FB_ACTIVATE_NOW;
-
- err = cirrusfb_decode_var(&info->var, &cinfo->currentmode, info);
- if (err < 0) {
- /* should never happen */
- DPRINTK("choking on default var... umm, no good.\n");
- goto err_unmap_cirrusfb;
- }
-
- /* set all the vital stuff */
- cirrusfb_set_fbinfo(cinfo);
-
- pci_set_drvdata(pdev, info);
-
- err = register_framebuffer(info);
- if (err < 0) {
- printk (KERN_ERR "cirrusfb: could not register fb device; err = %d!\n", err);
- goto err_dealloc_cmap;
- }
-
- DPRINTK ("EXIT, returning 0\n");
- return 0;
-
-err_dealloc_cmap:
- fb_dealloc_cmap(&info->cmap);
-err_unmap_cirrusfb:
- cirrusfb_unmap(cinfo);
-err_out:
- return err;
-}
-
-
-static void __devexit cirrusfb_cleanup (struct fb_info *info)
-{
- struct cirrusfb_info *cinfo = info->par;
- DPRINTK ("ENTER\n");
-
-#ifdef CONFIG_ZORRO
- switch_monitor (cinfo, 0);
-#endif
-
- unregister_framebuffer (info);
- fb_dealloc_cmap (&info->cmap);
- printk ("Framebuffer unregistered\n");
- cirrusfb_unmap (cinfo);
-
- DPRINTK ("EXIT\n");
-}
-
-
-void __devexit cirrusfb_pci_unregister (struct pci_dev *pdev)
-{
- struct fb_info *info = pci_get_drvdata(pdev);
+ struct fb_info *info = zorro_get_drvdata(z);
DPRINTK ("ENTER\n");
cirrusfb_cleanup (info);
DPRINTK ("EXIT\n");
}
-static struct pci_driver cirrusfb_driver = {
- .name = "cirrusfb",
- .id_table = cirrusfb_pci_table,
- .probe = cirrusfb_pci_register,
- .remove = __devexit_p(cirrusfb_pci_unregister),
-#ifdef CONFIG_PM
-#if 0
- .suspend = cirrusfb_pci_suspend,
- .resume = cirrusfb_pci_resume,
-#endif
-#endif
+static struct zorro_driver cirrusfb_zorro_driver = {
+ .name = "cirrusfb",
+ .id_table = cirrusfb_zorro_table,
+ .probe = cirrusfb_zorro_register,
+ .remove = __devexit_p(cirrusfb_zorro_unregister),
};
+#endif /* CONFIG_ZORRO */
int __init cirrusfb_init(void)
{
+ int error = 0;
+
#ifdef CONFIG_ZORRO
- return cirrusfb_pci_register(NULL, NULL);
-#else
- return pci_module_init(&cirrusfb_driver);
+ error |= zorro_module_init(&cirrusfb_zorro_driver);
#endif
+#ifdef CONFIG_PCI
+ error |= pci_module_init(&cirrusfb_pci_driver);
+#endif
+ return error;
}
void __exit cirrusfb_exit (void)
{
- pci_unregister_driver (&cirrusfb_driver);
+#ifdef CONFIG_PCI
+ pci_unregister_driver(&cirrusfb_pci_driver);
+#endif
+#ifdef CONFIG_ZORRO
+ zorro_unregister_driver(&cirrusfb_zorro_driver);
+#endif
}
#ifdef MODULE
static void cirrusfb_BitBLT (caddr_t regbase, int bits_per_pixel,
u_short curx, u_short cury, u_short destx, u_short desty,
- u_short width, u_short height, u_short line_length)
+ u_short width, u_short height, u_short line_length)
{
u_short nwidth, nheight;
u_long nsrc, ndest;
.con_bmove = DUMMY,
.con_switch = DUMMY,
.con_blank = DUMMY,
- .con_font_op = DUMMY,
+ .con_font_set = DUMMY,
+ .con_font_get = DUMMY,
+ .con_font_default = DUMMY,
+ .con_font_copy = DUMMY,
.con_set_palette = DUMMY,
.con_scrolldelta = DUMMY,
};
int height, int width);
static int fbcon_switch(struct vc_data *vc);
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch);
-static int fbcon_font_op(struct vc_data *vc, struct console_font_op *op);
static int fbcon_set_palette(struct vc_data *vc, unsigned char *table);
static int fbcon_scrolldelta(struct vc_data *vc, int lines);
void accel_clear_margins(struct vc_data *vc, struct fb_info *info,
p->userfont = 0;
}
-static inline int fbcon_get_font(struct vc_data *vc, struct console_font_op *op)
+static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
{
u8 *fontdata = vc->vc_font.data;
- u8 *data = op->data;
+ u8 *data = font->data;
int i, j;
- op->width = vc->vc_font.width;
- op->height = vc->vc_font.height;
- op->charcount = vc->vc_hi_font_mask ? 512 : 256;
- if (!op->data)
+ font->width = vc->vc_font.width;
+ font->height = vc->vc_font.height;
+ font->charcount = vc->vc_hi_font_mask ? 512 : 256;
+ if (!font->data)
return 0;
- if (op->width <= 8) {
+ if (font->width <= 8) {
j = vc->vc_font.height;
- for (i = 0; i < op->charcount; i++) {
+ for (i = 0; i < font->charcount; i++) {
memcpy(data, fontdata, j);
memset(data + j, 0, 32 - j);
data += 32;
fontdata += j;
}
- } else if (op->width <= 16) {
+ } else if (font->width <= 16) {
j = vc->vc_font.height * 2;
- for (i = 0; i < op->charcount; i++) {
+ for (i = 0; i < font->charcount; i++) {
memcpy(data, fontdata, j);
memset(data + j, 0, 64 - j);
data += 64;
fontdata += j;
}
- } else if (op->width <= 24) {
- for (i = 0; i < op->charcount; i++) {
+ } else if (font->width <= 24) {
+ for (i = 0; i < font->charcount; i++) {
for (j = 0; j < vc->vc_font.height; j++) {
*data++ = fontdata[0];
*data++ = fontdata[1];
}
} else {
j = vc->vc_font.height * 4;
- for (i = 0; i < op->charcount; i++) {
+ for (i = 0; i < font->charcount; i++) {
memcpy(data, fontdata, j);
memset(data + j, 0, 128 - j);
data += 128;
return 0;
}
-static int fbcon_do_set_font(struct vc_data *vc, struct console_font_op *op,
+static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
u8 * data, int userfont)
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
int resize;
- int w = op->width;
- int h = op->height;
int cnt;
char *old_data = NULL;
- if (!w > 32) {
- if (userfont && op->op != KD_FONT_OP_COPY)
- kfree(data - FONT_EXTRA_WORDS * sizeof(int));
- return -ENXIO;
- }
-
if (CON_IS_VISIBLE(vc) && softback_lines)
fbcon_set_origin(vc);
return 0;
}
-static inline int fbcon_copy_font(struct vc_data *vc, struct console_font_op *op)
+static int fbcon_copy_font(struct vc_data *vc, int con)
{
- struct display *od;
- int h = op->height;
-
- if (h < 0 || !vc_cons_allocated(h))
- return -ENOTTY;
- if (h == vc->vc_num)
- return 0; /* nothing to do */
- od = &fb_display[h];
- if (od->fontdata == vc->vc_font.data)
+ struct display *od = &fb_display[con];
+ struct console_font *f = &vc->vc_font;
+
+ if (od->fontdata == f->data)
return 0; /* already the same font... */
- op->width = vc->vc_font.width;
- op->height = vc->vc_font.height;
- return fbcon_do_set_font(vc, op, od->fontdata, od->userfont);
+ return fbcon_do_set_font(vc, f->width, f->height, od->fontdata, od->userfont);
}
-static inline int fbcon_set_font(struct vc_data *vc, struct console_font_op *op)
+/*
+ * User asked to set font; we are guaranteed that
+ * a) width and height are in range 1..32
+ * b) charcount does not exceed 512
+ */
+
+static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigned flags)
{
- int w = op->width;
- int h = op->height;
+ unsigned charcount = font->charcount;
+ int w = font->width;
+ int h = font->height;
int size = h;
int i, k;
- u8 *new_data, *data = op->data, *p;
+ u8 *new_data, *data = font->data, *p;
- if ((w <= 0) || (w > 32)
- || (op->charcount != 256 && op->charcount != 512))
+ if (charcount != 256 && charcount != 512)
return -EINVAL;
if (w > 8) {
else
size *= 4;
}
- size *= op->charcount;
+ size *= charcount;
+
+ new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);
- if (!
- (new_data =
- kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER)))
+ if (!new_data)
return -ENOMEM;
+
new_data += FONT_EXTRA_WORDS * sizeof(int);
FNTSIZE(new_data) = size;
- FNTCHARCNT(new_data) = op->charcount;
+ FNTCHARCNT(new_data) = charcount;
REFCOUNT(new_data) = 0; /* usage counter */
p = new_data;
if (w <= 8) {
- for (i = 0; i < op->charcount; i++) {
+ for (i = 0; i < charcount; i++) {
memcpy(p, data, h);
data += 32;
p += h;
}
} else if (w <= 16) {
h *= 2;
- for (i = 0; i < op->charcount; i++) {
+ for (i = 0; i < charcount; i++) {
memcpy(p, data, h);
data += 64;
p += h;
}
} else if (w <= 24) {
- for (i = 0; i < op->charcount; i++) {
+ for (i = 0; i < charcount; i++) {
int j;
for (j = 0; j < h; j++) {
memcpy(p, data, 3);
}
} else {
h *= 4;
- for (i = 0; i < op->charcount; i++) {
+ for (i = 0; i < charcount; i++) {
memcpy(p, data, h);
data += 128;
p += h;
break;
}
}
- return fbcon_do_set_font(vc, op, new_data, 1);
+ return fbcon_do_set_font(vc, font->width, font->height, new_data, 1);
}
-static inline int fbcon_set_def_font(struct vc_data *vc, struct console_font_op *op)
+static int fbcon_set_def_font(struct vc_data *vc, struct console_font *font, char *name)
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
- char name[MAX_FONT_NAME];
struct font_desc *f;
- if (!op->data)
+ if (!name)
f = get_default_font(info->var.xres, info->var.yres);
- else if (strncpy_from_user(name, op->data, MAX_FONT_NAME - 1) < 0)
- return -EFAULT;
- else {
- name[MAX_FONT_NAME - 1] = 0;
- if (!(f = find_font(name)))
- return -ENOENT;
- }
- op->width = f->width;
- op->height = f->height;
- return fbcon_do_set_font(vc, op, f->data, 0);
-}
+ else if (!(f = find_font(name)))
+ return -ENOENT;
-static int fbcon_font_op(struct vc_data *vc, struct console_font_op *op)
-{
- switch (op->op) {
- case KD_FONT_OP_SET:
- return fbcon_set_font(vc, op);
- case KD_FONT_OP_GET:
- return fbcon_get_font(vc, op);
- case KD_FONT_OP_SET_DEFAULT:
- return fbcon_set_def_font(vc, op);
- case KD_FONT_OP_COPY:
- return fbcon_copy_font(vc, op);
- default:
- return -ENOSYS;
- }
+ font->width = f->width;
+ font->height = f->height;
+ return fbcon_do_set_font(vc, f->width, f->height, f->data, 0);
}
static u16 palette_red[16];
else
palette_cmap.len = 16;
palette_cmap.start = 0;
- return fb_set_cmap(&palette_cmap, 1, info);
+ return fb_set_cmap(&palette_cmap, info);
}
static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
.con_bmove = fbcon_bmove,
.con_switch = fbcon_switch,
.con_blank = fbcon_blank,
- .con_font_op = fbcon_font_op,
+ .con_font_set = fbcon_set_font,
+ .con_font_get = fbcon_get_font,
+ .con_font_default = fbcon_set_def_font,
+ .con_font_copy = fbcon_copy_font,
.con_set_palette = fbcon_set_palette,
.con_scrolldelta = fbcon_scrolldelta,
.con_set_origin = fbcon_set_origin,
}
}
-static int mdacon_font_op(struct vc_data *c, struct console_font_op *op)
-{
- return -ENOSYS;
-}
-
static int mdacon_scrolldelta(struct vc_data *c, int lines)
{
return 0;
.con_bmove = mdacon_bmove,
.con_switch = mdacon_switch,
.con_blank = mdacon_blank,
- .con_font_op = mdacon_font_op,
.con_set_palette = mdacon_set_palette,
.con_scrolldelta = mdacon_scrolldelta,
.con_build_attr = mdacon_build_attr,
static int newport_xsize;
static int newport_ysize;
-static int newport_set_def_font(int unit, struct console_font_op *op);
+static int newport_set_def_font(int unit, struct console_font *op);
#define BMASK(c) (c << 24)
return 1;
}
-static int newport_set_font(int unit, struct console_font_op *op)
+static int newport_set_font(int unit, struct console_font *op)
{
int w = op->width;
int h = op->height;
return 0;
}
-static int newport_set_def_font(int unit, struct console_font_op *op)
+static int newport_set_def_font(int unit, struct console_font *op)
{
if (font_data[unit] != FONT_DATA) {
if (--REFCOUNT(font_data[unit]) == 0)
return 0;
}
-static int newport_font_op(struct vc_data *vc, struct console_font_op *op)
+static int newport_font_default(struct vc_data *vc, struct console_font *op, char *name)
{
- int unit = vc->vc_num;
-
- switch (op->op) {
- case KD_FONT_OP_SET:
- return newport_set_font(unit, op);
- case KD_FONT_OP_SET_DEFAULT:
- return newport_set_def_font(unit, op);
- default:
- return -ENOSYS;
- }
+ return newport_set_def_font(vc->vc_num, op);
+}
+
+static int newport_font_set(struct vc_data *vc, struct console_font *font, unsigned flags)
+{
+ return newport_set_font(vc->vc_num, font);
}
static int newport_set_palette(struct vc_data *vc, unsigned char *table)
.con_bmove = newport_bmove,
.con_switch = newport_switch,
.con_blank = newport_blank,
- .con_font_op = newport_font_op,
+ .con_font_set = newport_font_set,
+ .con_font_default = newport_font_default,
.con_set_palette = newport_set_palette,
.con_scrolldelta = newport_scrolldelta,
.con_set_origin = DUMMY,
}
}
-static int
-promcon_font_op(struct vc_data *conp, struct console_font_op *op)
-{
- return -ENOSYS;
-}
-
static int
promcon_blank(struct vc_data *conp, int blank, int mode_switch)
{
.con_bmove = promcon_bmove,
.con_switch = promcon_switch,
.con_blank = promcon_blank,
- .con_font_op = promcon_font_op,
.con_set_palette = DUMMY,
.con_scrolldelta = DUMMY,
#if !(PROMCON_COLOR)
return -EINVAL;
}
-static int sticon_font_op(struct vc_data *c, struct console_font_op *op)
-{
- return -ENOSYS;
-}
-
static void sticon_putc(struct vc_data *conp, int c, int ypos, int xpos)
{
int unit = conp->vc_num;
.con_bmove = sticon_bmove,
.con_switch = sticon_switch,
.con_blank = sticon_blank,
- .con_font_op = sticon_font_op,
.con_set_palette = sticon_set_palette,
.con_scrolldelta = sticon_scrolldelta,
.con_set_origin = sticon_set_origin,
static void vgacon_cursor(struct vc_data *c, int mode);
static int vgacon_switch(struct vc_data *c);
static int vgacon_blank(struct vc_data *c, int blank, int mode_switch);
-static int vgacon_font_op(struct vc_data *c, struct console_font_op *op);
static int vgacon_set_palette(struct vc_data *vc, unsigned char *table);
static int vgacon_scrolldelta(struct vc_data *c, int lines);
static int vgacon_set_origin(struct vc_data *c);
return 0;
}
-static int vgacon_font_op(struct vc_data *c, struct console_font_op *op)
+static int vgacon_font_set(struct vc_data *c, struct console_font *font, unsigned flags)
{
+ unsigned charcount = font->charcount;
int rc;
if (vga_video_type < VIDEO_TYPE_EGAM)
return -EINVAL;
- if (op->op == KD_FONT_OP_SET) {
- if (op->width != 8
- || (op->charcount != 256 && op->charcount != 512))
- return -EINVAL;
- rc = vgacon_do_font_op(&state, op->data, 1, op->charcount == 512);
- if (!rc && !(op->flags & KD_FONT_FLAG_DONT_RECALC))
- rc = vgacon_adjust_height(c, op->height);
- } else if (op->op == KD_FONT_OP_GET) {
- op->width = 8;
- op->height = c->vc_font.height;
- op->charcount = vga_512_chars ? 512 : 256;
- if (!op->data)
- return 0;
- rc = vgacon_do_font_op(&state, op->data, 0, 0);
- } else
- rc = -ENOSYS;
+ if (font->width != 8 || (charcount != 256 && charcount != 512))
+ return -EINVAL;
+
+ rc = vgacon_do_font_op(&state, font->data, 1, charcount == 512);
+ if (rc)
+ return rc;
+
+ if (!(flags & KD_FONT_FLAG_DONT_RECALC))
+ rc = vgacon_adjust_height(c, font->height);
return rc;
}
-#else
-
-static int vgacon_font_op(struct vc_data *c, struct console_font_op *op)
+static int vgacon_font_get(struct vc_data *c, struct console_font *font)
{
- return -ENOSYS;
+ if (vga_video_type < VIDEO_TYPE_EGAM)
+ return -EINVAL;
+
+ font->width = 8;
+ font->height = c->vc_font.height;
+ font->charcount = vga_512_chars ? 512 : 256;
+ if (!font->data)
+ return 0;
+ return vgacon_do_font_op(&state, font->data, 0, 0);
}
+#else
+
+#define vgacon_font_set NULL
+#define vgacon_font_get NULL
+
#endif
static int vgacon_scrolldelta(struct vc_data *c, int lines)
.con_bmove = DUMMY,
.con_switch = vgacon_switch,
.con_blank = vgacon_blank,
- .con_font_op = vgacon_font_op,
+ .con_font_set = vgacon_font_set,
+ .con_font_get = vgacon_font_get,
.con_set_palette = vgacon_set_palette,
.con_scrolldelta = vgacon_scrolldelta,
.con_set_origin = vgacon_set_origin,
};
struct fb_var_screeninfo dnfb_var __devinitdata = {
- .xres 1280,
- .yres 1024,
- .xres_virtual 2048,
- .yres_virtual 1024,
- .bits_per_pixel 1,
- .height -1,
- .width -1,
- .vmode FB_VMODE_NONINTERLACED,
+ .xres = 1280,
+ .yres = 1024,
+ .xres_virtual = 2048,
+ .yres_virtual = 1024,
+ .bits_per_pixel = 1,
+ .height = -1,
+ .width = -1,
+ .vmode = FB_VMODE_NONINTERLACED,
};
static struct fb_fix_screeninfo dnfb_fix __devinitdata = {
- .id "Apollo Mono",
- .smem_start (FRAME_BUFFER_START + IO_BASE),
- .smem_len FRAME_BUFFER_LEN,
- .type FB_TYPE_PACKED_PIXELS,
- .visual FB_VISUAL_MONO10,
- .line_length 256,
+ .id = "Apollo Mono",
+ .smem_start = (FRAME_BUFFER_START + IO_BASE),
+ .smem_len = FRAME_BUFFER_LEN,
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_MONO10,
+ .line_length = 256,
};
static int dnfb_blank(int blank, struct fb_info *info)
}
cmap->start = 0;
cmap->len = len;
- fb_copy_cmap(fb_default_cmap(len), cmap, 0);
+ fb_copy_cmap(fb_default_cmap(len), cmap);
return 0;
fail:
* fb_copy_cmap - copy a colormap
* @from: frame buffer colormap structure
* @to: frame buffer colormap structure
- * @fsfromto: determine copy method
*
* Copy contents of colormap from @from to @to.
- *
- * @fsfromto accepts the following integer parameters:
- * 0: memcpy function
- * 1: copy_from_user() function to copy from userspace
- * 2: copy_to_user() function to copy to userspace
- *
*/
-int fb_copy_cmap(struct fb_cmap *from, struct fb_cmap *to, int fsfromto)
+int fb_copy_cmap(struct fb_cmap *from, struct fb_cmap *to)
{
- int tooff = 0, fromoff = 0;
- int size;
-
- if (to->start > from->start)
- fromoff = to->start-from->start;
- else
- tooff = from->start-to->start;
- size = to->len-tooff;
- if (size > (int) (from->len - fromoff))
- size = from->len-fromoff;
- if (size <= 0)
- return -EINVAL;
- size *= sizeof(u16);
-
- switch (fsfromto) {
- case 0:
+ int tooff = 0, fromoff = 0;
+ int size;
+
+ if (to->start > from->start)
+ fromoff = to->start - from->start;
+ else
+ tooff = from->start - to->start;
+ size = to->len - tooff;
+ if (size > (int) (from->len - fromoff))
+ size = from->len - fromoff;
+ if (size <= 0)
+ return -EINVAL;
+ size *= sizeof(u16);
+
memcpy(to->red+tooff, from->red+fromoff, size);
memcpy(to->green+tooff, from->green+fromoff, size);
memcpy(to->blue+tooff, from->blue+fromoff, size);
if (from->transp && to->transp)
- memcpy(to->transp+tooff, from->transp+fromoff, size);
- break;
- case 1:
- if (copy_from_user(to->red+tooff, from->red+fromoff, size))
- return -EFAULT;
- if (copy_from_user(to->green+tooff, from->green+fromoff, size))
- return -EFAULT;
- if (copy_from_user(to->blue+tooff, from->blue+fromoff, size))
- return -EFAULT;
- if (from->transp && to->transp)
- if (copy_from_user(to->transp+tooff, from->transp+fromoff, size))
- return -EFAULT;
- break;
- case 2:
+ memcpy(to->transp+tooff, from->transp+fromoff, size);
+ return 0;
+}
+
+int fb_cmap_to_user(struct fb_cmap *from, struct fb_cmap_user *to)
+{
+ int tooff = 0, fromoff = 0;
+ int size;
+
+ if (to->start > from->start)
+ fromoff = to->start - from->start;
+ else
+ tooff = from->start - to->start;
+ size = to->len - tooff;
+ if (size > (int) (from->len - fromoff))
+ size = from->len - fromoff;
+ if (size <= 0)
+ return -EINVAL;
+ size *= sizeof(u16);
+
if (copy_to_user(to->red+tooff, from->red+fromoff, size))
return -EFAULT;
if (copy_to_user(to->green+tooff, from->green+fromoff, size))
if (from->transp && to->transp)
if (copy_to_user(to->transp+tooff, from->transp+fromoff, size))
return -EFAULT;
- break;
- }
- return 0;
+ return 0;
}
/**
* fb_set_cmap - set the colormap
* @cmap: frame buffer colormap structure
- * @kspc: boolean, 1 copy local, 0 get_user() function
* @info: frame buffer info structure
*
* Sets the colormap @cmap for a screen of device @info.
*
*/
-int fb_set_cmap(struct fb_cmap *cmap, int kspc, struct fb_info *info)
+int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
{
- int i, start;
- u16 *red, *green, *blue, *transp;
- u_int hred, hgreen, hblue, htransp;
-
- red = cmap->red;
- green = cmap->green;
- blue = cmap->blue;
- transp = cmap->transp;
- start = cmap->start;
-
- if (start < 0 || !info->fbops->fb_setcolreg)
- return -EINVAL;
- for (i = 0; i < cmap->len; i++) {
- if (kspc) {
- hred = *red;
- hgreen = *green;
- hblue = *blue;
- htransp = transp ? *transp : 0xffff;
- } else {
- get_user(hred, red);
- get_user(hgreen, green);
- get_user(hblue, blue);
- if (transp)
- get_user(htransp, transp);
- else
- htransp = 0xffff;
+ int i, start;
+ u16 *red, *green, *blue, *transp;
+ u_int hred, hgreen, hblue, htransp = 0xffff;
+
+ red = cmap->red;
+ green = cmap->green;
+ blue = cmap->blue;
+ transp = cmap->transp;
+ start = cmap->start;
+
+ if (start < 0 || !info->fbops->fb_setcolreg)
+ return -EINVAL;
+ for (i = 0; i < cmap->len; i++) {
+ hred = *red++;
+ hgreen = *green++;
+ hblue = *blue++;
+ if (transp)
+ htransp = *transp++;
+ if (info->fbops->fb_setcolreg(start++,
+ hred, hgreen, hblue, htransp,
+ info))
+ break;
}
- red++;
- green++;
- blue++;
- if (transp)
- transp++;
- if (info->fbops->fb_setcolreg(start++, hred, hgreen, hblue, htransp, info))
- return 0;
- }
- return 0;
+ return 0;
}
+int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
+{
+ int i, start;
+ u16 __user *red, *green, *blue, *transp;
+ u_int hred, hgreen, hblue, htransp = 0xffff;
+
+ red = cmap->red;
+ green = cmap->green;
+ blue = cmap->blue;
+ transp = cmap->transp;
+ start = cmap->start;
+
+ if (start < 0 || !info->fbops->fb_setcolreg)
+ return -EINVAL;
+ for (i = 0; i < cmap->len; i++, red++, blue++, green++) {
+ if (get_user(hred, red) ||
+ get_user(hgreen, green) ||
+ get_user(hblue, blue) ||
+ (transp && get_user(htransp, transp)))
+ return -EFAULT;
+ if (info->fbops->fb_setcolreg(start++,
+ hred, hgreen, hblue, htransp,
+ info))
+ return 0;
+ if (transp)
+ transp++;
+ }
+ return 0;
+}
/**
* fb_default_cmap - get default colormap
return n < 0 ? d >> -n : d << n;
}
-static void __init fb_set_logocmap(struct fb_info *info,
+static void fb_set_logocmap(struct fb_info *info,
const struct linux_logo *logo)
{
struct fb_cmap palette_cmap;
palette_cmap.blue[j] = clut[2] << 8 | clut[2];
clut += 3;
}
- fb_set_cmap(&palette_cmap, 1, info);
+ fb_set_cmap(&palette_cmap, info);
}
}
-static void __init fb_set_logo_truepalette(struct fb_info *info,
+static void fb_set_logo_truepalette(struct fb_info *info,
const struct linux_logo *logo,
u32 *palette)
{
}
}
-static void __init fb_set_logo_directpalette(struct fb_info *info,
+static void fb_set_logo_directpalette(struct fb_info *info,
const struct linux_logo *logo,
u32 *palette)
{
palette[i] = i << redshift | i << greenshift | i << blueshift;
}
-static void __init fb_set_logo(struct fb_info *info,
+static void fb_set_logo(struct fb_info *info,
const struct linux_logo *logo, u8 *dst,
int depth)
{
}
int
-fb_cursor(struct fb_info *info, struct fb_cursor *sprite)
+fb_cursor(struct fb_info *info, struct fb_cursor_user __user *sprite)
{
+ struct fb_cursor_user cursor_user;
struct fb_cursor cursor;
- int err;
+ char *data = NULL, *mask = NULL;
+ u16 *red = NULL, *green = NULL, *blue = NULL, *transp = NULL;
+ int err = -EINVAL;
- if (copy_from_user(&cursor, sprite, sizeof(struct fb_cursor)))
+ if (copy_from_user(&cursor_user, sprite, sizeof(struct fb_cursor_user)))
return -EFAULT;
+ memcpy(&cursor, &cursor_user, sizeof(cursor));
+ cursor.mask = NULL;
+ cursor.image.data = NULL;
+ cursor.image.cmap.red = NULL;
+ cursor.image.cmap.green = NULL;
+ cursor.image.cmap.blue = NULL;
+ cursor.image.cmap.transp = NULL;
+
if (cursor.set & FB_CUR_SETCUR)
info->cursor.enable = 1;
if (cursor.set & FB_CUR_SETCMAP) {
- err = fb_copy_cmap(&cursor.image.cmap, &sprite->image.cmap, 1);
- if (err)
- return err;
+ unsigned len = cursor.image.cmap.len;
+ if ((int)len <= 0)
+ goto out;
+ len *= 2;
+ err = -ENOMEM;
+ red = kmalloc(len, GFP_USER);
+ green = kmalloc(len, GFP_USER);
+ blue = kmalloc(len, GFP_USER);
+ if (!red || !green || !blue)
+ goto out;
+ if (cursor_user.image.cmap.transp) {
+ transp = kmalloc(len, GFP_USER);
+ if (!transp)
+ goto out;
+ }
+ err = -EFAULT;
+ if (copy_from_user(red, cursor_user.image.cmap.red, len))
+ goto out;
+ if (copy_from_user(green, cursor_user.image.cmap.green, len))
+ goto out;
+ if (copy_from_user(blue, cursor_user.image.cmap.blue, len))
+ goto out;
+ if (transp) {
+ if (copy_from_user(transp,
+ cursor_user.image.cmap.transp, len))
+ goto out;
+ }
+ cursor.image.cmap.red = red;
+ cursor.image.cmap.green = green;
+ cursor.image.cmap.blue = blue;
+ cursor.image.cmap.transp = transp;
}
if (cursor.set & FB_CUR_SETSHAPE) {
int size = ((cursor.image.width + 7) >> 3) * cursor.image.height;
- char *data, *mask;
if ((cursor.image.height != info->cursor.image.height) ||
(cursor.image.width != info->cursor.image.width))
cursor.set |= FB_CUR_SETSIZE;
- data = kmalloc(size, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ err = -ENOMEM;
+ data = kmalloc(size, GFP_USER);
+ mask = kmalloc(size, GFP_USER);
+ if (!mask || !data)
+ goto out;
- mask = kmalloc(size, GFP_KERNEL);
- if (!mask) {
- kfree(data);
- return -ENOMEM;
- }
+ err = -EFAULT;
+ if (copy_from_user(data, cursor_user.image.data, size) ||
+ copy_from_user(mask, cursor_user.mask, size))
+ goto out;
- if (copy_from_user(data, sprite->image.data, size) ||
- copy_from_user(mask, sprite->mask, size)) {
- kfree(data);
- kfree(mask);
- return -EFAULT;
- }
cursor.image.data = data;
cursor.mask = mask;
}
info->cursor.set = cursor.set;
info->cursor.rop = cursor.rop;
err = info->fbops->fb_cursor(info, &cursor);
+out:
+ kfree(data);
+ kfree(mask);
+ kfree(red);
+ kfree(green);
+ kfree(blue);
+ kfree(transp);
return err;
}
fb_pan_display(info, &info->var);
- fb_set_cmap(&info->cmap, 1, info);
+ fb_set_cmap(&info->cmap, info);
if (info->flags & FBINFO_MISC_MODECHANGEUSER) {
info->flags &= ~FBINFO_MISC_MODECHANGEUSER;
cmap.len = info->cmap.len;
} else
cmap = info->cmap;
- return fb_set_cmap(&cmap, 1, info);
+ return fb_set_cmap(&cmap, info);
}
static int
#ifdef CONFIG_FRAMEBUFFER_CONSOLE
struct fb_con2fbmap con2fb;
#endif
- struct fb_cmap cmap;
+ struct fb_cmap_user cmap;
+ void __user *argp = (void __user *)arg;
int i;
if (!fb)
return -ENODEV;
switch (cmd) {
case FBIOGET_VSCREENINFO:
- return copy_to_user((void *) arg, &info->var,
+ return copy_to_user(argp, &info->var,
sizeof(var)) ? -EFAULT : 0;
case FBIOPUT_VSCREENINFO:
- if (copy_from_user(&var, (void *) arg, sizeof(var)))
+ if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
acquire_console_sem();
info->flags |= FBINFO_MISC_MODECHANGEUSER;
info->flags &= ~FBINFO_MISC_MODECHANGEUSER;
release_console_sem();
if (i) return i;
- if (copy_to_user((void *) arg, &var, sizeof(var)))
+ if (copy_to_user(argp, &var, sizeof(var)))
return -EFAULT;
return 0;
case FBIOGET_FSCREENINFO:
- return copy_to_user((void *) arg, &info->fix,
+ return copy_to_user(argp, &info->fix,
sizeof(fix)) ? -EFAULT : 0;
case FBIOPUTCMAP:
- if (copy_from_user(&cmap, (void *) arg, sizeof(cmap)))
+ if (copy_from_user(&cmap, argp, sizeof(cmap)))
return -EFAULT;
- return (fb_set_cmap(&cmap, 0, info));
+ return (fb_set_user_cmap(&cmap, info));
case FBIOGETCMAP:
- if (copy_from_user(&cmap, (void *) arg, sizeof(cmap)))
+ if (copy_from_user(&cmap, argp, sizeof(cmap)))
return -EFAULT;
- return (fb_copy_cmap(&info->cmap, &cmap, 2));
+ return fb_cmap_to_user(&info->cmap, &cmap);
case FBIOPAN_DISPLAY:
- if (copy_from_user(&var, (void *) arg, sizeof(var)))
+ if (copy_from_user(&var, argp, sizeof(var)))
return -EFAULT;
acquire_console_sem();
i = fb_pan_display(info, &var);
release_console_sem();
if (i)
return i;
- if (copy_to_user((void *) arg, &var, sizeof(var)))
+ if (copy_to_user(argp, &var, sizeof(var)))
return -EFAULT;
return 0;
case FBIO_CURSOR:
acquire_console_sem();
- i = fb_cursor(info, (struct fb_cursor *) arg);
+ i = fb_cursor(info, argp);
release_console_sem();
return i;
#ifdef CONFIG_FRAMEBUFFER_CONSOLE
case FBIOGET_CON2FBMAP:
- if (copy_from_user(&con2fb, (void *)arg, sizeof(con2fb)))
+ if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
return -EFAULT;
if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
return -EINVAL;
con2fb.framebuffer = con2fb_map[con2fb.console-1];
- return copy_to_user((void *)arg, &con2fb,
+ return copy_to_user(argp, &con2fb,
sizeof(con2fb)) ? -EFAULT : 0;
case FBIOPUT_CON2FBMAP:
- if (copy_from_user(&con2fb, (void *)arg, sizeof(con2fb)))
+ if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
return - EFAULT;
if (con2fb.console < 0 || con2fb.console > MAX_NR_CONSOLES)
return -EINVAL;
#ifdef MEMCPYTOIO_WORKS
memcpy_toio(va.vaddr + offs, src, len);
#elif defined(MEMCPYTOIO_WRITEL)
-#define srcd ((const u_int32_t*)src)
if (offs & 3) {
while (len >= 4) {
- mga_writel(va, offs, get_unaligned(srcd++));
+ mga_writel(va, offs, get_unaligned((u32 *)src));
offs += 4;
len -= 4;
+ src += 4;
}
} else {
while (len >= 4) {
- mga_writel(va, offs, *srcd++);
+ mga_writel(va, offs, *(u32 *)src);
offs += 4;
len -= 4;
+ src += 4;
}
}
-#undef srcd
if (len) {
u_int32_t tmp;
break;
}
} else
- fb_set_cmap(&info->cmap, 1, info);
+ fb_set_cmap(&info->cmap, info);
return 0;
}
//TODO if (pxafb_blank_helper) pxafb_blank_helper(blank);
if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR ||
fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
- fb_set_cmap(&fbi->fb.cmap, 1, info);
+ fb_set_cmap(&fbi->fb.cmap, info);
pxafb_schedule_work(fbi, C_ENABLE);
}
return 0;
dp = pci_device_to_OF_node(rinfo->pdev);
- xtal = (unsigned int *) get_property(dp, "ATY,RefCLK", 0);
+ xtal = (unsigned int *) get_property(dp, "ATY,RefCLK", NULL);
rinfo->pll.ref_clk = *xtal / 10;
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif
+#ifdef CONFIG_PMAC_BACKLIGHT
+#include <asm/backlight.h>
+#endif
#include "rivafb.h"
#include "nvreg.h"
* various helpful macros and constants
*
* ------------------------------------------------------------------------- */
-
-#undef RIVAFBDEBUG
-#ifdef RIVAFBDEBUG
-#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+#ifdef CONFIG_FB_RIVA_DEBUG
+#define NVTRACE printk
#else
-#define DPRINTK(fmt, args...)
+#define NVTRACE if(0) printk
#endif
-#ifndef RIVA_NDEBUG
+#define NVTRACE_ENTER(...) NVTRACE("%s START\n", __FUNCTION__)
+#define NVTRACE_LEAVE(...) NVTRACE("%s END\n", __FUNCTION__)
+
+#ifdef CONFIG_FB_RIVA_DEBUG
#define assert(expr) \
if(!(expr)) { \
printk( "Assertion failed! %s,%s,%s,line=%d\n",\
{ "GeForce2-GTS", NV_ARCH_10 },
{ "GeForce2-ULTRA", NV_ARCH_10 },
{ "Quadro2-PRO", NV_ARCH_10 },
- { "GeForce4-MX-460", NV_ARCH_20 },
- { "GeForce4-MX-440", NV_ARCH_20 },
- { "GeForce4-MX-420", NV_ARCH_20 },
- { "GeForce4-440-GO", NV_ARCH_20 },
- { "GeForce4-420-GO", NV_ARCH_20 },
- { "GeForce4-420-GO-M32", NV_ARCH_20 },
- { "Quadro4-500-XGL", NV_ARCH_20 },
- { "GeForce4-440-GO-M64", NV_ARCH_20 },
- { "Quadro4-200", NV_ARCH_20 },
- { "Quadro4-550-XGL", NV_ARCH_20 },
- { "Quadro4-500-GOGL", NV_ARCH_20 },
- { "GeForce2", NV_ARCH_20 },
+ { "GeForce4-MX-460", NV_ARCH_10 },
+ { "GeForce4-MX-440", NV_ARCH_10 },
+ { "GeForce4-MX-420", NV_ARCH_10 },
+ { "GeForce4-440-GO", NV_ARCH_10 },
+ { "GeForce4-420-GO", NV_ARCH_10 },
+ { "GeForce4-420-GO-M32", NV_ARCH_10 },
+ { "Quadro4-500-XGL", NV_ARCH_10 },
+ { "GeForce4-440-GO-M64", NV_ARCH_10 },
+ { "Quadro4-200", NV_ARCH_10 },
+ { "Quadro4-550-XGL", NV_ARCH_10 },
+ { "Quadro4-500-GOGL", NV_ARCH_10 },
+ { "GeForce2", NV_ARCH_10 },
{ "GeForce3", NV_ARCH_20 },
{ "GeForce3 Ti 200", NV_ARCH_20 },
{ "GeForce3 Ti 500", NV_ARCH_20 },
0xEB /* MISC */
};
+/*
+ * Backlight control
+ */
+#ifdef CONFIG_PMAC_BACKLIGHT
+
+static int riva_backlight_levels[] = {
+ 0x158,
+ 0x192,
+ 0x1c6,
+ 0x200,
+ 0x234,
+ 0x268,
+ 0x2a2,
+ 0x2d6,
+ 0x310,
+ 0x344,
+ 0x378,
+ 0x3b2,
+ 0x3e6,
+ 0x41a,
+ 0x454,
+ 0x534,
+};
+
+static int riva_set_backlight_enable(int on, int level, void *data);
+static int riva_set_backlight_level(int level, void *data);
+static struct backlight_controller riva_backlight_controller = {
+ riva_set_backlight_enable,
+ riva_set_backlight_level
+};
+#endif /* CONFIG_PMAC_BACKLIGHT */
+
/* ------------------------------------------------------------------------- *
*
* MMIO access macros
{
int i;
+ NVTRACE_ENTER();
par->riva.LockUnlock(&par->riva, 0);
par->riva.UnloadStateExt(&par->riva, ®s->ext);
for (i = 0; i < NUM_SEQ_REGS; i++)
regs->seq[i] = SEQin(par, i);
+ NVTRACE_LEAVE();
}
/**
RIVA_HW_STATE *state = ®s->ext;
int i;
+ NVTRACE_ENTER();
CRTCout(par, 0x11, 0x00);
par->riva.LockUnlock(&par->riva, 0);
for (i = 0; i < NUM_SEQ_REGS; i++)
SEQout(par, i, regs->seq[i]);
+ NVTRACE_LEAVE();
}
/**
struct riva_par *par = (struct riva_par *) info->par;
struct riva_regs newmode;
+ NVTRACE_ENTER();
/* time to calculate */
rivafb_blank(1, info);
riva_load_state(par, &par->current_state);
par->riva.LockUnlock(&par->riva, 0); /* important for HW cursor */
rivafb_blank(0, info);
+ NVTRACE_LEAVE();
}
static void riva_update_var(struct fb_var_screeninfo *var, struct fb_videomode *modedb)
{
+ NVTRACE_ENTER();
var->xres = var->xres_virtual = modedb->xres;
var->yres = modedb->yres;
if (var->yres_virtual < var->yres)
var->vsync_len = modedb->vsync_len;
var->sync = modedb->sync;
var->vmode = modedb->vmode;
+ NVTRACE_LEAVE();
}
/**
};
int i;
+ NVTRACE_ENTER();
/* use highest possible virtual resolution */
if (var->xres_virtual == -1 && var->yres_virtual == -1) {
printk(KERN_WARNING PFX
if (modes[i].xres == -1) {
printk(KERN_ERR PFX
"could not find a virtual resolution that fits into video memory!!\n");
- DPRINTK("EXIT - EINVAL error\n");
+ NVTRACE("EXIT - EINVAL error\n");
return -EINVAL;
}
var->xres_virtual = modes[i].xres;
printk(KERN_ERR PFX
"mode %dx%dx%d rejected...resolution too high to fit into video memory!\n",
var->xres, var->yres, var->bits_per_pixel);
- DPRINTK("EXIT - EINVAL error\n");
+ NVTRACE("EXIT - EINVAL error\n");
return -EINVAL;
}
}
var->yres_virtual = 0x7fff/nom;
if (var->xres_virtual > 0x7fff/nom)
var->xres_virtual = 0x7fff/nom;
-
+ NVTRACE_LEAVE();
return 0;
}
return rc;
}
+/* ------------------------------------------------------------------------- *
+ *
+ * Backlight operations
+ *
+ * ------------------------------------------------------------------------- */
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int riva_set_backlight_enable(int on, int level, void *data)
+{
+ struct riva_par *par = (struct riva_par *)data;
+ U032 tmp_pcrt, tmp_pmc;
+
+ tmp_pmc = par->riva.PMC[0x10F0/4] & 0x0000FFFF;
+ tmp_pcrt = par->riva.PCRTC0[0x081C/4] & 0xFFFFFFFC;
+ if(on && (level > BACKLIGHT_OFF)) {
+ tmp_pcrt |= 0x1;
+ tmp_pmc |= (1 << 31); // backlight bit
+ tmp_pmc |= riva_backlight_levels[level-1] << 16; // level
+ }
+ par->riva.PCRTC0[0x081C/4] = tmp_pcrt;
+ par->riva.PMC[0x10F0/4] = tmp_pmc;
+ return 0;
+}
+
+static int riva_set_backlight_level(int level, void *data)
+{
+ return riva_set_backlight_enable(1, level, data);
+}
+#endif /* CONFIG_PMAC_BACKLIGHT */
+
/* ------------------------------------------------------------------------- *
*
* framebuffer operations
struct riva_par *par = (struct riva_par *) info->par;
int cnt = atomic_read(&par->ref_count);
+ NVTRACE_ENTER();
if (!cnt) {
memset(&par->state, 0, sizeof(struct vgastate));
par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS;
riva_save_state(par, &par->initial_state);
}
atomic_inc(&par->ref_count);
+ NVTRACE_LEAVE();
return 0;
}
struct riva_par *par = (struct riva_par *) info->par;
int cnt = atomic_read(&par->ref_count);
+ NVTRACE_ENTER();
if (!cnt)
return -EINVAL;
if (cnt == 1) {
par->riva.LockUnlock(&par->riva, 1);
}
atomic_dec(&par->ref_count);
+ NVTRACE_LEAVE();
return 0;
}
int nom, den; /* translating from pixels->bytes */
int mode_valid = 0;
+ NVTRACE_ENTER();
switch (var->bits_per_pixel) {
case 1 ... 8:
var->red.offset = var->green.offset = var->blue.offset = 0;
printk(KERN_ERR PFX
"mode %dx%dx%d rejected...color depth not supported.\n",
var->xres, var->yres, var->bits_per_pixel);
- DPRINTK("EXIT, returning -EINVAL\n");
+ NVTRACE("EXIT, returning -EINVAL\n");
return -EINVAL;
}
var->green.msb_right =
var->blue.msb_right =
var->transp.offset = var->transp.length = var->transp.msb_right = 0;
+ NVTRACE_LEAVE();
return 0;
}
{
struct riva_par *par = (struct riva_par *) info->par;
+ NVTRACE_ENTER();
riva_common_setup(par);
RivaGetConfig(&par->riva, par->Chipset);
/* vgaHWunlock() + riva unlock (0x7F) */
info->fix.line_length = (info->var.xres_virtual * (info->var.bits_per_pixel >> 3));
info->fix.visual = (info->var.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
+ NVTRACE_LEAVE();
return 0;
}
struct riva_par *par = (struct riva_par *)info->par;
unsigned int base;
+ NVTRACE_ENTER();
if (var->xoffset > (var->xres_virtual - var->xres))
return -EINVAL;
if (var->yoffset > (var->yres_virtual - var->yres))
info->var.vmode |= FB_VMODE_YWRAP;
else
info->var.vmode &= ~FB_VMODE_YWRAP;
+ NVTRACE_LEAVE();
return 0;
}
tmp = SEQin(par, 0x01) & ~0x20; /* screen on/off */
vesa = CRTCin(par, 0x1a) & ~0xc0; /* sync on/off */
+ NVTRACE_ENTER();
if (blank) {
tmp |= 0x20;
switch (blank - 1) {
}
SEQout(par, 0x01, tmp);
CRTCout(par, 0x1a, vesa);
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+ if ( par->FlatPanel && _machine == _MACH_Pmac) {
+ set_backlight_enable(!blank);
+ }
+#endif
+
+ NVTRACE_LEAVE();
return 0;
}
{
unsigned int cmap_len;
+ NVTRACE_ENTER();
info->flags = FBINFO_DEFAULT
| FBINFO_HWACCEL_XPAN
| FBINFO_HWACCEL_YPAN
info->pixmap.scan_align = 4;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->var.yres_virtual = -1;
+ NVTRACE_LEAVE();
return (rivafb_check_var(&info->var, info));
}
"DFP,EDID", "LCD,EDID", "EDID", "EDID1", "EDID,B", "EDID,A", NULL };
int i;
+ NVTRACE_ENTER();
dp = pci_device_to_OF_node(pd);
for (; dp != NULL; dp = dp->child) {
disptype = (unsigned char *)get_property(dp, "display-type", NULL);
}
}
}
+ NVTRACE_LEAVE();
return 0;
}
#endif /* CONFIG_PPC_OF */
struct fb_monspecs *specs = &info->monspecs;
struct fb_videomode modedb;
+ NVTRACE_ENTER();
/* respect mode options */
if (mode_option) {
fb_find_mode(var, info, mode_option,
riva_update_var(var, &modedb);
}
var->accel_flags |= FB_ACCELF_TEXT;
+ NVTRACE_LEAVE();
}
static void riva_get_EDID(struct fb_info *info, struct pci_dev *pdev)
{
+ struct riva_par *par;
+ int i;
+
+ NVTRACE_ENTER();
#ifdef CONFIG_PPC_OF
if (!riva_get_EDID_OF(info, pdev))
printk("rivafb: could not retrieve EDID from OF\n");
#else
/* XXX use other methods later */
#ifdef CONFIG_FB_RIVA_I2C
- struct riva_par *par = (struct riva_par *) info->par;
- int i;
+ par = (struct riva_par *) info->par;
riva_create_i2c_busses(par);
for (i = par->bus; i >= 1; i--) {
riva_probe_i2c_connector(par, i, &par->EDID);
riva_delete_i2c_busses(par);
#endif
#endif
+ NVTRACE_LEAVE();
}
struct riva_par *default_par;
struct fb_info *info;
+ NVTRACE_ENTER();
assert(pd != NULL);
assert(rci != NULL);
info->fix.id,
info->fix.smem_len / (1024 * 1024),
info->fix.smem_start);
+#ifdef CONFIG_PMAC_BACKLIGHT
+ if (default_par->FlatPanel && _machine == _MACH_Pmac)
+ register_backlight_controller(&riva_backlight_controller,
+ default_par, "mnca");
+#endif
+ NVTRACE_LEAVE();
return 0;
err_out_iounmap_fb:
struct fb_info *info = pci_get_drvdata(pd);
struct riva_par *par = (struct riva_par *) info->par;
+ NVTRACE_ENTER();
if (!info)
return;
kfree(par);
kfree(info);
pci_set_drvdata(pd, NULL);
+ NVTRACE_LEAVE();
}
/* ------------------------------------------------------------------------- *
{
char *this_opt;
+ NVTRACE_ENTER();
if (!options || !*options)
return 0;
} else
mode_option = this_opt;
}
+ NVTRACE_LEAVE();
return 0;
}
#endif /* !MODULE */
case VESA_NO_BLANKING:
if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR ||
fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
- fb_set_cmap(&fbi->fb.cmap, 1, info);
+ fb_set_cmap(&fbi->fb.cmap, info);
sa1100fb_schedule_work(fbi, C_ENABLE);
}
return 0;
#error Where is GPIO24 set as an output? Can we fit this in somewhere else?
if (machine_is_graphicsclient()) {
// From ADS doc again...same as disable
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(20 * HZ / 1000);
+ msleep(20);
GPSR |= GPIO_GPIO24;
}
#endif
* We'll wait 20msec.
*/
GPCR |= GPIO_GPIO24;
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(20 * HZ / 1000);
+ msleep(20);
}
#endif
#ifdef CONFIG_SA1100_HUW_WEBPANEL
{
switch(cmd) {
case FBIOGTYPE: {
- struct fbtype *f = (struct fbtype *) arg;
+ struct fbtype __user *f = (struct fbtype __user *) arg;
if (put_user(type, &f->fb_type) ||
__put_user(info->var.yres, &f->fb_height) ||
return 0;
}
case FBIOPUTCMAP_SPARC: {
- struct fbcmap *c = (struct fbcmap *) arg;
+ struct fbcmap __user *c = (struct fbcmap __user *) arg;
struct fb_cmap cmap;
u16 red, green, blue;
- unsigned char *ured, *ugreen, *ublue;
+ unsigned char __user *ured;
+ unsigned char __user *ugreen;
+ unsigned char __user *ublue;
int index, count, i;
if (get_user(index, &c->index) ||
return -EFAULT;
cmap.start = index + i;
- err = fb_set_cmap(&cmap, 0, info);
+ err = fb_set_cmap(&cmap, info);
if (err)
return err;
}
return 0;
}
case FBIOGETCMAP_SPARC: {
- struct fbcmap *c = (struct fbcmap *) arg;
- unsigned char *ured, *ugreen, *ublue;
+ struct fbcmap __user *c = (struct fbcmap __user *) arg;
+ unsigned char __user *ured;
+ unsigned char __user *ugreen;
+ unsigned char __user *ublue;
struct fb_cmap *cmap = &info->cmap;
int index, count, i;
if(con != ivideo->currcon) return;
if(fb_display[con].cmap.len) {
- fb_set_cmap(&fb_display[con].cmap, 1, sisfb_setcolreg, info);
+ fb_set_cmap(&fb_display[con].cmap, sisfb_setcolreg, info);
} else {
int size = sisfb_get_cmap_len(&fb_display[con].var);
- fb_set_cmap(fb_default_cmap(size), 1, sisfb_setcolreg, info);
+ fb_set_cmap(fb_default_cmap(size), sisfb_setcolreg, info);
}
}
info->cursor.image.fg_color = cursor->image.fg_color;
} else {
if (cursor->image.cmap.len)
- fb_copy_cmap(&cursor->image.cmap, &info->cursor.image.cmap, 0);
+ fb_copy_cmap(&cursor->image.cmap, &info->cursor.image.cmap);
}
info->cursor.image.depth = cursor->image.depth;
}
bg_color = ((cmap.red[cmap.start+1] << 16) |
(cmap.green[cmap.start+1] << 8) |
(cmap.blue[cmap.start+1]));
- fb_copy_cmap(&cmap, &info->cursor.image.cmap, 0);
+ fb_copy_cmap(&cmap, &info->cursor.image.cmap);
spin_lock_irqsave(&par->DAClock, flags);
banshee_make_room(par, 2);
tdfx_outl(par, HWCURC0, bg_color);
static int valkyriefb_blank(int blank_mode, struct fb_info *info);
static int read_valkyrie_sense(struct fb_info_valkyrie *p);
-static inline int valkyrie_vram_reqd(int video_mode, int color_mode);
static void set_valkyrie_clock(unsigned char *params);
-static inline int valkyrie_par_to_var(struct fb_par_valkyrie *par, struct fb_var_screeninfo *var);
static int valkyrie_var_to_par(struct fb_var_screeninfo *var,
struct fb_par_valkyrie *par, const struct fb_info *fb_info);
return 0;
}
+static inline int valkyrie_par_to_var(struct fb_par_valkyrie *par,
+ struct fb_var_screeninfo *var)
+{
+ return mac_vmode_to_var(par->vmode, par->cmode, var);
+}
+
static int
valkyriefb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
return 0;
}
-static int valkyrie_vram_reqd(int video_mode, int color_mode)
+static inline int valkyrie_vram_reqd(int video_mode, int color_mode)
{
int pitch;
struct valkyrie_regvals *init = valkyrie_reg_init[video_mode-1];
return 0;
}
-static int valkyrie_par_to_var(struct fb_par_valkyrie *par, struct fb_var_screeninfo *var)
-{
- return mac_vmode_to_var(par->vmode, par->cmode, var);
-}
-
static void valkyrie_init_fix(struct fb_fix_screeninfo *fix, struct fb_info_valkyrie *p)
{
memset(fix, 0, sizeof(*fix));
config W1_MATROX
tristate "Matrox G400 transport layer for 1-wire"
- depends on W1
+ depends on W1 && PCI
help
Say Y here if you want to communicate with your 1-wire devices
using Matrox's G400 GPIO pins.
#include <asm/atomic.h>
#include <asm/types.h>
#include <asm/io.h>
-#include <asm/delay.h>
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
*/
#include <asm/atomic.h>
-#include <asm/delay.h>
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
dev = kmalloc(sizeof(struct w1_master) + sizeof(struct w1_bus_master), GFP_KERNEL);
if (!dev) {
printk(KERN_ERR
- "Failed to allocate %d bytes for new w1 device.\n",
+ "Failed to allocate %zd bytes for new w1 device.\n",
sizeof(struct w1_master));
return NULL;
}
*/
#include <asm/io.h>
-#include <asm/delay.h>
+#include <linux/delay.h>
#include <linux/moduleparam.h>
#include "w1.h"
messages at debug level 1 while the misbehaviour was occurring.
config JFFS2_FS_NAND
- bool "JFFS2 support for NAND flash (EXPERIMENTAL)"
- depends on JFFS2_FS && EXPERIMENTAL
+ bool "JFFS2 support for NAND flash"
+ depends on JFFS2_FS
default n
help
- This enables the experimental support for NAND flash in JFFS2. NAND
- is a newer type of flash chip design than the traditional NOR flash,
- with higher density but a handful of characteristics which make it
- more interesting for the file system to use. Support for NAND flash
- is not yet complete and may corrupt data. For further information,
- including a link to the mailing list where details of the remaining
- work to be completed for NAND flash support can be found, see the
- JFFS2 web site at <http://sources.redhat.com/jffs2>.
+ This enables the support for NAND flash in JFFS2. NAND is a newer
+ type of flash chip design than the traditional NOR flash, with
+ higher density but a handful of characteristics which make it more
+ interesting for the file system to use.
- Say 'N' unless you have NAND flash and you are willing to test and
- develop JFFS2 support for it.
+ Say 'N' unless you have NAND flash.
config JFFS2_COMPRESSION_OPTIONS
bool "Advanced compression options for JFFS2"
+ depends on JFFS2_FS
default n
help
Enabling this option allows you to explicitly choose which
endchoice
-config JFFS2_PROC
- bool "JFFS2 proc interface support" if JFFS2_COMPRESSION_OPTIONS
- depends on JFFS2_FS && PROC_FS
- default n
- help
- You can read some statistics and set the compression mode and
- compressor priorities with this interface.
-
-
config CRAMFS
tristate "Compressed ROM file system support"
select ZLIB_INFLATE
Enabling this option will cause statistics for each server share
mounted by the cifs client to be displayed in /proc/fs/cifs/Stats
+config CIFS_XATTR
+ bool "CIFS extended attributes (EXPERIMENTAL)"
+ depends on CIFS
+ help
+ Extended attributes are name:value pairs associated with inodes by
+ the kernel or by users (see the attr(5) manual page, or visit
+ <http://acl.bestbits.at/> for details). CIFS maps the name of
+ extended attributes beginning with the user namespace prefix
+ to SMB/CIFS EAs. EAs are stored on Windows servers without the
+ user namespace prefix, but their names are seen by Linux cifs clients
+ prefaced by the user namespace prefix. The system namespace
+ (used by some filesystems to store ACLs) is not supported at
+ this time.
+
+ If unsure, say N.
+
config CIFS_POSIX
bool "CIFS POSIX Extensions (EXPERIMENTAL)"
depends on CIFS
(current->mm->start_data = N_DATADDR(ex));
current->mm->brk = ex.a_bss +
(current->mm->start_brk = N_BSSADDR(ex));
- current->mm->free_area_cache = TASK_UNMAPPED_BASE;
- /* unlimited stack is larger than TASK_SIZE */
- current->mm->non_executable_cache = current->mm->mmap_top;
+ current->mm->free_area_cache = current->mm->mmap_base;
+
current->mm->rss = 0;
current->mm->mmap = NULL;
compute_creds(bprm);
return error;
}
- error = bprm->file->f_op->read(bprm->file, (char *)text_addr,
+ error = bprm->file->f_op->read(bprm->file,
+ (char __user *)text_addr,
ex.a_text+ex.a_data, &pos);
if ((signed long)error < 0) {
send_sig(SIGKILL, current, 0);
if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
loff_t pos = fd_offset;
do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
- bprm->file->f_op->read(bprm->file,(char *)N_TXTADDR(ex),
+ bprm->file->f_op->read(bprm->file,
+ (char __user *)N_TXTADDR(ex),
ex.a_text+ex.a_data, &pos);
flush_icache_range((unsigned long) N_TXTADDR(ex),
(unsigned long) N_TXTADDR(ex) +
do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
- file->f_op->read(file, (char *)start_addr,
+ file->f_op->read(file, (char __user *)start_addr,
ex.a_text + ex.a_data, &pos);
flush_icache_range((unsigned long) start_addr,
(unsigned long) start_addr + ex.a_text + ex.a_data);
NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
if (k_platform) {
- NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
+ NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
}
if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
if (interp_aout) {
argv = sp + 2;
envp = argv + argc + 1;
- __put_user((elf_addr_t)(long)argv, sp++);
- __put_user((elf_addr_t)(long)envp, sp++);
+ __put_user((elf_addr_t)(unsigned long)argv, sp++);
+ __put_user((elf_addr_t)(unsigned long)envp, sp++);
} else {
argv = sp;
envp = argv + argc + 1;
struct exec interp_ex;
char passed_fileno[6];
struct files_struct *files;
- int executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
+ int have_pt_gnu_stack, executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
unsigned long def_flags = 0;
/* Get the exec-header */
executable_stack = EXSTACK_DISABLE_X;
break;
}
- if (i == elf_ex.e_phnum)
- def_flags |= VM_EXEC | VM_MAYEXEC;
+ have_pt_gnu_stack = (i < elf_ex.e_phnum);
relocexec = 0;
current->mm->end_data = 0;
current->mm->end_code = 0;
current->mm->mmap = NULL;
-#ifdef __HAVE_ARCH_MMAP_TOP
- current->mm->mmap_top = mmap_top();
-#endif
current->flags &= ~PF_FORKNOEXEC;
current->mm->def_flags = def_flags;
/* Do this immediately, since STACK_TOP as used in setup_arg_pages
may depend on the personality. */
SET_PERSONALITY(elf_ex, ibcs2_interpreter);
+ if (elf_read_implies_exec(elf_ex, have_pt_gnu_stack))
+ current->personality |= READ_IMPLIES_EXEC;
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
current->mm->rss = 0;
- current->mm->free_area_cache = TASK_UNMAPPED_BASE;
- current->mm->non_executable_cache = current->mm->mmap_top;
+ current->mm->free_area_cache = current->mm->mmap_base;
retval = setup_arg_pages(bprm, executable_stack);
if (retval < 0) {
send_sig(SIGKILL, current, 0);
len, offset);
}
+/**
+ * bio_uncopy_user - finish previously mapped bio
+ * @bio: bio being terminated
+ *
+ * Free pages allocated from bio_copy_user() and write back data
+ * to user space in case of a read.
+ *
+ * Returns 0 on success or -EFAULT if the copy back to user space
+ * faulted.  The bio reference taken in bio_copy_user() is dropped
+ * in all cases.
+ */
+int bio_uncopy_user(struct bio *bio)
+{
+	struct bio_vec *bvec;
+	int i, ret = 0;
+
+	if (bio_data_dir(bio) == READ) {
+		/* bi_private holds the original user address stashed by
+		 * bio_copy_user().  NOTE(review): no __user annotation on
+		 * this pointer -- confirm against sparse checking. */
+		char *uaddr = bio->bi_private;
+
+		__bio_for_each_segment(bvec, bio, i, 0) {
+			char *addr = page_address(bvec->bv_page);
+
+			/* keep freeing pages even after a fault; only the
+			 * first failure is reported to the caller */
+			if (!ret && copy_to_user(uaddr, addr, bvec->bv_len))
+				ret = -EFAULT;
+
+			__free_page(bvec->bv_page);
+			uaddr += bvec->bv_len;
+		}
+	}
+
+	bio_put(bio);
+	return ret;
+}
+
+/**
+ * bio_copy_user - copy user data to bio
+ * @q: destination block queue
+ * @uaddr: start of user address
+ * @len: length in bytes
+ * @write_to_vm: bool indicating writing to pages or not
+ *
+ * Prepares and returns a bio for indirect user io, bouncing data
+ * to/from kernel pages as necessary. Must be paired with
+ * call bio_uncopy_user() on io completion.
+ *
+ * Returns the new bio, or an ERR_PTR() value on failure.
+ */
+struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
+			  unsigned int len, int write_to_vm)
+{
+	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long start = uaddr >> PAGE_SHIFT;
+	struct bio_vec *bvec;
+	struct page *page;
+	struct bio *bio;
+	int i, ret;
+
+	bio = bio_alloc(GFP_KERNEL, end - start);
+	if (!bio)
+		return ERR_PTR(-ENOMEM);
+
+	ret = 0;
+	while (len) {
+		unsigned int bytes = PAGE_SIZE;
+
+		if (bytes > len)
+			bytes = len;
+
+		page = alloc_page(q->bounce_gfp | GFP_KERNEL);
+		if (!page) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
+			/* this page was never added to the bio, so the
+			 * cleanup loop below (which only walks bio
+			 * segments) would leak it -- free it here */
+			__free_page(page);
+			ret = -EINVAL;
+			break;
+		}
+
+		len -= bytes;
+	}
+
+	/*
+	 * success
+	 */
+	if (!ret) {
+		if (!write_to_vm) {
+			unsigned long src = uaddr;
+
+			bio->bi_rw |= (1 << BIO_RW);
+			/*
+			 * for a write, copy in data to kernel pages,
+			 * advancing the user source address per segment
+			 * (a fixed source would copy the first page of
+			 * user data into every segment)
+			 */
+			ret = -EFAULT;
+			bio_for_each_segment(bvec, bio, i) {
+				char *addr = page_address(bvec->bv_page);
+
+				if (copy_from_user(addr, (char *) src, bvec->bv_len))
+					goto cleanup;
+
+				src += bvec->bv_len;
+			}
+		}
+
+		/* stash the user address for bio_uncopy_user() */
+		bio->bi_private = (void *) uaddr;
+		return bio;
+	}
+
+	/*
+	 * cleanup
+	 */
+cleanup:
+	bio_for_each_segment(bvec, bio, i)
+		__free_page(bvec->bv_page);
+
+	bio_put(bio);
+	return ERR_PTR(ret);
+}
+
static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
unsigned long uaddr, unsigned int len,
int write_to_vm)
* size for now, in the future we can relax this restriction
*/
if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
- return NULL;
+ return ERR_PTR(-EINVAL);
bio = bio_alloc(GFP_KERNEL, nr_pages);
if (!bio)
- return NULL;
+ return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
if (!pages)
goto out;
if (!write_to_vm)
bio->bi_rw |= (1 << BIO_RW);
- blk_queue_bounce(q, &bio);
+ bio->bi_flags |= (1 << BIO_USER_MAPPED);
return bio;
out:
kfree(pages);
bio_put(bio);
- return NULL;
+ return ERR_PTR(ret);
}
/**
* @write_to_vm: bool indicating writing to pages or not
*
* Map the user space address into a bio suitable for io to a block
- * device.
+ * device. Returns an error pointer in case of error.
*/
struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
unsigned long uaddr, unsigned int len, int write_to_vm)
bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
- if (bio) {
- /*
- * subtle -- if __bio_map_user() ended up bouncing a bio,
- * it would normally disappear when its bi_end_io is run.
- * however, we need it for the unmap, so grab an extra
- * reference to it
- */
- bio_get(bio);
+ if (IS_ERR(bio))
+ return bio;
- if (bio->bi_size < len) {
- bio_endio(bio, bio->bi_size, 0);
- bio_unmap_user(bio, 0);
- return NULL;
- }
- }
+ /*
+ * subtle -- if __bio_map_user() ended up bouncing a bio,
+ * it would normally disappear when its bi_end_io is run.
+ * however, we need it for the unmap, so grab an extra
+ * reference to it
+ */
+ bio_get(bio);
- return bio;
+ if (bio->bi_size == len)
+ return bio;
+
+ /*
+ * don't support partial mappings
+ */
+ bio_endio(bio, bio->bi_size, 0);
+ bio_unmap_user(bio);
+ return ERR_PTR(-EINVAL);
}
-static void __bio_unmap_user(struct bio *bio, int write_to_vm)
+static void __bio_unmap_user(struct bio *bio)
{
struct bio_vec *bvec;
int i;
* make sure we dirty pages we wrote to
*/
__bio_for_each_segment(bvec, bio, i, 0) {
- if (write_to_vm)
+ if (bio_data_dir(bio) == READ)
set_page_dirty_lock(bvec->bv_page);
page_cache_release(bvec->bv_page);
/**
 * bio_unmap_user - unmap a bio
 * @bio: the bio being unmapped
- * @write_to_vm: bool indicating whether pages were written to
 *
- * Unmap a bio previously mapped by bio_map_user(). The @write_to_vm
- * must be the same as passed into bio_map_user(). Must be called with
+ * Unmap a bio previously mapped by bio_map_user(). Must be called with
 * a process context.
 *
 * bio_unmap_user() may sleep.
 */
-void bio_unmap_user(struct bio *bio, int write_to_vm)
+void bio_unmap_user(struct bio *bio)
 {
-	__bio_unmap_user(bio, write_to_vm);
+	__bio_unmap_user(bio);
+	/* drop the extra reference bio_map_user() took for the unmap */
 	bio_put(bio);
 }
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
EXPORT_SYMBOL(bio_split_pool);
+EXPORT_SYMBOL(bio_copy_user);
+EXPORT_SYMBOL(bio_uncopy_user);
+Version 1.22
+------------
+Add config option to enable XATTR (extended attribute) support, mapping
+xattr names in the "user." namespace space to SMB/CIFS EAs.
+
+Version 1.21
+------------
+Add new mount parm to control whether mode check (vfs_permission) is done on
+the client. If Unix extensions are enabled and the uids on the client
+and server do not match, client permission checks are meaningless on
+server uids that do not exist on the client (this does not affect the
+normal ACL check which occurs on the server). Fix default uid
+on mknod to match create and mkdir. Add optional mount parm to allow
+override of the default uid behavior (in which the server sets the uid
+and gid of newly created files). Normally for network filesystem mounts
+users want the server to set the uid/gid on newly created files (rather than
+using the uid of the client processes, as you would in a local filesystem).
+
Version 1.20
------------
Make transaction counts more consistent. Merge /proc/fs/cifs/SimultaneousOps
5) make dep
6) make modules (or "make" if CIFS VFS not to be built as a module)
-For Linux 2.5:
+For Linux 2.6:
1) Download the kernel (e.g. from http://www.kernel.org or from bitkeeper
at bk://linux.bkbits.net/linux-2.5) and change directory into the top
of the kernel directory tree (e.g. /usr/src/linux-2.5.73)
similar files reside (usually /sbin). Although the helper software is not
required, mount.cifs is recommended. Eventually the Samba 3.0 utility program
"net" may also be helpful since it may someday provide easier mount syntax for
-users who are used to Windows e.g. net use <mount point> <UNC name or cifs URL>
+users who are used to Windows e.g. net use <mount point> <UNC name or cifs URL>
Note that running the Winbind pam/nss module (logon service) on all of your
Linux clients is useful in mapping Uids and Gids consistently across the
domain to the proper network user. The mount.cifs mount helper can be
gcc samba/source/client/mount.cifs.c -o mount.cifs
+Allowing User Mounts
+====================
+To permit users to mount and unmount over directories they own is possible
+with the cifs vfs. A way to enable such mounting is to mark the mount.cifs
+utility as suid (e.g. "chmod +s /sbin/mount.cifs"). To enable users to
+umount shares they mount requires
+1) mount.cifs version 1.4 or later
+2) an entry for the share in /etc/fstab indicating that a user may
+unmount it e.g.
+//server/usersharename /mnt/username cifs user 0 0
+
Note that when the mount.cifs utility is run suid (allowing user mounts),
in order to reduce risks, the "nosuid" mount flag is passed in on mount to
disallow execution of an suid program mounted on the remote target.
delete readonly = yes
ea support = yes
-Note that ea support is required for supporting Linux xattrs.
-Some administrators also change the "map archive" and the "create mask"
-parameters from their default values. Creating special devices (mknod)
+Note that server ea support is required for supporting xattrs from the Linux
+cifs client, and that EA support is present in later versions of Samba (e.g.
+3.0.6 and later). EA support also works in all versions of Windows, at least to
+shares on NTFS filesystems. Extended Attribute (xattr) support is an optional
+feature of most Linux filesystems which may require enabling via
+make menuconfig
+
+Some administrators may want to change Samba's smb.conf "map archive" and
+"create mask" parameters from the default. Creating special devices (mknod)
remotely may require specifying a mkdev function to Samba if you are not using
-Samba 3.0.5 or later. For more information on these see the manual pages
+Samba 3.0.6 or later. For more information on these see the manual pages
("man smb.conf") on the Samba server system. Note that the cifs vfs,
unlike the smbfs vfs, does not read the smb.conf on the client system
(the few optional settings are passed in on mount via -o parameters instead).
Note that Samba 2.2.7 or later includes a fix that allows the CIFS VFS to delete
open files (required for strict POSIX compliance). Windows Servers already
supported this feature. Samba server does not allow symlinks that refer to files
-outside of the share, so in Samba versions prior to 3.0.5, most symlinks to
+outside of the share, so in Samba versions prior to 3.0.6, most symlinks to
files with absolute paths (ie beginning with slash) such as:
ln -s /mnt/foo bar
-would be forbidden. Samba 3.0.5 server or later includes the ability to create
+would be forbidden. Samba 3.0.6 server or later includes the ability to create
such symlinks safely by converting unsafe symlinks (ie symlinks to server
files that are outside of the share) to a samba specific format on the server
that is ignored by local server applications and non-cifs clients and that will
running an altered binary on your local system (downloaded from a hostile server
or altered by a hostile router).
+Although mounting using format corresponding to the CIFS URL specification is
+not possible in mount.cifs yet, it is possible to use an alternate format
+for the server and sharename (which is somewhat similar to NFS style mount
+syntax) instead of the more widely used UNC format (i.e. \\server\share):
+ mount -t cifs tcp_name_of_server:share_name /mnt -o user=myname,pass=mypasswd
+
When using the mount helper mount.cifs, passwords may be specified via alternate
mechanisms, instead of specifying it after -o using the normal "pass=" syntax
on the command line:
mount helper will not prompt the user for a password
if guest is specified on the mount options. If no
password is specified a null password will be used.
-
+ perm Client does permission checks (vfs_permission check of uid
+ and gid of the file against the mode and desired operation).
+ Note that this is in addition to the normal ACL check on the
+ target machine done by the server software.
+ Client permission checking is enabled by default.
+ noperm Client does not do permission checks. This can expose
+ files on this mount to access by other users on the local
+ client system. It is typically only needed when the server
+ supports the CIFS Unix Extensions but the UIDs/GIDs on the
+ client and server system do not match closely enough to allow
+ access by the user doing the mount.
+ Note that this does not affect the normal ACL check on the
+ target machine done by the server software (of the server
+ ACL against the user name provided at mount time).
+ setuids If the CIFS Unix extensions are negotiated with the server
+ the client will attempt to set the effective uid and gid of
+ the local process on newly created files, directories, and
+ devices (create, mkdir, mknod).
+ nosetuids The client will not attempt to set the uid and gid on
+ on newly created files, directories, and devices (create,
+ mkdir, mknod) which will result in the server setting the
+ uid and gid to the default (usually the server uid of the
+ user who mounted the share). Letting the server (rather than
+ the client) set the uid and gid is the default. This
+ parameter has no effect if the CIFS Unix Extensions are not
+ negotiated.
+
The mount.cifs mount helper also accepts a few mount options before -o
including:
echo 1 > /proc/fs/cifs/traceSMB
-Three other experimental features are under development and to test
+Two other experimental features are under development and to test
require enabling an ifdef (e.g. by adding "#define CIFS_FCNTL" in cifsglob.h)
CONFIG_CIFS_QUOTA
- CONFIG_CIFS_XATTR
-
CONFIG_CIFS_FCNTL (fcntl needed for support of directory change
notification and perhaps later for file leases)
-version 1.16 May 27, 2004
+version 1.22 July 30, 2004
A Partial List of Missing Features
==================================
a) Support for SecurityDescriptors for chmod/chgrp/chown so
these can be supported for Windows servers
-b) Better pam/winbind integration
+b) Better pam/winbind integration (e.g. to handle uid mapping
+better)
c) multi-user mounts - multiplexed sessionsetups over single vc
(ie tcp session) - prettying up needed
h) quota support
-i) support for the Linux 2.5 kernel new feature get_xattr and set_xattr
-which will allow us to expose dos attributes as well as real
-ACLs. This support has been started in the current code, but is
-ifdeffed out.
-
-k) finish writepages support (multi-page write behind for improved
+j) finish writepages support (multi-page write behind for improved
performance) and syncpage
-l) hook lower into the sockets api (as NFS/SunRPC does) to avoid the
+k) hook lower into the sockets api (as NFS/SunRPC does) to avoid the
extra copy in/out of the socket buffers in some cases.
-m) finish support for IPv6. This is mostly complete but
+l) finish support for IPv6. This is mostly complete but
needs a simple inet_pton like function to convert ipv6
addresses in string representation.
-o) Better optimize open (and pathbased setfilesize) to reduce the
+m) Better optimize open (and pathbased setfilesize) to reduce the
oplock breaks coming from windows srv. Piggyback identical file
opens on top of each other by incrementing reference count rather
than resending (helps reduce server resource utilization and avoid
spurious oplock breaks).
-p) Improve performance of readpages by sending more than one read
+o) Improve performance of readpages by sending more than one read
at a time when 8 pages or more are requested. Evaluate whether
reads larger than 16K would be helpful.
-q) For support of Windows9x/98 we need to retry failed mounts
+p) For support of Windows9x/98 we need to retry failed mounts
to *SMBSERVER (default server name) with the uppercase hostname
in the RFC1001 session_init request.
-r) Add Extended Attributed support (for storing UID/GID info
-to Windows servers)
+q) Add support for storing symlink and fifo info to Windows servers
+in the Extended Attribute format their SFU clients would recognize.
-s) Finish fcntl D_NOTIFY support so kde and gnome file list windows
+r) Finish fcntl D_NOTIFY support so kde and gnome file list windows
will autorefresh
-t) Add GUI tool to configure /proc/fs/cifs settings and for display of
+s) Add GUI tool to configure /proc/fs/cifs settings and for display of
the CIFS statistics
KNOWN BUGS (updated May 27, 2004)
differences but worth investigating). Also debug Samba to
see why lock test case 7 takes longer to complete to Samba
than to Windows.
+5) implement search rewind (seeking backward in a readdir), which is
+necessary for one of the "special" subsection of posix file API
+tests in the Connectathon nfs test suite.
Misc testing to do
==================
1) check out max path names and max path name components against various server
-types. Try nested symlinks. Return max path name in stat -f information
+types. Try nested symlinks (8 deep). Return max path name in stat -f information
2) Modify file portion of ltp so it can run against a mounted network
share and run it against cifs vfs.
negotiated size) and send larger write sizes to modern servers.
4) More exhaustively test the recently added NT4 support against various
-NT4 service pack levels.
+NT4 service pack levels, and fix cifs_setattr for setting file times and
+size to fall back to level 1 when error invalid level returned.
#ifndef _CIFS_FS_SB_H
#define _CIFS_FS_SB_H
+#define CIFS_MOUNT_NO_PERM 1 /* do not do client vfs_perm check */
+#define CIFS_MOUNT_SET_UID 2 /* set current->euid in create etc. */
+
struct cifs_sb_info {
struct cifsTconInfo *tcon; /* primary mount */
struct list_head nested_tcon_q;
gid_t mnt_gid;
mode_t mnt_file_mode;
mode_t mnt_dir_mode;
+ int mnt_cifs_flags;
};
#endif /* _CIFS_FS_SB_H */
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
-/* BB when mempool_resize is added back in, we will resize pool on new mount */
-#define CIFS_MIN_RCV_POOL 11 /* enough for progress to five servers */
#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
{
- struct cifs_sb_info *cifs_sb;
+ struct cifs_sb_info *cifs_sb;
- cifs_sb = CIFS_SB(inode->i_sb);
+ cifs_sb = CIFS_SB(inode->i_sb);
- if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
- /* the server supports the Unix-like mode bits and does its
- own permission checks, and therefore we do not allow the file
- mode to be overriden on these mounts - so do not do perm
- check on client side */
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
return 0;
} else /* file mode might have been restricted at mount time
on the client (above and beyond ACL on servers) for
*/
atomic_set(&sesInfoAllocCount, 0);
atomic_set(&tconInfoAllocCount, 0);
+ atomic_set(&tcpSesAllocCount,0);
atomic_set(&tcpSesReconnectCount, 0);
atomic_set(&tconInfoReconnectCount, 0);
#define TRUE 1
#endif
-extern int map_cifs_error(int error_class, int error_code,
- int status_codes_negotiated);
-
extern struct address_space_operations cifs_addr_ops;
/* Functions related to super block operations */
extern struct super_operations cifs_super_ops;
-extern void cifs_put_inode(struct inode *);
extern void cifs_read_inode(struct inode *);
extern void cifs_delete_inode(struct inode *);
/* extern void cifs_write_inode(struct inode *); *//* BB not needed yet */
termination then *2 for unicode versions */
#define MAX_PASSWORD_SIZE 16
+#define CIFS_MIN_RCV_POOL 4
+
/*
* MAX_REQ is the maximum number of requests that WE will send
* on one socket concurently. It also matches the most common
*/
GLOBAL_EXTERN atomic_t sesInfoAllocCount;
GLOBAL_EXTERN atomic_t tconInfoAllocCount;
-
+GLOBAL_EXTERN atomic_t tcpSesAllocCount;
GLOBAL_EXTERN atomic_t tcpSesReconnectCount;
GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
/* PathInfo/FileInfo infolevels */
#define SMB_INFO_STANDARD 1
+#define SMB_SET_FILE_EA 2
+#define SMB_QUERY_FILE_EA_SIZE 2
#define SMB_INFO_QUERY_EAS_FROM_LIST 3
#define SMB_INFO_QUERY_ALL_EAS 4
#define SMB_INFO_IS_NAME_VALID 6
char LinkDest[1];
} FILE_UNIX_LINK_INFO; /* level 513 QPathInfo */
+typedef struct {
+ __u16 CreationDate;
+ __u16 CreationTime;
+ __u16 LastAccessDate;
+ __u16 LastAccessTime;
+ __u16 LastWriteDate;
+ __u16 LastWriteTime;
+ __u32 DataSize; /* File Size (EOF) */
+ __u32 AllocationSize;
+ __u16 Attributes; /* verify not u32 */
+ __u32 EASize;
+} FILE_INFO_STANDARD; /* level 1 SetPath/FileInfo */
+
/* defines for enumerating possible values of the Unix type field below */
#define UNIX_FILE 0
#define UNIX_DIR 1
} FILE_DIRECTORY_INFO; /* level 257 FF response data area */
struct gea {
- unsigned char cbName;
- char szName[1];
+ unsigned char name_len;
+ char name[1];
};
struct gealist {
- unsigned long cbList;
+ unsigned long list_len;
struct gea list[1];
};
unsigned char EA_flags;
__u8 name_len;
__u16 value_len;
- char szName[1];
+ char name[1];
/* optionally followed by value */
};
/* flags for _FEA.fEA */
const struct nls_table *nls_codepage);
extern int CIFSSMBSetTimes(const int xid, struct cifsTconInfo *tcon,
- char *fileName, FILE_BASIC_INFO * data,
+ const char *fileName, const FILE_BASIC_INFO * data,
const struct nls_table *nls_codepage);
extern int CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon,
- char *fileName, __u64 size,int setAllocationSizeFlag,
+ const char *fileName, __u64 size,int setAllocationSizeFlag,
const struct nls_table *nls_codepage);
extern int CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon,
__u64 size, __u16 fileHandle,__u32 opener_pid, int AllocSizeFlag);
extern int cifs_calculate_mac_key(char * key,const char * rn,const char * pass);
extern void CalcNTLMv2_partial_mac_key(struct cifsSesInfo *, struct nls_table *);
extern void CalcNTLMv2_response(const struct cifsSesInfo *,char * );
-
-extern int CIFSBuildServerList(int xid, char *serverBufferList,
- int recordlength, int *entries,
- int *totalEntries, int *topoChangedFlag);
-extern int CIFSSMBQueryShares(int xid, struct cifsTconInfo *tcon,
- struct shareInfo *shareList, int bufferLen,
- int *entries, int *totalEntries);
-extern int CIFSSMBQueryAlias(int xid, struct cifsTconInfo *tcon,
- struct aliasInfo *aliasList, int bufferLen,
- int *entries, int *totalEntries);
-extern int CIFSSMBAliasInfo(int xid, struct cifsTconInfo *tcon,
- char *aliasName, char *serverName,
- char *shareName, char *comment);
-extern int CIFSSMBGetShareInfo(int xid, struct cifsTconInfo *tcon,
- char *share, char *comment);
-extern int CIFSSMBGetUserPerms(int xid, struct cifsTconInfo *tcon,
- char *userName, char *searchName, int *perms);
-extern int CIFSSMBSync(int xid, struct cifsTconInfo *tcon, int netfid, int pid);
-
-extern int CIFSSMBSeek(int xid,
- struct cifsTconInfo *tcon,
- int netfid,
- int pid,
- int whence, unsigned long offset, long long *newoffset);
-
extern int CIFSSMBCopy(int xid,
struct cifsTconInfo *source_tcon,
const char *fromName,
extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
const int notify_subdirs,const __u16 netfid,__u32 filter,
const struct nls_table *nls_codepage);
-extern int CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName,
- char * EAData, size_t size,
- const struct nls_table *nls_codepage);
+extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
+ const unsigned char *searchName, char * EAData,
+ size_t bufsize, const struct nls_table *nls_codepage);
+extern ssize_t CIFSSMBQueryEA(const int xid,struct cifsTconInfo * tcon,
+ const unsigned char * searchName,const unsigned char * ea_name,
+ unsigned char * ea_value, size_t buf_size,
+ const struct nls_table *nls_codepage);
+extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
+ const char *fileName, const char * ea_name,
+ const void * ea_value, const __u16 ea_value_len,
+ const struct nls_table *nls_codepage);
#endif /* _CIFSPROTO_H */
}
int
-CIFSSMBQFSAttributeInfo(int xid, struct cifsTconInfo *tcon,
+CIFSSMBQFSAttributeInfo(const int xid, struct cifsTconInfo *tcon,
const struct nls_table *nls_codepage)
{
/* level 0x105 SMB_QUERY_FILE_SYSTEM_INFO */
}
int
-CIFSSMBQFSDeviceInfo(int xid, struct cifsTconInfo *tcon,
+CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon,
const struct nls_table *nls_codepage)
{
/* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */
}
int
-CIFSSMBQFSUnixInfo(int xid, struct cifsTconInfo *tcon,
+CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon,
const struct nls_table *nls_codepage)
{
/* level 0x200 SMB_QUERY_CIFS_UNIX_INFO */
in Samba which this routine can run into */
int
-CIFSSMBSetEOF(int xid, struct cifsTconInfo *tcon, char *fileName,
+CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName,
__u64 size, int SetAllocation, const struct nls_table *nls_codepage)
{
struct smb_com_transaction2_spi_req *pSMB = NULL;
}
int
-CIFSSMBSetTimes(int xid, struct cifsTconInfo *tcon, char *fileName,
- FILE_BASIC_INFO * data, const struct nls_table *nls_codepage)
+CIFSSMBSetTimes(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+ const FILE_BASIC_INFO * data,
+ const struct nls_table *nls_codepage)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
TRANSACTION2_SPI_RSP *pSMBr = NULL;
return rc;
}
+
+/*
+ * CIFSSMBSetTimesLegacy - set times/attributes on a file by path using
+ * the pre-NT TRANS2_SET_PATH_INFORMATION infolevel SMB_INFO_STANDARD
+ * (level 1), for older servers that lack SMB_SET_FILE_BASIC_INFO.
+ *
+ * @xid:          per-operation transaction id (debug/tracking)
+ * @tcon:         tree connection the path resides on
+ * @fileName:     share-relative path of the target file
+ * @data:         level 1 info (DOS date/time pairs, sizes, attributes)
+ * @nls_codepage: codepage for path conversion on non-unicode sessions
+ *
+ * Returns 0 on success or a negative error code; transparently retries
+ * from the top when -EAGAIN is returned (session reconnected).
+ */
+int
+CIFSSMBSetTimesLegacy(int xid, struct cifsTconInfo *tcon, char *fileName,
+		FILE_INFO_STANDARD * data, const struct nls_table *nls_codepage)
+{
+	TRANSACTION2_SPI_REQ *pSMB = NULL;
+	TRANSACTION2_SPI_RSP *pSMBr = NULL;
+	int name_len;
+	int rc = 0;
+	int bytes_returned = 0;
+	char *data_offset;
+
+	cFYI(1, ("In SetTimesLegacy"));
+
+SetTimesRetryLegacy:
+	rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+		(void **) &pSMBr);
+	if (rc)
+		return rc;
+
+	/* copy the path into the request buffer, unicode or codepage */
+	if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+		name_len =
+		    cifs_strtoUCS((wchar_t *) pSMB->FileName, fileName, 530
+		/* find define for this maxpathcomponent */
+		, nls_codepage);
+		name_len++;	/* trailing null */
+		name_len *= 2;	/* unicode: two bytes per character */
+	} else {	/* BB improve the check for buffer overruns BB */
+		name_len = strnlen(fileName, 530);
+		name_len++;	/* trailing null */
+		strncpy(pSMB->FileName, fileName, name_len);
+	}
+/* BB fixme - we have to map to FILE_STANDARD_INFO (level 1 info
+   in parent function, from the better and usual FILE_BASIC_INFO */
+	pSMB->ParameterCount = 6 + name_len;
+	pSMB->DataCount = sizeof (FILE_INFO_STANDARD);
+	pSMB->MaxParameterCount = cpu_to_le16(2);
+	pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find exact max SMB PDU from sess structure BB */
+	pSMB->MaxSetupCount = 0;
+	pSMB->Reserved = 0;
+	pSMB->Flags = 0;
+	pSMB->Timeout = 0;
+	pSMB->Reserved2 = 0;
+	/* offsets/counts are computed and used locally in CPU byte order
+	   first, then converted to little-endian in place below */
+	pSMB->ParameterOffset = offsetof(struct smb_com_transaction2_spi_req,
+		InformationLevel) - 4;
+	pSMB->DataOffset = pSMB->ParameterOffset + pSMB->ParameterCount;
+	data_offset = (char *) (&pSMB->hdr.Protocol) + pSMB->DataOffset;
+	pSMB->ParameterOffset = cpu_to_le16(pSMB->ParameterOffset);
+	pSMB->DataOffset = cpu_to_le16(pSMB->DataOffset);
+	pSMB->SetupCount = 1;
+	pSMB->Reserved3 = 0;
+	pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+	pSMB->ByteCount = 3 /* pad */ + pSMB->ParameterCount + pSMB->DataCount;
+
+	pSMB->DataCount = cpu_to_le16(pSMB->DataCount);
+	pSMB->ParameterCount = cpu_to_le16(pSMB->ParameterCount);
+	pSMB->TotalDataCount = pSMB->DataCount;
+	pSMB->TotalParameterCount = pSMB->ParameterCount;
+	/* I doubt that passthrough levels apply to this old
+	   preNT info level */
+/*	if (tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)
+		pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_BASIC_INFO2);
+	else*/
+	pSMB->InformationLevel = cpu_to_le16(SMB_INFO_STANDARD);
+	pSMB->Reserved4 = 0;
+	pSMB->hdr.smb_buf_length += pSMB->ByteCount;
+	/* level 1 payload goes just past the path in the request */
+	memcpy(data_offset, data, sizeof (FILE_INFO_STANDARD));
+	pSMB->ByteCount = cpu_to_le16(pSMB->ByteCount);
+	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
+	if (rc) {
+		cFYI(1, ("SetPathInfo (times legacy) returned %d", rc));
+	}
+
+	if (pSMB)
+		cifs_buf_release(pSMB);
+
+	if (rc == -EAGAIN)
+		goto SetTimesRetryLegacy;
+
+	return rc;
+}
+
int
CIFSSMBUnixSetPerms(const int xid, struct cifsTconInfo *tcon,
char *fileName, __u64 mode, __u64 uid, __u64 gid,
return rc;
}
#ifdef CONFIG_CIFS_XATTR
-int
+ssize_t
CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
const unsigned char *searchName,
- char * EAData, size_t size,
+ char * EAData, size_t buf_size,
const struct nls_table *nls_codepage)
{
/* BB assumes one setup word */
int rc = 0;
int bytes_returned;
int name_len;
+ struct fea * temp_fea;
+ char * temp_ptr;
cFYI(1, ("In Query All EAs path %s", searchName));
QAllEAsRetry:
, nls_codepage);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
+ } else { /* BB improve the check for buffer overruns BB */
name_len = strnlen(searchName, 530);
name_len++; /* trailing null */
strncpy(pSMB->FileName, searchName, name_len);
ea_response_data = (struct fealist *)
(((char *) &pSMBr->hdr.Protocol) +
pSMBr->DataOffset);
+ ea_response_data->list_len =
+ cpu_to_le32(ea_response_data->list_len);
cFYI(1,("ea length %d",ea_response_data->list_len));
+ name_len = ea_response_data->list_len;
+ if(name_len <= 8) {
+ /* returned EA size zeroed at top of function */
+ cFYI(1,("empty EA list returned from server"));
+ } else {
+ /* account for ea list len */
+ name_len -= 4;
+ temp_fea = ea_response_data->list;
+ temp_ptr = (char *)temp_fea;
+ while(name_len > 0) {
+ name_len -= 4;
+ temp_ptr += 4;
+ rc += temp_fea->name_len;
+ /* account for prefix user. and trailing null */
+ rc = rc + 5 + 1;
+ if(rc<buf_size) {
+ memcpy(EAData,"user.",5);
+ EAData+=5;
+ memcpy(EAData,temp_ptr,temp_fea->name_len);
+ EAData+=temp_fea->name_len;
+ /* null terminate name */
+ *EAData = 0;
+ EAData = EAData + 1;
+ } else if(buf_size == 0) {
+ /* skip copy - calc size only */
+ } else {
+ /* stop before overrun buffer */
+ rc = -ERANGE;
+ break;
+ }
+ name_len -= temp_fea->name_len;
+ temp_ptr += temp_fea->name_len;
+ /* account for trailing null */
+ name_len--;
+ temp_ptr++;
+ temp_fea->value_len = cpu_to_le16(temp_fea->value_len);
+ name_len -= temp_fea->value_len;
+ temp_ptr += temp_fea->value_len;
+ /* BB check that temp_ptr is still within smb BB*/
+ /* no trailing null to account for in value len */
+ /* go on to next EA */
+ temp_fea = (struct fea *)temp_ptr;
+ }
+ }
}
}
if (pSMB)
return rc;
}
+
+/*
+ * CIFSSMBQueryEA: fetch the value of a single extended attribute (EA) of
+ * the file at searchName.  Sends TRANS2_QUERY_PATH_INFORMATION with info
+ * level SMB_INFO_QUERY_ALL_EAS (the server has no single-EA query level)
+ * and scans the returned FEA list for an entry whose name matches ea_name.
+ *
+ * Returns the EA value length (value copied into ea_value when buf_size
+ * is large enough), -ENOENT when no matching EA exists, -ERANGE when the
+ * caller's buffer is too small, or another negative errno on failure.
+ * buf_size == 0 requests a size-only probe (nothing is copied).
+ * Retries transparently on -EAGAIN (session reconnect).
+ */
+ssize_t CIFSSMBQueryEA(const int xid,struct cifsTconInfo * tcon,
+ const unsigned char * searchName,const unsigned char * ea_name,
+ unsigned char * ea_value, size_t buf_size,
+ const struct nls_table *nls_codepage)
+{
+ TRANSACTION2_QPI_REQ *pSMB = NULL;
+ TRANSACTION2_QPI_RSP *pSMBr = NULL;
+ int rc = 0;
+ int bytes_returned;
+ int name_len;
+ struct fea * temp_fea;
+ char * temp_ptr;
+
+ cFYI(1, ("In Query EA path %s", searchName));
+QEARetry:
+ rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+ (void **) &pSMBr);
+ if (rc)
+ return rc;
+
+ /* copy path into request: UCS-2 when the session negotiated Unicode,
+ otherwise raw bytes in the local codepage */
+ if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+ name_len =
+ cifs_strtoUCS((wchar_t *) pSMB->FileName, searchName, 530
+ /* find define for this maxpathcomponent */
+ , nls_codepage);
+ name_len++; /* trailing null */
+ name_len *= 2;
+ } else { /* BB improve the check for buffer overruns BB */
+ name_len = strnlen(searchName, 530);
+ name_len++; /* trailing null */
+ strncpy(pSMB->FileName, searchName, name_len);
+ }
+
+ pSMB->TotalParameterCount = 2 /* level */ + 4 /* reserved */ +
+ name_len /* includes null */ ;
+ pSMB->TotalDataCount = 0;
+ pSMB->MaxParameterCount = cpu_to_le16(2);
+ pSMB->MaxDataCount = cpu_to_le16(4000); /* BB find exact max SMB PDU from sess structure BB */
+ pSMB->MaxSetupCount = 0;
+ pSMB->Reserved = 0;
+ pSMB->Flags = 0;
+ pSMB->Timeout = 0;
+ pSMB->Reserved2 = 0;
+ pSMB->ParameterOffset = cpu_to_le16(offsetof(
+ struct smb_com_transaction2_qpi_req ,InformationLevel) - 4);
+ pSMB->DataCount = 0;
+ pSMB->DataOffset = 0;
+ pSMB->SetupCount = 1;
+ pSMB->Reserved3 = 0;
+ pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
+ /* ByteCount is built in native byte order and swapped just before
+ send; note TotalParameterCount is still native on this line */
+ pSMB->ByteCount = pSMB->TotalParameterCount + 1 /* pad */ ;
+ pSMB->TotalParameterCount = cpu_to_le16(pSMB->TotalParameterCount);
+ pSMB->ParameterCount = pSMB->TotalParameterCount;
+ pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS);
+ pSMB->Reserved4 = 0;
+ pSMB->hdr.smb_buf_length += pSMB->ByteCount;
+ pSMB->ByteCount = cpu_to_le16(pSMB->ByteCount);
+
+ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+ (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+ if (rc) {
+ cFYI(1, ("Send error in Query EA = %d", rc));
+ } else { /* decode response */
+ pSMBr->DataOffset = le16_to_cpu(pSMBr->DataOffset);
+ /* BB also check enough total bytes returned */
+ /* BB we need to improve the validity checking
+ of these trans2 responses */
+ if ((pSMBr->ByteCount < 4) || (pSMBr->DataOffset > 512))
+ rc = -EIO; /* bad smb */
+ /* else if (pFindData){
+ memcpy((char *) pFindData,
+ (char *) &pSMBr->hdr.Protocol +
+ pSMBr->DataOffset, kl);
+ }*/ else {
+ /* check that length of list is not more than bcc */
+ /* check that each entry does not go beyond length
+ of list */
+ /* check that each element of each entry does not
+ go beyond end of list */
+ struct fealist * ea_response_data;
+ rc = -ENOENT;
+ /* validate_trans2_offsets() */
+ /* BB to check if(start of smb + pSMBr->DataOffset > &bcc+ bcc)*/
+ ea_response_data = (struct fealist *)
+ (((char *) &pSMBr->hdr.Protocol) +
+ pSMBr->DataOffset);
+ /* NOTE(review): converting a received field; le32_to_cpu
+ is the intended direction here (same operation on both
+ LE and BE, so behavior is unaffected) */
+ ea_response_data->list_len =
+ cpu_to_le32(ea_response_data->list_len);
+ cFYI(1,("ea length %d",ea_response_data->list_len));
+ name_len = ea_response_data->list_len;
+ if(name_len <= 8) {
+ /* returned EA size zeroed at top of function */
+ cFYI(1,("empty EA list returned from server"));
+ } else {
+ /* account for ea list len */
+ name_len -= 4;
+ temp_fea = ea_response_data->list;
+ temp_ptr = (char *)temp_fea;
+ /* loop through checking if we have a matching
+ name and then return the associated value */
+ while(name_len > 0) {
+ /* each FEA entry starts with a 4 byte fixed
+ header (flags, name_len, value_len) */
+ name_len -= 4;
+ temp_ptr += 4;
+ temp_fea->value_len = cpu_to_le16(temp_fea->value_len);
+ /* BB validate that value_len falls within SMB,
+ even though maximum for name_len is 255 */
+ if(memcmp(temp_fea->name,ea_name,
+ temp_fea->name_len) == 0) {
+ /* found a match */
+ rc = temp_fea->value_len;
+ /* account for prefix user. and trailing null */
+ if(rc<=buf_size) {
+ /* value bytes follow the name and its
+ trailing null inside the FEA entry */
+ memcpy(ea_value,
+ temp_fea->name+temp_fea->name_len+1,
+ rc);
+ /* ea values, unlike ea names,
+ are not null terminated */
+ } else if(buf_size == 0) {
+ /* skip copy - calc size only */
+ } else {
+ /* stop before overrun buffer */
+ rc = -ERANGE;
+ }
+ break;
+ }
+ /* no match: step over name, its null, and value */
+ name_len -= temp_fea->name_len;
+ temp_ptr += temp_fea->name_len;
+ /* account for trailing null */
+ name_len--;
+ temp_ptr++;
+ name_len -= temp_fea->value_len;
+ temp_ptr += temp_fea->value_len;
+ /* no trailing null to account for in value len */
+ /* go on to next EA */
+ temp_fea = (struct fea *)temp_ptr;
+ }
+ }
+ }
+ }
+ if (pSMB)
+ cifs_buf_release(pSMB);
+ if (rc == -EAGAIN)
+ goto QEARetry;
+
+ return rc;
+}
+
+/*
+ * CIFSSMBSetEA: set (or, with ea_value_len == 0, delete) one extended
+ * attribute on the file named fileName, via TRANS2_SET_PATH_INFORMATION
+ * with info level SMB_SET_FILE_EA.
+ *
+ * ea_name may be NULL (empty name) and is limited to 255 bytes; EA names
+ * are always ASCII on the wire.  Caller guarantees ea_value_len < 64K.
+ * Returns 0 on success or a negative errno; retries on -EAGAIN.
+ */
+int
+CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+ const char * ea_name, const void * ea_value,
+ const __u16 ea_value_len, const struct nls_table *nls_codepage)
+{
+ struct smb_com_transaction2_spi_req *pSMB = NULL;
+ struct smb_com_transaction2_spi_rsp *pSMBr = NULL;
+ struct fealist *parm_data;
+ int name_len;
+ int rc = 0;
+ int bytes_returned = 0;
+
+ cFYI(1, ("In SetEA"));
+SetEARetry:
+ rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
+ (void **) &pSMBr);
+ if (rc)
+ return rc;
+
+ /* copy path: UCS-2 on Unicode sessions, local codepage otherwise */
+ if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+ name_len =
+ cifs_strtoUCS((wchar_t *) pSMB->FileName, fileName, 530
+ /* find define for this maxpathcomponent */
+ , nls_codepage);
+ name_len++; /* trailing null */
+ name_len *= 2;
+ } else { /* BB improve the check for buffer overruns BB */
+ name_len = strnlen(fileName, 530);
+ name_len++; /* trailing null */
+ strncpy(pSMB->FileName, fileName, name_len);
+ }
+
+ pSMB->ParameterCount = 6 + name_len;
+
+ /* done calculating parms using name_len of file name,
+ now use name_len to calculate length of ea name
+ we are going to create in the inode xattrs */
+ if(ea_name == NULL)
+ name_len = 0;
+ else
+ name_len = strnlen(ea_name,255);
+
+ pSMB->DataCount = sizeof(*parm_data) + ea_value_len + name_len + 1;
+ pSMB->MaxParameterCount = cpu_to_le16(2);
+ pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB size from sess */
+ pSMB->MaxSetupCount = 0;
+ pSMB->Reserved = 0;
+ pSMB->Flags = 0;
+ pSMB->Timeout = 0;
+ pSMB->Reserved2 = 0;
+ pSMB->ParameterOffset = offsetof(struct smb_com_transaction2_spi_req,
+ InformationLevel) - 4;
+ pSMB->DataOffset = pSMB->ParameterOffset + pSMB->ParameterCount;
+ pSMB->InformationLevel =
+ cpu_to_le16(SMB_SET_FILE_EA);
+
+ parm_data =
+ (struct fealist *) (((char *) &pSMB->hdr.Protocol) +
+ pSMB->DataOffset);
+ pSMB->ParameterOffset = cpu_to_le16(pSMB->ParameterOffset);
+ pSMB->DataOffset = cpu_to_le16(pSMB->DataOffset);
+ pSMB->SetupCount = 1;
+ pSMB->Reserved3 = 0;
+ pSMB->SubCommand = cpu_to_le16(TRANS2_SET_PATH_INFORMATION);
+ pSMB->ByteCount = 3 /* pad */ + pSMB->ParameterCount + pSMB->DataCount;
+ /* list_len goes out little endian: derive it from the still-native
+ DataCount BEFORE DataCount itself is byte swapped below (reading
+ DataCount after cpu_to_le16 produced a wrong list_len on big
+ endian hosts) */
+ parm_data->list_len = cpu_to_le32(pSMB->DataCount);
+ pSMB->DataCount = cpu_to_le16(pSMB->DataCount);
+ parm_data->list[0].EA_flags = 0;
+ /* we checked above that name len is less than 255 */
+ parm_data->list[0].name_len = (__u8)name_len;
+ /* EA names are always ASCII; guard strncpy against NULL ea_name
+ (name_len is 0 then, but strncpy with a NULL source is undefined) */
+ if(ea_name)
+ strncpy(parm_data->list[0].name,ea_name,name_len);
+ parm_data->list[0].name[name_len] = 0;
+ parm_data->list[0].value_len = cpu_to_le16(ea_value_len);
+ /* caller ensures that ea_value_len is less than 64K but
+ we need to ensure that it fits within the smb */
+
+ /*BB add length check that it would fit in negotiated SMB buffer size BB */
+ /* if(ea_value_len > buffer_size - 512 (enough for header)) */
+ if(ea_value_len)
+ memcpy(parm_data->list[0].name+name_len+1,ea_value,ea_value_len);
+
+ pSMB->TotalDataCount = pSMB->DataCount;
+ pSMB->ParameterCount = cpu_to_le16(pSMB->ParameterCount);
+ pSMB->TotalParameterCount = pSMB->ParameterCount;
+ pSMB->Reserved4 = 0;
+ pSMB->hdr.smb_buf_length += pSMB->ByteCount;
+ pSMB->ByteCount = cpu_to_le16(pSMB->ByteCount);
+ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+ (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+ if (rc) {
+ cFYI(1, ("SetPathInfo (EA) returned %d", rc));
+ }
+
+ if (pSMB)
+ cifs_buf_release(pSMB);
+
+ if (rc == -EAGAIN)
+ goto SetEARetry;
+
+ return rc;
+}
+
#endif
#include <linux/pagemap.h>
#include <linux/ctype.h>
#include <linux/utsname.h>
+#include <linux/mempool.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include "cifspdu.h"
unsigned char *p24);
extern int cifs_inet_pton(int, const char *, void *dst);
+extern mempool_t *cifs_req_poolp;
+
struct smb_vol {
char *username;
char *password;
int rw:1;
int retry:1;
int intr:1;
+ int setuids:1;
+ int noperm:1;
unsigned int rsize;
unsigned int wsize;
unsigned int sockopt;
unsigned int pdu_length, total_read;
struct smb_hdr *smb_buffer = NULL;
struct msghdr smb_msg;
- mm_segment_t temp_fs;
- struct iovec iov;
+ struct kvec iov;
struct socket *csocket = server->ssocket;
struct list_head *tmp;
struct cifsSesInfo *ses;
current->flags |= PF_MEMALLOC;
server->tsk = current; /* save process info to wake at shutdown */
cFYI(1, ("Demultiplex PID: %d", current->pid));
-
- temp_fs = get_fs(); /* we must turn off socket api parm checking */
- set_fs(get_ds());
+ write_lock(&GlobalSMBSeslock);
+ atomic_inc(&tcpSesAllocCount);
+ length = tcpSesAllocCount.counter;
+ write_unlock(&GlobalSMBSeslock);
+ if(length > 1) {
+ mempool_resize(cifs_req_poolp,
+ length + CIFS_MIN_RCV_POOL,
+ GFP_KERNEL);
+ }
while (server->tcpStatus != CifsExiting) {
if (smb_buffer == NULL)
iov.iov_base = smb_buffer;
iov.iov_len = sizeof (struct smb_hdr) - 1;
/* 1 byte less above since wct is not always returned in error cases */
- smb_msg.msg_iov = &iov;
- smb_msg.msg_iovlen = 1;
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
length =
- sock_recvmsg(csocket, &smb_msg,
- sizeof (struct smb_hdr) -
- 1 /* RFC1001 header and SMB header */ ,
- MSG_PEEK /* flags see socket.h */ );
+ kernel_recvmsg(csocket, &smb_msg,
+ &iov, 1,
+ sizeof (struct smb_hdr) -
+ 1 /* RFC1001 header and SMB header */ ,
+ MSG_PEEK /* flags see socket.h */ );
if(server->tcpStatus == CifsExiting) {
break;
if (temp[0] == (char) RFC1002_SESSION_KEEP_ALIVE) {
iov.iov_base = smb_buffer;
iov.iov_len = 4;
- length = sock_recvmsg(csocket, &smb_msg, 4, 0);
+ length = kernel_recvmsg(csocket, &smb_msg,
+ &iov, 1, 4, 0);
cFYI(0,("Received 4 byte keep alive packet"));
} else if (temp[0] == (char) RFC1002_POSITIVE_SESSION_RESPONSE) {
- iov.iov_base = smb_buffer;
- iov.iov_len = 4;
- length = sock_recvmsg(csocket, &smb_msg, 4, 0);
+ iov.iov_base = smb_buffer;
+ iov.iov_len = 4;
+ length = kernel_recvmsg(csocket, &smb_msg,
+ &iov, 1, 4, 0);
cFYI(1,("Good RFC 1002 session rsp"));
} else if ((temp[0] == (char)RFC1002_NEGATIVE_SESSION_RESPONSE)
&& (length == 5)) {
for (total_read = 0;
total_read < pdu_length;
total_read += length) {
- length = sock_recvmsg(csocket, &smb_msg,
+ length = kernel_recvmsg(csocket, &smb_msg,
+ &iov, 1,
pdu_length - total_read, 0);
if (length == 0) {
cERROR(1,
("Frame less than four bytes received %d bytes long.",
length));
if (length > 0) {
- length = sock_recvmsg(csocket, &smb_msg, length, 0); /* throw away junk frame */
+ length = kernel_recvmsg(csocket, &smb_msg,
+ &iov, 1,
+ length, 0); /* throw away junk frame */
cFYI(1,
(" with junk 0x%x in it ",
*(__u32 *) smb_buffer));
sock_release(csocket);
server->ssocket = NULL;
}
- set_fs(temp_fs);
if (smb_buffer) /* buffer usually freed in free_mid - need to free it on error or exit */
cifs_buf_release(smb_buffer);
}
kfree(server);
+ write_lock(&GlobalSMBSeslock);
+ atomic_dec(&tcpSesAllocCount);
+ length = tcpSesAllocCount.counter;
+ write_unlock(&GlobalSMBSeslock);
+ if(length > 0) {
+ mempool_resize(cifs_req_poolp,
+ length + CIFS_MIN_RCV_POOL,
+ GFP_KERNEL);
+ }
+
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ/4);
return 0;
vol->retry = 1;
} else if (strnicmp(data, "soft", 4) == 0) {
vol->retry = 0;
+ } else if (strnicmp(data, "perm", 4) == 0) {
+ vol->noperm = 0;
+ } else if (strnicmp(data, "noperm", 6) == 0) {
+ vol->noperm = 1;
+ } else if (strnicmp(data, "setuids", 7) == 0) {
+ vol->setuids = 1;
+ } else if (strnicmp(data, "nosetuids", 9) == 0) {
+ vol->setuids = 0;
} else if (strnicmp(data, "nohard", 6) == 0) {
vol->retry = 0;
} else if (strnicmp(data, "nosoft", 6) == 0) {
cifs_sb->mnt_file_mode = volume_info.file_mode;
cifs_sb->mnt_dir_mode = volume_info.dir_mode;
cFYI(1,("file mode: 0x%x dir mode: 0x%x",cifs_sb->mnt_file_mode,cifs_sb->mnt_dir_mode));
+
+ if(volume_info.noperm)
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
+ if(volume_info.setuids)
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID;
+
tcon =
find_unc(sin_server.sin_addr.s_addr, volume_info.UNC,
volume_info.username);
then we now have to set the mode if possible */
if ((cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
(oplock & CIFS_CREATE_ACTION))
- CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
+ if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+ CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
+ (__u64)current->euid,
+ (__u64)current->egid,
+ 0 /* dev */,
+ cifs_sb->local_nls);
+ } else {
+ CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
(__u64)-1,
(__u64)-1,
0 /* dev */,
cifs_sb->local_nls);
+ }
else {
/* BB implement via Windows security descriptors */
/* eg CIFSSMBWinSetPerms(xid,pTcon,full_path,mode,-1,-1,local_nls);*/
rc = -ENOMEM;
if (full_path && (pTcon->ses->capabilities & CAP_UNIX)) {
- rc = CIFSSMBUnixSetPerms(xid, pTcon,
- full_path, mode, current->euid, current->egid,
- device_number, cifs_sb->local_nls);
+ if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+ rc = CIFSSMBUnixSetPerms(xid, pTcon, full_path,
+ mode,(__u64)current->euid,(__u64)current->egid,
+ device_number, cifs_sb->local_nls);
+ } else {
+ rc = CIFSSMBUnixSetPerms(xid, pTcon,
+ full_path, mode, (__u64)-1, (__u64)-1,
+ device_number, cifs_sb->local_nls);
+ }
+
if(!rc) {
rc = cifs_get_inode_info_unix(&newinode, full_path,
inode->i_sb,xid);
d_instantiate(direntry, newinode);
if(direntry->d_inode)
direntry->d_inode->i_nlink = 2;
- if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
- CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
- (__u64)-1,
- (__u64)-1,
- 0 /* dev_t */,
- cifs_sb->local_nls);
+ if (cifs_sb->tcon->ses->capabilities & CAP_UNIX)
+ if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+ CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
+ (__u64)current->euid,
+ (__u64)current->egid,
+ 0 /* dev_t */,
+ cifs_sb->local_nls);
+ } else {
+ CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
+ (__u64)-1,
+ (__u64)-1,
+ 0 /* dev_t */,
+ cifs_sb->local_nls);
+ }
else { /* BB to be implemented via Windows secrty descriptors*/
/* eg CIFSSMBWinSetPerms(xid,pTcon,full_path,mode,-1,-1,local_nls);*/
}
void NTLMSSPOWFencrypt(unsigned char passwd[8],
unsigned char *ntlmchalresp, unsigned char p24[24]);
void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24);
-int decode_pw_buffer(char in_buffer[516], char *new_pwrd,
- int new_pwrd_size, __u32 * new_pw_len);
/*
This implements the X/Open SMB password encryption
/*
* fs/cifs/smberr.h
*
- * Copyright (c) International Business Machines Corp., 2002
+ * Copyright (c) International Business Machines Corp., 2002,2004
* Author(s): Steve French (sfrench@us.ibm.com)
*
* See Error Codes section of the SNIA CIFS Specification
#define ERRinvparm 87
#define ERRdiskfull 112
#define ERRinvname 123
+#define ERRinvlevel 124
#define ERRdirnotempty 145
#define ERRnotlocked 158
#define ERRalreadyexists 183
int rc = 0;
int i = 0;
struct msghdr smb_msg;
- struct iovec iov;
- mm_segment_t temp_fs;
+ struct kvec iov;
if(ssocket == NULL)
return -ENOTSOCK; /* BB eventually add reconnect code here */
smb_msg.msg_name = sin;
smb_msg.msg_namelen = sizeof (struct sockaddr);
- smb_msg.msg_iov = &iov;
- smb_msg.msg_iovlen = 1;
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; /* BB add more flags?*/
cFYI(1, ("Sending smb of length %d ", smb_buf_length));
dump_smb(smb_buffer, smb_buf_length + 4);
- temp_fs = get_fs(); /* we must turn off socket api parm checking */
- set_fs(get_ds());
while(iov.iov_len > 0) {
- rc = sock_sendmsg(ssocket, &smb_msg, smb_buf_length + 4);
+ rc = kernel_sendmsg(ssocket, &smb_msg, &iov, 1, smb_buf_length + 4);
if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
i++;
if(i > 60) {
iov.iov_base += rc;
iov.iov_len -= rc;
}
- set_fs(temp_fs);
if (rc < 0) {
cERROR(1,("Error %d sending data on socket to server.", rc));
#include "cifsproto.h"
#include "cifs_debug.h"
-int cifs_removexattr(struct dentry * direntry, const char * name)
+#define MAX_EA_VALUE_SIZE 65535
+#define CIFS_XATTR_DOS_ATTRIB "user.DOSATTRIB"
+#define CIFS_XATTR_USER_PREFIX "user."
+#define CIFS_XATTR_SYSTEM_PREFIX "system."
+#define CIFS_XATTR_OS2_PREFIX "OS2." /* BB should check for this someday */
+/* also note could add check for security prefix XATTR_SECURITY_PREFIX */
+
+
+/*
+ * cifs_removexattr: VFS removexattr entry point for cifs.  Only the
+ * "user." namespace is supported.  The EA is removed by setting it with
+ * a zero length value (presumably how the server interprets deletion --
+ * TODO confirm against the SNIA CIFS spec).  Returns 0 or a negative
+ * errno; -EOPNOTSUPP when CONFIG_CIFS_XATTR is not configured or the
+ * name is outside the user namespace.
+ */
+int cifs_removexattr(struct dentry * direntry, const char * ea_name)
{
int rc = -EOPNOTSUPP;
+#ifdef CONFIG_CIFS_XATTR
+ int xid;
+ struct cifs_sb_info *cifs_sb;
+ struct cifsTconInfo *pTcon;
+ struct super_block * sb;
+ char * full_path;
+
+ if(direntry == NULL)
+ return -EIO;
+ if(direntry->d_inode == NULL)
+ return -EIO;
+ sb = direntry->d_inode->i_sb;
+ if(sb == NULL)
+ return -EIO;
+ xid = GetXid();
+
+ cifs_sb = CIFS_SB(sb);
+ pTcon = cifs_sb->tcon;
+
+ /* rename semaphore keeps the constructed path stable vs renames */
+ down(&sb->s_vfs_rename_sem);
+ full_path = build_path_from_dentry(direntry);
+ up(&sb->s_vfs_rename_sem);
+ if(full_path == NULL) {
+ FreeXid(xid);
+ return -ENOMEM;
+ }
+ if(ea_name == NULL) {
+ cFYI(1,("Null xattr names not supported"));
+ } else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5)) {
+ /* 5 == strlen(CIFS_XATTR_USER_PREFIX) */
+ cFYI(1,("illegal xattr namespace %s (only user namespace supported)",ea_name));
+ /* BB what if no namespace prefix? */
+ /* Should we just pass them to server, except for
+ system and perhaps security prefixes? */
+ } else {
+ ea_name+=5; /* skip past user. prefix */
+ /* zero length value requests deletion of the EA */
+ rc = CIFSSMBSetEA(xid,pTcon,full_path,ea_name,NULL,
+ (__u16)0, cifs_sb->local_nls);
+ }
+ if (full_path)
+ kfree(full_path);
+ FreeXid(xid);
+#endif
return rc;
}
-int cifs_setxattr(struct dentry * direntry, const char * name,
- const void * value, size_t size, int flags)
+/*
+ * cifs_setxattr: VFS setxattr entry point for cifs.  Only the "user."
+ * namespace is supported; values larger than MAX_EA_VALUE_SIZE (64K-1,
+ * the wire limit of a __u16 length) are rejected.
+ *
+ * NOTE(review): the flags argument (XATTR_CREATE / XATTR_REPLACE) is
+ * currently ignored -- the server-side set is unconditional; confirm
+ * whether callers depend on those semantics.
+ */
+int cifs_setxattr(struct dentry * direntry, const char * ea_name,
+ const void * ea_value, size_t value_size, int flags)
{
int rc = -EOPNOTSUPP;
+#ifdef CONFIG_CIFS_XATTR
+ int xid;
+ struct cifs_sb_info *cifs_sb;
+ struct cifsTconInfo *pTcon;
+ struct super_block * sb;
+ char * full_path;
+
+ if(direntry == NULL)
+ return -EIO;
+ if(direntry->d_inode == NULL)
+ return -EIO;
+ sb = direntry->d_inode->i_sb;
+ if(sb == NULL)
+ return -EIO;
+ xid = GetXid();
+
+ cifs_sb = CIFS_SB(sb);
+ pTcon = cifs_sb->tcon;
+
+ /* rename semaphore keeps the constructed path stable vs renames */
+ down(&sb->s_vfs_rename_sem);
+ full_path = build_path_from_dentry(direntry);
+ up(&sb->s_vfs_rename_sem);
+ if(full_path == NULL) {
+ FreeXid(xid);
+ return -ENOMEM;
+ }
+ /* return dos attributes as pseudo xattr */
+ /* return alt name if available as pseudo attr */
+
+ /* if proc/fs/cifs/streamstoxattr is set then
+ search server for EAs or streams to
+ returns as xattrs */
+ if(value_size > MAX_EA_VALUE_SIZE) {
+ cFYI(1,("size of EA value too large"));
+ if(full_path)
+ kfree(full_path);
+ FreeXid(xid);
+ return -EOPNOTSUPP;
+ }
+
+ if(ea_name == NULL) {
+ cFYI(1,("Null xattr names not supported"));
+ } else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5)) {
+ /* 5 == strlen(CIFS_XATTR_USER_PREFIX) */
+ cFYI(1,("illegal xattr namespace %s (only user namespace supported)",ea_name));
+ /* BB what if no namespace prefix? */
+ /* Should we just pass them to server, except for
+ system and perhaps security prefixes? */
+ } else {
+ ea_name+=5; /* skip past user. prefix */
+ rc = CIFSSMBSetEA(xid,pTcon,full_path,ea_name,ea_value,
+ (__u16)value_size, cifs_sb->local_nls);
+ }
+ if (full_path)
+ kfree(full_path);
+ FreeXid(xid);
+#endif
return rc;
}
-ssize_t cifs_getxattr(struct dentry * direntry, const char * name,
- void * value, size_t size)
+/*
+ * cifs_getxattr: VFS getxattr entry point for cifs.  Only the "user."
+ * namespace is supported.  Returns the EA value length (copied into
+ * ea_value when buf_size is large enough; buf_size 0 probes the size),
+ * or a negative errno (-ENOENT no such EA, -ERANGE buffer too small,
+ * -EOPNOTSUPP namespace unsupported or CONFIG_CIFS_XATTR off).
+ */
+ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
+ void * ea_value, size_t buf_size)
{
ssize_t rc = -EOPNOTSUPP;
+#ifdef CONFIG_CIFS_XATTR
+ int xid;
+ struct cifs_sb_info *cifs_sb;
+ struct cifsTconInfo *pTcon;
+ struct super_block * sb;
+ char * full_path;
+
+ if(direntry == NULL)
+ return -EIO;
+ if(direntry->d_inode == NULL)
+ return -EIO;
+ sb = direntry->d_inode->i_sb;
+ if(sb == NULL)
+ return -EIO;
+ xid = GetXid();
+
+ cifs_sb = CIFS_SB(sb);
+ pTcon = cifs_sb->tcon;
+
+ /* rename semaphore keeps the constructed path stable vs renames */
+ down(&sb->s_vfs_rename_sem);
+ full_path = build_path_from_dentry(direntry);
+ up(&sb->s_vfs_rename_sem);
+ if(full_path == NULL) {
+ FreeXid(xid);
+ return -ENOMEM;
+ }
+ /* return dos attributes as pseudo xattr */
+ /* return alt name if available as pseudo attr */
+ if(ea_name == NULL) {
+ /* guard added to match cifs_setxattr/cifs_removexattr:
+ avoids passing NULL to strncmp below */
+ cFYI(1,("Null xattr names not supported"));
+ } else if(strncmp(ea_name,CIFS_XATTR_USER_PREFIX,5)) {
+ /* 5 == strlen(CIFS_XATTR_USER_PREFIX) */
+ cFYI(1,("illegal xattr namespace %s (only user namespace supported)",ea_name));
+ /* BB what if no namespace prefix? */
+ /* Should we just pass them to server, except for system? */
+ } else {
+ /* We could add a check here
+ if proc/fs/cifs/streamstoxattr is set then
+ search server for EAs or streams to
+ returns as xattrs */
+ ea_name+=5; /* skip past user. */
+ rc = CIFSSMBQueryEA(xid,pTcon,full_path,ea_name,ea_value,
+ buf_size, cifs_sb->local_nls);
+ }
+ if (full_path)
+ kfree(full_path);
+ FreeXid(xid);
+#endif
return rc;
}
-ssize_t cifs_listxattr(struct dentry * direntry, char * ea_data, size_t ea_size)
+ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size)
{
ssize_t rc = -EOPNOTSUPP;
#ifdef CONFIG_CIFS_XATTR
struct cifsTconInfo *pTcon;
struct super_block * sb;
char * full_path;
+
if(direntry == NULL)
return -EIO;
if(direntry->d_inode == NULL)
FreeXid(xid);
return -ENOMEM;
}
- /* return dosattributes as pseudo xattr */
+ /* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
/* if proc/fs/cifs/streamstoxattr is set then
search server for EAs or streams to
returns as xattrs */
- rc = CIFSSMBQAllEAs(xid,pTcon,full_path,ea_data,ea_size,cifs_sb->local_nls);
+ rc = CIFSSMBQAllEAs(xid,pTcon,full_path,data,buf_size,
+ cifs_sb->local_nls);
+
+ if (full_path)
+ kfree(full_path);
FreeXid(xid);
#endif
return rc;
}
int do_reset_coda_vfs_stats( ctl_table * table, int write, struct file * filp,
- void __user * buffer, size_t * lenp )
+ void __user * buffer, size_t * lenp, loff_t * ppos )
{
if ( write ) {
reset_coda_vfs_stats();
- filp->f_pos += *lenp;
+ *ppos += *lenp;
} else {
*lenp = 0;
}
int do_reset_coda_cache_inv_stats( ctl_table * table, int write,
struct file * filp, void __user * buffer,
- size_t * lenp )
+ size_t * lenp, loff_t * ppos )
{
if ( write ) {
reset_coda_cache_inv_stats();
- filp->f_pos += *lenp;
+ *ppos += *lenp;
} else {
*lenp = 0;
}
static int fb_getput_cmap(unsigned int fd, unsigned int cmd, unsigned long arg)
{
- struct fb_cmap __user *cmap;
+ struct fb_cmap_user __user *cmap;
struct fb_cmap32 __user *cmap32;
__u32 data;
int err;
{
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
+ if (dentry->d_extra_attributes) {
+ kfree(dentry->d_extra_attributes);
+ dentry->d_extra_attributes = NULL;
+ }
call_rcu(&dentry->d_rcu, d_callback);
}
struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
if (!list_empty(&this->d_lru)) {
dentry_stat.nr_unused--;
- list_del(&this->d_lru);
+ list_del_init(&this->d_lru);
}
/*
dentry->d_sb = NULL;
dentry->d_op = NULL;
dentry->d_fsdata = NULL;
+ dentry->d_extra_attributes = NULL;
dentry->d_mounted = 0;
dentry->d_cookie = NULL;
dentry->d_bucket = NULL;
/* Unhash the target: dput() will then get rid of it */
__d_drop(target);
+ /* flush any possible attributes */
+ if (dentry->d_extra_attributes) {
+ kfree(dentry->d_extra_attributes);
+ dentry->d_extra_attributes = NULL;
+ }
+ if (target->d_extra_attributes) {
+ kfree(target->d_extra_attributes);
+ target->d_extra_attributes = NULL;
+ }
+
list_del(&dentry->d_child);
list_del(&target->d_child);
*
* "buflen" should be positive. Caller holds the dcache_lock.
*/
-static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
+char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
struct dentry *root, struct vfsmount *rootmnt,
char *buffer, int buflen)
{
return ERR_PTR(-ENAMETOOLONG);
}
+EXPORT_SYMBOL_GPL(__d_path);
+
/* write full pathname into buffer and return start of pathname */
char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
char *buf, int buflen)
INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}
+/*
+ * flush_dentry_attributes: walk every bucket of the dentry hash table
+ * and discard the d_extra_attributes blob of each hashed dentry.
+ * Holds dcache_lock across the whole sweep, serializing against
+ * d_move/d_free which also touch d_extra_attributes.
+ */
+void flush_dentry_attributes (void)
+{
+ struct hlist_node *tmp;
+ struct dentry *dentry;
+ int i;
+
+ spin_lock(&dcache_lock);
+ /* hash table has d_hash_mask+1 buckets, hence the inclusive bound */
+ for (i = 0; i <= d_hash_mask; i++)
+ hlist_for_each_entry(dentry, tmp, dentry_hashtable+i, d_hash) {
+ /* kfree(NULL) is a no-op, so no null check is needed */
+ kfree(dentry->d_extra_attributes);
+ dentry->d_extra_attributes = NULL;
+ }
+ spin_unlock(&dcache_lock);
+}
+
+EXPORT_SYMBOL_GPL(flush_dentry_attributes);
+
static void __init dcache_init(unsigned long mempages)
{
/*
static ssize_t stat_read(struct file *file, char __user *buf, size_t len,
loff_t * ppos);
static struct file_operations stat_fops = {
+ .open = nonseekable_open,
.read = stat_read,
};
#endif
/* Devfs daemon file operations */
static struct file_operations devfsd_fops = {
+ .open = nonseekable_open,
.read = devfsd_read,
.ioctl = devfsd_ioctl,
.release = devfsd_close,
struct devfsd_notify_struct *info = fs_info->devfsd_info;
DECLARE_WAITQUEUE(wait, current);
- /* Can't seek (pread) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
/* Verify the task has grabbed the queue */
if (fs_info->devfsd_task != current)
return -EPERM;
num = sprintf(txt, "Number of entries: %u number of bytes: %u\n",
stat_num_entries, stat_num_bytes) + 1;
- /* Can't seek (pread) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (*ppos >= num)
return 0;
if (*ppos + len > num)
tsk->active_mm = mm;
activate_mm(active_mm, mm);
task_unlock(tsk);
+ arch_pick_mmap_layout(mm);
if (old_mm) {
if (active_mm != old_mm) BUG();
mmput(old_mm);
if(!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
/* Set-uid? */
if (mode & S_ISUID) {
+ current->personality &= ~PER_CLEAR_ON_SETID;
bprm->e_uid = inode->i_uid;
-#ifdef __i386__
- /* reset personality */
- current->personality = PER_LINUX;
-#endif
}
/* Set-gid? */
* executable.
*/
if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
+ current->personality &= ~PER_CLEAR_ON_SETID;
bprm->e_gid = inode->i_gid;
-#ifdef __i386__
- /* reset personality */
- current->personality = PER_LINUX;
-#endif
}
}
unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
unsigned char *types = NULL;
int need_revalidate = (filp->f_version != inode->i_version);
- int ret = 0;
+ int ret;
if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
- goto done;
+ goto success;
if (EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
types = ext2_filetype_table;
le32_to_cpu(de->inode), d_type);
if (over) {
ext2_put_page(page);
- goto done;
+ goto success;
}
}
}
ext2_put_page(page);
}
+success:
+ ret = 0;
done:
filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
filp->f_version = inode->i_version;
- return 0;
+ return ret;
}
/*
return error;
}
-static int dupfd(struct file *file, unsigned int start)
+int dupfd(struct file *file, unsigned int start)
{
struct files_struct * files = current->files;
int fd;
return fd;
}
+EXPORT_SYMBOL_GPL(dupfd);
+
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
int err = -EBADF;
}
filp->f_version = 0;
+ /* We can only do regular read/write on fifos */
+ filp->f_mode &= (FMODE_READ | FMODE_WRITE);
+
switch (filp->f_mode) {
case 1:
/*
memset(filp, 0, sizeof(*filp));
eventpoll_init_file(filp);
filp->f_flags = flags;
- filp->f_mode = (flags+1) & O_ACCMODE;
+ filp->f_mode = ((flags+1) & O_ACCMODE) | FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
atomic_set(&filp->f_count, 1);
filp->f_dentry = dentry;
filp->f_mapping = dentry->d_inode->i_mapping;
} else if (inode->i_state & I_DIRTY) {
/*
* Someone redirtied the inode while were writing back
- * the pages: nothing to do.
+ * the pages.
*/
+ list_move(&inode->i_list, &sb->s_dirty);
} else if (atomic_read(&inode->i_count)) {
/*
* The inode is clean, inuse
struct hfs_btree_header_rec *head;
struct address_space *mapping;
struct page *page;
- unsigned int shift, size;
+ unsigned int size;
tree = kmalloc(sizeof(*tree), GFP_KERNEL);
if (!tree)
goto fail_page;
if (!tree->node_count)
goto fail_page;
- for (shift = 0; size >>= 1; shift += 1)
- ;
- tree->node_size_shift = shift;
-
+ tree->node_size_shift = ffs(size) - 1;
tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
kunmap(page);
struct hfs_btree_header_rec *head;
struct address_space *mapping;
struct page *page;
- unsigned int shift, size;
+ unsigned int size;
tree = kmalloc(sizeof(*tree), GFP_KERNEL);
if (!tree)
goto fail_page;
if (!tree->node_count)
goto fail_page;
- for (shift = 0; size >>= 1; shift += 1)
- ;
- tree->node_size_shift = shift;
+ tree->node_size_shift = ffs(size) - 1;
tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
loff_t len, vma_len;
int ret;
+ if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1))
+ return -EINVAL;
+
if (vma->vm_start & ~HPAGE_MASK)
return -EINVAL;
unsigned long v_length;
unsigned long v_offset;
- h_vm_pgoff = vma->vm_pgoff << (HPAGE_SHIFT - PAGE_SHIFT);
- v_length = vma->vm_end - vma->vm_start;
+ h_vm_pgoff = vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT);
v_offset = (h_pgoff - h_vm_pgoff) << HPAGE_SHIFT;
-
/*
* Is this VMA fully outside the truncation point?
*/
if (h_vm_pgoff >= h_pgoff)
v_offset = 0;
+ v_length = vma->vm_end - vma->vm_start;
+
zap_hugepage_range(vma,
vma->vm_start + v_offset,
v_length - v_offset);
struct file *hugetlb_zero_setup(size_t size)
{
- int error;
+ int error = -ENOMEM;
struct file *file;
struct inode *inode;
struct dentry *dentry, *root;
struct qstr quick_string;
char buf[16];
- if (!can_do_mlock())
+ if (!capable(CAP_IPC_LOCK))
return ERR_PTR(-EPERM);
if (!is_hugepage_mem_enough(size))
return ERR_PTR(-ENOMEM);
+ if (!user_shm_lock(size, current->user))
+ return ERR_PTR(-ENOMEM);
+
root = hugetlbfs_vfsmount->mnt_root;
snprintf(buf, 16, "%lu", hugetlbfs_counter());
quick_string.name = buf;
quick_string.hash = 0;
dentry = d_alloc(root, &quick_string);
if (!dentry)
- return ERR_PTR(-ENOMEM);
+ goto out_shm_unlock;
error = -ENFILE;
file = get_empty_filp();
put_filp(file);
out_dentry:
dput(dentry);
+out_shm_unlock:
+ user_shm_unlock(size, current->user);
return ERR_PTR(error);
}
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
- /*
- * Now we have the locks, check again to see whether kjournald has
- * taken the buffer off the transaction.
- */
- if (!buffer_jbd(bh))
- goto zap_buffer;
+ jh = journal_grab_journal_head(bh);
+ if (!jh)
+ goto zap_buffer_no_jh;
- jh = bh2jh(bh);
transaction = jh->b_transaction;
if (transaction == NULL) {
/* First case: not on any transaction. If it
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
+ journal_put_journal_head(jh);
return ret;
} else {
/* There is no currently-running transaction. So the
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
+ journal_put_journal_head(jh);
return ret;
} else {
/* The orphan record's transaction has
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
+ journal_put_journal_head(jh);
return 0;
} else {
/* Good, the buffer belongs to the running transaction.
}
zap_buffer:
+ journal_put_journal_head(jh);
+zap_buffer_no_jh:
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
spin_unlock(&journal->j_state_lock);
int jffs_register_jffs_proc_dir(int mtd, struct jffs_control *c)
{
struct jffs_partition_dir *part_dir;
- struct proc_dir_entry *part_info = 0;
- struct proc_dir_entry *part_layout = 0;
- struct proc_dir_entry *part_root = 0;
+ struct proc_dir_entry *part_info = NULL;
+ struct proc_dir_entry *part_layout = NULL;
+ struct proc_dir_entry *part_root = NULL;
char name[10];
sprintf(name, "%d", mtd);
int jffs_unregister_jffs_proc_dir(struct jffs_control *c)
{
struct jffs_partition_dir *part_dir = jffs_part_dirs;
- struct jffs_partition_dir *prev_part_dir = 0;
+ struct jffs_partition_dir *prev_part_dir = NULL;
while (part_dir) {
if (part_dir->c == c) {
int count, int *eof, void *data)
{
struct jffs_control *c = (struct jffs_control *) data;
- struct jffs_fm *fm = 0;
- struct jffs_fm *last_fm = 0;
+ struct jffs_fm *fm = NULL;
+ struct jffs_fm *last_fm = NULL;
int len = 0;
/* Get the first item in the list */
#
# Makefile for the Linux Journalling Flash File System v2 (JFFS2)
#
-# $Id: Makefile.common,v 1.5 2004/07/15 16:06:41 dwmw2 Exp $
+# $Id: Makefile.common,v 1.6 2004/07/16 15:17:57 dwmw2 Exp $
#
obj-$(CONFIG_JFFS2_FS) += jffs2.o
jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o
jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o
jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o
-jffs2-$(CONFIG_JFFS2_PROC) += proc.o
* For licensing information, see the file 'LICENCE' in the
* jffs2 directory.
*
- * $Id: compr.h,v 1.5 2004/06/23 16:34:39 havasi Exp $
+ * $Id: compr.h,v 1.6 2004/07/16 15:17:57 dwmw2 Exp $
*
*/
void jffs2_lzo_exit(void);
#endif
-/* Prototypes from proc.c */
-int jffs2_proc_init(void);
-int jffs2_proc_exit(void);
-
#endif /* __JFFS2_COMPR_H__ */
goto bad2;
}
if (retlen != sizeof(marker)) {
- printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %d, got %zd\n",
+ printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
jeb->offset, sizeof(marker), retlen);
goto bad2;
}
continue;
}
if (retlen != rawlen) {
- printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %zd) reading header from obsolete node at %08x\n",
+ printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
retlen, rawlen, ref_offset(raw));
continue;
}
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: super.c,v 1.96 2004/07/13 08:57:30 dwmw2 Exp $
+ * $Id: super.c,v 1.97 2004/07/16 15:17:57 dwmw2 Exp $
*
*/
printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n");
return -ENOMEM;
}
-#ifdef CONFIG_JFFS2_PROC
- ret = jffs2_proc_init();
- if (ret) {
- printk(KERN_ERR "JFFS2 error: Failed to initialise proc interface\n");
- goto out;
- }
-#endif
ret = jffs2_compressors_init();
if (ret) {
printk(KERN_ERR "JFFS2 error: Failed to initialise compressors\n");
jffs2_destroy_slab_caches();
out_compressors:
jffs2_compressors_exit();
-#ifdef CONFIG_JFFS2_PROC
- jffs2_proc_exit();
-#endif
out:
return ret;
}
unregister_filesystem(&jffs2_fs_type);
jffs2_destroy_slab_caches();
jffs2_compressors_exit();
-#ifdef CONFIG_JFFS2_PROC
- jffs2_proc_exit();
-#endif
kmem_cache_destroy(jffs2_inode_cachep);
}
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/vfs.h>
+#include <asm/uaccess.h>
int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
mntput(mnt);
}
+/*
+ * simple_read_from_buffer - copy bytes from a kernel buffer to userspace
+ * @to:        userspace destination
+ * @count:     maximum number of bytes to copy
+ * @ppos:      file position; advanced by the number of bytes copied
+ * @from:      source buffer in kernel memory
+ * @available: total size of @from in bytes
+ *
+ * Returns the number of bytes copied (0 once *ppos is at or past
+ * @available), -EINVAL for a negative position, or -EFAULT if the
+ * copy to userspace faults.  *ppos is only updated on success.
+ */
+ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
+			const void *from, size_t available)
+{
+	loff_t pos = *ppos;
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= available)
+		return 0;
+	/* Clamp the request so we never read past the end of @from. */
+	if (count > available - pos)
+		count = available - pos;
+	if (copy_to_user(to, from + pos, count))
+		return -EFAULT;
+	*ppos = pos + count;
+	return count;
+}
+
EXPORT_SYMBOL(dcache_dir_close);
EXPORT_SYMBOL(dcache_dir_lseek);
EXPORT_SYMBOL(dcache_dir_open);
EXPORT_SYMBOL(simple_statfs);
EXPORT_SYMBOL(simple_sync_file);
EXPORT_SYMBOL(simple_unlink);
+EXPORT_SYMBOL(simple_read_from_buffer);
*
* Initial implementation of mandatory locks. SunOS turned out to be
* a rotten model, so I implemented the "obvious" semantics.
- * See 'linux/Documentation/mandatory.txt' for details.
+ * See 'Documentation/mandatory.txt' for details.
* Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
*
* Don't allow mandatory locks on mmap()'ed files. Added simple functions to
bh = bh->b_this_page;
} while (bh != head);
- if (buffer_heads_over_limit)
+ /*
+ * we cannot drop the bh if the page is not uptodate
+ * or a concurrent readpage would fail to serialize with the bh
+ * and it would read from disk before we reach the platter.
+ */
+ if (buffer_heads_over_limit && PageUptodate(page))
try_to_free_buffers(page);
}
{
struct path next;
struct inode *inode;
- int err;
+ int err, atomic;
unsigned int lookup_flags = nd->flags;
-
+
+ atomic = (lookup_flags & LOOKUP_ATOMIC);
+
while (*name=='/')
name++;
if (!*name)
if (err < 0)
break;
}
+ err = -EWOULDBLOCKIO;
+ if (atomic)
+ break;
nd->flags |= LOOKUP_CONTINUE;
/* This does the actual lookups.. */
err = do_lookup(nd, &this, &next);
if (err < 0)
break;
}
+ err = -EWOULDBLOCKIO;
+ if (atomic)
+ break;
err = do_lookup(nd, &this, &next);
if (err)
break;
if (f & O_DIRECTORY)
retval |= LOOKUP_DIRECTORY;
+ if (f & O_ATOMICLOOKUP)
+ retval |= LOOKUP_ATOMIC;
return retval;
}
struct namespace *new_ns;
struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
struct fs_struct *fs = tsk->fs;
+ struct vfsmount *p, *q;
if (!namespace)
return 0;
list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
spin_unlock(&vfsmount_lock);
- /* Second pass: switch the tsk->fs->* elements */
- if (fs) {
- struct vfsmount *p, *q;
- write_lock(&fs->lock);
-
- p = namespace->root;
- q = new_ns->root;
- while (p) {
+ /*
+ * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
+ * as belonging to new namespace. We have already acquired a private
+ * fs_struct, so tsk->fs->lock is not needed.
+ */
+ p = namespace->root;
+ q = new_ns->root;
+ while (p) {
+ q->mnt_namespace = new_ns;
+ if (fs) {
if (p == fs->rootmnt) {
rootmnt = p;
fs->rootmnt = mntget(q);
altrootmnt = p;
fs->altrootmnt = mntget(q);
}
- p = next_mnt(p, namespace->root);
- q = next_mnt(q, new_ns->root);
}
- write_unlock(&fs->lock);
+ p = next_mnt(p, namespace->root);
+ q = next_mnt(q, new_ns->root);
}
up_write(&tsk->namespace->sem);
#endif
}
if (!result)
- inode_setattr(inode, attr);
+ result = inode_setattr(inode, attr);
out:
unlock_kernel();
return result;
#include "ncpsign_kernel.h"
-static int _recv(struct socket *sock, unsigned char *ubuf, int size,
-		unsigned flags)
+/*
+ * _recv - receive up to @size bytes from @sock into kernel buffer @buf.
+ * Builds a single-element kvec and hands it to kernel_recvmsg(), which
+ * accepts kernel-space buffers directly (no set_fs() juggling, unlike
+ * the old sock_recvmsg()-based version it replaces).
+ */
+static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
 {
-	struct iovec iov;
-	struct msghdr msg;
-
-	iov.iov_base = ubuf;
-	iov.iov_len = size;
-
-	msg.msg_name = NULL;
-	msg.msg_namelen = 0;
-	msg.msg_control = NULL;
-	msg.msg_iov = &iov;
-	msg.msg_iovlen = 1;
-
-	return sock_recvmsg(sock, &msg, size, flags);
+	struct msghdr msg = {NULL, };
+	struct kvec iov = {buf, size};
+	return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
 }
-static inline int _send(struct socket *sock, const void *buff, int len)
+/*
+ * do_send - transmit a kernel-space kvec array on @sock.
+ * @vec/@count describe the fragments, @len the total byte count,
+ * @flags the MSG_* flags placed in msg_flags for kernel_sendmsg().
+ * Shared by _send(), ncpdgram_send(), __ncptcp_try_send() and
+ * info_server() below.
+ */
+static inline int do_send(struct socket *sock, struct kvec *vec, int count,
+		int len, unsigned flags)
 {
-	struct iovec iov;
-	struct msghdr msg;
-
-	iov.iov_base = (void *) buff;
-	iov.iov_len = len;
-
-	msg.msg_name = NULL;
-	msg.msg_namelen = 0;
-	msg.msg_control = NULL;
-	msg.msg_iov = &iov;
-	msg.msg_iovlen = 1;
-	msg.msg_flags = 0;
+	struct msghdr msg = { .msg_flags = flags };
+	return kernel_sendmsg(sock, &msg, vec, count, len);
+}
-	return sock_sendmsg(sock, &msg, len);
+/* _send - convenience wrapper: send one contiguous kernel buffer. */
+static int _send(struct socket *sock, const void *buff, int len)
+{
+	struct kvec vec;
+	vec.iov_base = (void *) buff;
+	vec.iov_len = len;
+	return do_send(sock, &vec, 1, len, 0);
 }
struct ncp_request_reply {
size_t datalen;
int result;
enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE } status;
- struct iovec* tx_ciov;
+ struct kvec* tx_ciov;
size_t tx_totallen;
size_t tx_iovlen;
- struct iovec tx_iov[3];
+ struct kvec tx_iov[3];
u_int16_t tx_type;
u_int32_t sign[6];
};
-void ncp_tcp_data_ready(struct sock *sk, int len) {
+void ncp_tcp_data_ready(struct sock *sk, int len)
+{
struct ncp_server *server = sk->sk_user_data;
server->data_ready(sk, len);
schedule_work(&server->rcv.tq);
}
-void ncp_tcp_error_report(struct sock *sk) {
+void ncp_tcp_error_report(struct sock *sk)
+{
struct ncp_server *server = sk->sk_user_data;
server->error_report(sk);
schedule_work(&server->rcv.tq);
}
-void ncp_tcp_write_space(struct sock *sk) {
+void ncp_tcp_write_space(struct sock *sk)
+{
struct ncp_server *server = sk->sk_user_data;
/* We do not need any locking: we first set tx.creq, and then we do sendmsg,
not vice versa... */
server->write_space(sk);
- if (server->tx.creq) {
+ if (server->tx.creq)
schedule_work(&server->tx.tq);
- }
}
-void ncpdgram_timeout_call(unsigned long v) {
+void ncpdgram_timeout_call(unsigned long v)
+{
struct ncp_server *server = (void*)v;
schedule_work(&server->timeout_tq);
}
-static inline void ncp_finish_request(struct ncp_request_reply *req, int result) {
+static inline void ncp_finish_request(struct ncp_request_reply *req, int result)
+{
req->result = result;
req->status = RQ_DONE;
wake_up_all(&req->wq);
}
-static void __abort_ncp_connection(struct ncp_server *server, struct ncp_request_reply *aborted, int err) {
+static void __abort_ncp_connection(struct ncp_server *server, struct ncp_request_reply *aborted, int err)
+{
struct ncp_request_reply *req;
ncp_invalidate_conn(server);
}
}
-static inline int get_conn_number(struct ncp_reply_header *rp) {
+static inline int get_conn_number(struct ncp_reply_header *rp)
+{
return rp->conn_low | (rp->conn_high << 8);
}
-static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err) {
+static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
+{
/* If req is done, we got signal, but we also received answer... */
switch (req->status) {
case RQ_IDLE:
}
}
-static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err) {
+static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
+{
down(&server->rcv.creq_sem);
__ncp_abort_request(server, req, err);
up(&server->rcv.creq_sem);
}
-static inline void __ncptcp_abort(struct ncp_server *server) {
+static inline void __ncptcp_abort(struct ncp_server *server)
+{
__abort_ncp_connection(server, NULL, 0);
}
-static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req) {
- struct msghdr msg;
- struct iovec iov[3];
-
+static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
+{
+ struct kvec vec[3];
/* sock_sendmsg updates iov pointers for us :-( */
- memcpy(iov, req->tx_ciov, req->tx_iovlen * sizeof(iov[0]));
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_iov = iov;
- msg.msg_iovlen = req->tx_iovlen;
- msg.msg_flags = MSG_DONTWAIT;
- return sock_sendmsg(sock, &msg, req->tx_totallen);
+ memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0]));
+ return do_send(sock, vec, req->tx_iovlen,
+ req->tx_totallen, MSG_DONTWAIT);
}
-static void __ncptcp_try_send(struct ncp_server *server) {
+static void __ncptcp_try_send(struct ncp_server *server)
+{
struct ncp_request_reply *rq;
- struct msghdr msg;
- struct iovec* iov;
- struct iovec iovc[3];
+ struct kvec *iov;
+ struct kvec iovc[3];
int result;
rq = server->tx.creq;
- if (!rq) {
+ if (!rq)
return;
- }
/* sock_sendmsg updates iov pointers for us :-( */
memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0]));
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_iov = iovc;
- msg.msg_iovlen = rq->tx_iovlen;
- msg.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT;
- result = sock_sendmsg(server->ncp_sock, &msg, rq->tx_totallen);
- if (result == -EAGAIN) {
+ result = do_send(server->ncp_sock, iovc, rq->tx_iovlen,
+ rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT);
+
+ if (result == -EAGAIN)
return;
- }
+
if (result < 0) {
printk(KERN_ERR "ncpfs: tcp: Send failed: %d\n", result);
__ncp_abort_request(server, rq, result);
rq->tx_ciov = iov;
}
-static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h) {
+static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
+{
req->status = RQ_INPROGRESS;
h->conn_low = server->connection;
h->conn_high = server->connection >> 8;
h->sequence = ++server->sequence;
}
-static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req) {
+static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
+{
size_t signlen;
struct ncp_request_header* h;
#define NCP_TCP_XMIT_VERSION (1)
#define NCP_TCP_RCVD_MAGIC (0x744E6350)
-static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req) {
+static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
+{
size_t signlen;
struct ncp_request_header* h;
__ncptcp_try_send(server);
}
-static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req) {
+static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
+{
if (server->ncp_sock->type == SOCK_STREAM)
ncptcp_start_request(server, req);
else
ncpdgram_start_request(server, req);
}
-static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req) {
+static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
+{
down(&server->rcv.creq_sem);
if (!ncp_conn_valid(server)) {
up(&server->rcv.creq_sem);
return 0;
}
-static void __ncp_next_request(struct ncp_server *server) {
+static void __ncp_next_request(struct ncp_server *server)
+{
struct ncp_request_reply *req;
server->rcv.creq = NULL;
__ncp_start_request(server, req);
}
-static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len) {
+static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
+{
if (server->info_sock) {
- struct iovec iov[2];
- struct msghdr msg;
+ struct kvec iov[2];
__u32 hdr[2];
hdr[0] = cpu_to_be32(len + 8);
iov[1].iov_base = (void *) data;
iov[1].iov_len = len;
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_iov = iov;
- msg.msg_iovlen = 2;
- msg.msg_flags = MSG_NOSIGNAL;
-
- sock_sendmsg(server->info_sock, &msg, len + 8);
+ do_send(server->info_sock, iov, 2, len + 8, MSG_NOSIGNAL);
}
}
-static void __ncpdgram_rcv_proc(void *s) {
+void ncpdgram_rcv_proc(void *s)
+{
struct ncp_server *server = s;
struct socket* sock;
struct ncp_reply_header reply;
int result;
- result = _recv(sock, (void*)&reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
+ result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
if (result < 0) {
break;
}
up(&server->rcv.creq_sem);
}
drop:;
- _recv(sock, (void*)&reply, sizeof(reply), MSG_DONTWAIT);
+ _recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
}
}
-void ncpdgram_rcv_proc(void *s) {
- mm_segment_t fs;
- struct ncp_server *server = s;
-
- fs = get_fs();
- set_fs(get_ds());
- __ncpdgram_rcv_proc(server);
- set_fs(fs);
-}
-
-static void __ncpdgram_timeout_proc(struct ncp_server *server) {
+static void __ncpdgram_timeout_proc(struct ncp_server *server)
+{
/* If timer is pending, we are processing another request... */
if (!timer_pending(&server->timeout_tm)) {
struct ncp_request_reply* req;
}
}
-void ncpdgram_timeout_proc(void *s) {
- mm_segment_t fs;
+void ncpdgram_timeout_proc(void *s)
+{
struct ncp_server *server = s;
-
- fs = get_fs();
- set_fs(get_ds());
down(&server->rcv.creq_sem);
__ncpdgram_timeout_proc(server);
up(&server->rcv.creq_sem);
- set_fs(fs);
}
-static inline void ncp_init_req(struct ncp_request_reply* req) {
+static inline void ncp_init_req(struct ncp_request_reply* req)
+{
init_waitqueue_head(&req->wq);
req->status = RQ_IDLE;
}
-static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len) {
+static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
+{
int result;
if (buffer) {
return result;
}
-static int __ncptcp_rcv_proc(struct ncp_server *server) {
+static int __ncptcp_rcv_proc(struct ncp_server *server)
+{
/* We have to check the result, so store the complete header */
while (1) {
int result;
}
}
-void ncp_tcp_rcv_proc(void *s) {
- mm_segment_t fs;
+void ncp_tcp_rcv_proc(void *s)
+{
struct ncp_server *server = s;
- fs = get_fs();
- set_fs(get_ds());
down(&server->rcv.creq_sem);
__ncptcp_rcv_proc(server);
up(&server->rcv.creq_sem);
- set_fs(fs);
- return;
}
-void ncp_tcp_tx_proc(void *s) {
- mm_segment_t fs;
+void ncp_tcp_tx_proc(void *s)
+{
struct ncp_server *server = s;
- fs = get_fs();
- set_fs(get_ds());
down(&server->rcv.creq_sem);
__ncptcp_try_send(server);
up(&server->rcv.creq_sem);
- set_fs(fs);
- return;
}
static int do_ncp_rpc_call(struct ncp_server *server, int size,
ncp_init_req(&req);
req.reply_buf = reply_buf;
req.datalen = max_reply_size;
- req.tx_iov[1].iov_base = (void *) server->packet;
+ req.tx_iov[1].iov_base = server->packet;
req.tx_iov[1].iov_len = size;
req.tx_iovlen = 1;
req.tx_totallen = size;
return -EIO;
}
{
- mm_segment_t fs;
sigset_t old_set;
unsigned long mask, flags;
recalc_sigpending();
spin_unlock_irqrestore(¤t->sighand->siglock, flags);
- fs = get_fs();
- set_fs(get_ds());
-
result = do_ncp_rpc_call(server, size, reply, max_reply_size);
- set_fs(fs);
-
spin_lock_irqsave(¤t->sighand->siglock, flags);
current->blocked = old_set;
recalc_sigpending();
static int
nfs_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
{
- struct iovec *iov = req->rq_rcv_buf.head;
+ struct kvec *iov = req->rq_rcv_buf.head;
int status, count, recvd, hdrlen;
if ((status = ntohl(*p++)))
nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct iovec *iov = rcvbuf->head;
+ struct kvec *iov = rcvbuf->head;
struct page **page;
int hdrlen, recvd;
int status, nr;
nfs_xdr_readlinkres(struct rpc_rqst *req, u32 *p, void *dummy)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct iovec *iov = rcvbuf->head;
+ struct kvec *iov = rcvbuf->head;
unsigned int hdrlen;
u32 *strlen, len;
char *string;
nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct iovec *iov = rcvbuf->head;
+ struct kvec *iov = rcvbuf->head;
struct page **page;
int hdrlen, recvd;
int status, nr;
nfs3_xdr_readlinkres(struct rpc_rqst *req, u32 *p, struct nfs_fattr *fattr)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct iovec *iov = rcvbuf->head;
+ struct kvec *iov = rcvbuf->head;
unsigned int hdrlen;
u32 *strlen, len;
char *string;
static int
nfs3_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
{
- struct iovec *iov = req->rq_rcv_buf.head;
+ struct kvec *iov = req->rq_rcv_buf.head;
int status, count, ocount, recvd, hdrlen;
status = ntohl(*p++);
WRITE32(FATTR4_WORD0_FILEID);
WRITE32(0);
- /* set up reply iovec
+ /* set up reply kvec
* toplevel_status + taglen + rescount + OP_PUTFH + status
* + OP_READDIR + status + verifer(2) = 9
*/
RESERVE_SPACE(4);
WRITE32(OP_READLINK);
- /* set up reply iovec
+ /* set up reply kvec
* toplevel_status + taglen + rescount + OP_PUTFH + status
* + OP_READLINK + status = 7
*/
if (status)
goto out;
- /* set up reply iovec
+ /* set up reply kvec
* toplevel status + taglen=0 + rescount + OP_PUTFH + status
* + OP_READ + status + eof + datalen = 9
*/
static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_readres *res)
{
- struct iovec *iov = req->rq_rcv_buf.head;
+ struct kvec *iov = req->rq_rcv_buf.head;
uint32_t *p;
uint32_t count, eof, recvd, hdrlen;
int status;
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct page *page = *rcvbuf->pages;
- struct iovec *iov = rcvbuf->head;
+ struct kvec *iov = rcvbuf->head;
unsigned int nr, pglen = rcvbuf->page_len;
uint32_t *end, *entry, *p, *kaddr;
uint32_t len, attrlen, word;
static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct iovec *iov = rcvbuf->head;
+ struct kvec *iov = rcvbuf->head;
uint32_t *strlen;
unsigned int hdrlen, len;
char *string;
static inline u32 *
encode_fh(u32 *p, struct svc_fh *fhp)
{
- int size = fhp->fh_handle.fh_size;
+ unsigned int size = fhp->fh_handle.fh_size;
*p++ = htonl(size);
if (size) p[XDR_QUADLEN(size)-1]=0;
memcpy(p, &fhp->fh_handle.fh_base, size);
nfs3svc_decode_readargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_readargs *args)
{
- int len;
+ unsigned int len;
int v,pn;
if (!(p = decode_fh(p, &args->fh))
if (len > NFSSVC_MAXBLKSIZE)
len = NFSSVC_MAXBLKSIZE;
- /* set up the iovec */
+ /* set up the kvec */
v=0;
while (len > 0) {
pn = rqstp->rq_resused;
nfs3svc_decode_writeargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_writeargs *args)
{
- int len, v;
+ unsigned int len, v, hdr;
if (!(p = decode_fh(p, &args->fh))
|| !(p = xdr_decode_hyper(p, &args->offset)))
args->stable = ntohl(*p++);
len = args->len = ntohl(*p++);
+ hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
+ if (rqstp->rq_arg.len < len + hdr)
+ return 0;
+
args->vec[0].iov_base = (void*)p;
- args->vec[0].iov_len = rqstp->rq_arg.head[0].iov_len -
- (((void*)p) - rqstp->rq_arg.head[0].iov_base);
+ args->vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
if (len > NFSSVC_MAXBLKSIZE)
len = NFSSVC_MAXBLKSIZE;
nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_symlinkargs *args)
{
- int len;
+ unsigned int len;
int avail;
char *old, *new;
- struct iovec *vec;
+ struct kvec *vec;
if (!(p = decode_fh(p, &args->ffh))
|| !(p = decode_filename(p, &args->fname, &args->flen))
*/
svc_take_page(rqstp);
len = ntohl(*p++);
- if (len <= 0 || len > NFS3_MAXPATHLEN || len >= PAGE_SIZE)
+ if (len == 0 || len > NFS3_MAXPATHLEN || len >= PAGE_SIZE)
return 0;
args->tname = new = page_address(rqstp->rq_respages[rqstp->rq_resused-1]);
args->tlen = len;
if (share_access & NFS4_SHARE_ACCESS_WRITE) {
status = get_write_access(filp->f_dentry->d_inode);
- if (!status)
- filp->f_mode = FMODE_WRITE;
- else
+ if (status)
return nfserrno(status);
+ filp->f_mode = (filp->f_mode | FMODE_WRITE) & ~FMODE_READ;
}
return nfs_ok;
}
{
if (share_access & NFS4_SHARE_ACCESS_WRITE) {
put_write_access(filp->f_dentry->d_inode);
- filp->f_mode = FMODE_READ;
+ filp->f_mode = (filp->f_mode | FMODE_READ) & ~FMODE_WRITE;
}
}
/*
* All that remains is to write the tag and operation count...
*/
- struct iovec *iov;
+ struct kvec *iov;
p = resp->tagp;
*p++ = htonl(resp->taglen);
memcpy(p, resp->tag, resp->taglen);
static struct svc_cacherep * nfscache;
static int cache_disabled = 1;
-static int nfsd_cache_append(struct svc_rqst *rqstp, struct iovec *vec);
+static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
/*
* locking for the reply cache:
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, u32 *statp)
{
struct svc_cacherep *rp;
- struct iovec *resv = &rqstp->rq_res.head[0], *cachv;
+ struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
int len;
if (!(rp = rqstp->rq_cacherep) || cache_disabled)
* keep a refcount....
*/
static int
-nfsd_cache_append(struct svc_rqst *rqstp, struct iovec *data)
+nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
- struct iovec *vec = &rqstp->rq_res.head[0];
+ struct kvec *vec = &rqstp->rq_res.head[0];
if (vec->iov_len + data->iov_len > PAGE_SIZE) {
printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
nfssvc_decode_readargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd_readargs *args)
{
- int len;
+ unsigned int len;
int v,pn;
if (!(p = decode_fh(p, &args->fh)))
return 0;
nfssvc_decode_writeargs(struct svc_rqst *rqstp, u32 *p,
struct nfsd_writeargs *args)
{
- int len;
+ unsigned int len;
int v;
if (!(p = decode_fh(p, &args->fh)))
return 0;
*/
int
nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
- struct iovec *vec, int vlen, unsigned long *count)
+ struct kvec *vec, int vlen, unsigned long *count)
{
struct raparms *ra;
mm_segment_t oldfs;
} else {
oldfs = get_fs();
set_fs(KERNEL_DS);
- err = vfs_readv(&file, vec, vlen, &offset);
+ err = vfs_readv(&file, (struct iovec __user *)vec, vlen, &offset);
set_fs(oldfs);
}
*/
int
nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
- struct iovec *vec, int vlen,
+ struct kvec *vec, int vlen,
unsigned long cnt, int *stablep)
{
struct svc_export *exp;
/* Write the data. */
oldfs = get_fs(); set_fs(KERNEL_DS);
- err = vfs_writev(&file, vec, vlen, &offset);
+ err = vfs_writev(&file, (struct iovec __user *)vec, vlen, &offset);
set_fs(oldfs);
if (err >= 0) {
nfsdstats.io_write += cnt;
ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
do_next_sb:
- ntfs_debug("Beginning sub-block at offset = 0x%x in the cb.",
+ ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
cb - cb_start);
/*
* Have we reached the end of the compression block or the end of the
* or signals an error (both covered by the rc test).
*/
for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
- ntfs_debug("In index root, offset 0x%x.", (u8*)ie - (u8*)ir);
+ ntfs_debug("In index root, offset 0x%zx.", (u8*)ie - (u8*)ir);
/* Bounds checks. */
if (unlikely((u8*)ie < (u8*)ir || (u8*)ie +
sizeof(INDEX_ENTRY_HEADER) > index_end ||
goto read_partial_upcase_page;
}
vol->upcase_len = ino->i_size >> UCHAR_T_SIZE_BITS;
- ntfs_debug("Read %llu bytes from $UpCase (expected %u bytes).",
+ ntfs_debug("Read %llu bytes from $UpCase (expected %zu bytes).",
ino->i_size, 64 * 1024 * sizeof(ntfschar));
iput(ino);
down(&ntfs_lock);
if (!f)
goto cleanup_dentry;
f->f_flags = flags;
- f->f_mode = (flags+1) & O_ACCMODE;
+ f->f_mode = ((flags+1) & O_ACCMODE) | FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
inode = dentry->d_inode;
if (f->f_mode & FMODE_WRITE) {
error = get_write_access(inode);
}
EXPORT_SYMBOL(generic_file_open);
+
+/*
+ * nonseekable_open - mark an open file as non-seekable
+ *
+ * Clears FMODE_LSEEK, FMODE_PREAD and FMODE_PWRITE on the file so that
+ * llseek (see vfs_llseek, which falls back to no_llseek when FMODE_LSEEK
+ * is clear) and the pread/pwrite system calls are refused on this
+ * descriptor.  This is used by subsystems that don't want seekable
+ * file descriptors; suitable for calling from an ->open() method.
+ * Always returns 0.
+ */
+int nonseekable_open(struct inode *inode, struct file *filp)
+{
+	filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	return 0;
+}
+
+EXPORT_SYMBOL(nonseekable_open);
static struct dentry *openpromfs_lookup(struct inode *, struct dentry *dentry, struct nameidata *nd);
static int openpromfs_unlink (struct inode *, struct dentry *dentry);
-static ssize_t nodenum_read(struct file *file, char *buf,
+static ssize_t nodenum_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct inode *inode = file->f_dentry->d_inode;
return count;
}
-static ssize_t property_read(struct file *filp, char *buf,
+static ssize_t property_read(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
struct inode *inode = filp->f_dentry->d_inode;
i = ((u32)(long)inode->u.generic_ip) >> 16;
if ((u16)((long)inode->u.generic_ip) == aliases) {
if (i >= aliases_nodes)
- p = 0;
+ p = NULL;
else
p = alias_names [i];
} else
return -EIO;
op->value [k] = 0;
if (k) {
- for (s = 0, p = op->value; p < op->value + k; p++) {
+ for (s = NULL, p = op->value; p < op->value + k; p++) {
if ((*p >= ' ' && *p <= '~') || *p == '\n') {
op->flag |= OPP_STRING;
s = p;
return count;
}
-static ssize_t property_write(struct file *filp, const char *buf,
+static ssize_t property_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
int i, j, k;
if (filp->f_pos >= 0xffffff || count >= 0xffffff)
return -EINVAL;
if (!filp->private_data) {
- i = property_read (filp, NULL, 0, 0);
+ i = property_read (filp, NULL, 0, NULL);
if (i)
return i;
}
mask &= mask2;
if (mask) {
*first &= ~mask;
- *first |= simple_strtoul (tmp, 0, 16);
+ *first |= simple_strtoul (tmp, NULL, 16);
op->flag |= OPP_DIRTY;
}
} else {
for (j = 0; j < first_off; j++)
mask >>= 1;
*q &= ~mask;
- *q |= simple_strtoul (tmp,0,16);
+ *q |= simple_strtoul (tmp,NULL,16);
}
buf += 9;
} else if ((q == last - 1) && last_cnt
for (j = 0; j < 8 - last_cnt; j++)
mask <<= 1;
*q &= ~mask;
- *q |= simple_strtoul (tmp, 0, 16);
+ *q |= simple_strtoul (tmp, NULL, 16);
buf += last_cnt;
} else {
char tchars[17]; /* XXX yuck... */
if (copy_from_user(tchars, buf, 16))
return -EFAULT;
- *q = simple_strtoul (tchars, 0, 16);
+ *q = simple_strtoul (tchars, NULL, 16);
buf += 9;
}
}
if (disk->fops->revalidate_disk)
disk->fops->revalidate_disk(disk);
if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
- return res;
+ return -EIO;
for (p = 1; p < state->limit; p++) {
sector_t size = state->parts[p].size;
sector_t from = state->parts[p].from;
#endif
}
kfree(state);
- return res;
+ return 0;
}
unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
struct iovec *iov = (struct iovec *)_iov;
size_t total_len;
- /* pread is not allowed on pipes. */
- if (unlikely(ppos != &filp->f_pos))
- return -ESPIPE;
-
total_len = iov_length(iov, nr_segs);
/* Null read succeeds. */
if (unlikely(total_len == 0))
struct iovec *iov = (struct iovec *)_iov;
size_t total_len;
- /* pwrite is not allowed on pipes. */
- if (unlikely(ppos != &filp->f_pos))
- return -ESPIPE;
-
total_len = iov_length(iov, nr_segs);
/* Null write succeeds. */
if (unlikely(total_len == 0))
f1->f_pos = f2->f_pos = 0;
f1->f_flags = O_RDONLY;
f1->f_op = &read_pipe_fops;
- f1->f_mode = 1;
+ f1->f_mode = FMODE_READ;
f1->f_version = 0;
/* write file */
f2->f_flags = O_WRONLY;
f2->f_op = &write_pipe_fops;
- f2->f_mode = 2;
+ f2->f_mode = FMODE_WRITE;
f2->f_version = 0;
fd_install(i, f1);
return error;
}
+EXPORT_SYMBOL_GPL(do_pipe);
+
/*
* pipefs should _never_ be mounted by userland - too much of security hassle,
* no real gain from having the whole whorehouse mounted. So we don't need
TASK_INTERRUPTIBLE |
TASK_UNINTERRUPTIBLE |
TASK_ZOMBIE |
+ TASK_DEAD |
TASK_STOPPED);
const char **p = &task_state_array[0];
child = NULL;
while ((child = of_get_next_child(np, child))) {
p = strrchr(child->full_name, '/');
- if (p == 0)
+ if (!p)
p = child->full_name;
else
++p;
lastp = &al->next;
}
of_node_put(child);
- *lastp = 0;
+ *lastp = NULL;
de->subdir = list;
}
struct device_node *root;
if ( !have_of )
return;
- proc_device_tree = proc_mkdir("device-tree", 0);
+ proc_device_tree = proc_mkdir("device-tree", NULL);
if (proc_device_tree == 0)
return;
root = of_find_node_by_path("/");
{
loff_t (*fn)(struct file *, loff_t, int);
- fn = default_llseek;
- if (file->f_op && file->f_op->llseek)
- fn = file->f_op->llseek;
+ fn = no_llseek;
+ if (file->f_mode & FMODE_LSEEK) {
+ fn = default_llseek;
+ if (file->f_op && file->f_op->llseek)
+ fn = file->f_op->llseek;
+ }
return fn(file, offset, origin);
}
EXPORT_SYMBOL(vfs_llseek);
EXPORT_SYMBOL(vfs_write);
+static inline loff_t file_pos_read(struct file *file)
+{
+ return file->f_pos;
+}
+
+static inline void file_pos_write(struct file *file, loff_t pos)
+{
+ file->f_pos = pos;
+}
+
asmlinkage ssize_t sys_read(unsigned int fd, char __user * buf, size_t count)
{
struct file *file;
file = fget_light(fd, &fput_needed);
if (file) {
- ret = vfs_read(file, buf, count, &file->f_pos);
+ loff_t pos = file_pos_read(file);
+ ret = vfs_read(file, buf, count, &pos);
+ file_pos_write(file, pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
- ret = vfs_write(file, buf, count, &file->f_pos);
+ loff_t pos = file_pos_read(file);
+ ret = vfs_write(file, buf, count, &pos);
+ file_pos_write(file, pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
- ret = vfs_read(file, buf, count, &pos);
+ ret = -ESPIPE;
+ if (file->f_mode & FMODE_PREAD)
+ ret = vfs_read(file, buf, count, &pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
- ret = vfs_write(file, buf, count, &pos);
+ ret = -ESPIPE;
+ if (file->f_mode & FMODE_PWRITE)
+ ret = vfs_write(file, buf, count, &pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
- ret = vfs_readv(file, vec, vlen, &file->f_pos);
+ loff_t pos = file_pos_read(file);
+ ret = vfs_readv(file, vec, vlen, &pos);
+ file_pos_write(file, pos);
fput_light(file, fput_needed);
}
file = fget_light(fd, &fput_needed);
if (file) {
- ret = vfs_writev(file, vec, vlen, &file->f_pos);
+ loff_t pos = file_pos_read(file);
+ ret = vfs_writev(file, vec, vlen, &pos);
+ file_pos_write(file, pos);
fput_light(file, fput_needed);
}
goto fput_in;
if (!in_file->f_op || !in_file->f_op->sendfile)
goto fput_in;
+ retval = -ESPIPE;
if (!ppos)
ppos = &in_file->f_pos;
+ else
+ if (!(in_file->f_mode & FMODE_PREAD))
+ goto fput_in;
retval = locks_verify_area(FLOCK_VERIFY_READ, in_inode, in_file, *ppos, count);
if (retval)
goto fput_in;
sema_init(&p->sem, 1);
p->op = op;
file->private_data = p;
+
+ /* SEQ files support lseek, but not pread/pwrite */
+ file->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
return 0;
}
EXPORT_SYMBOL(seq_open);
void *p;
int err = 0;
- if (ppos != &file->f_pos)
- return -EPIPE;
-
down(&m->sem);
/* grab buffer if we didn't have one */
if (!m->buf) {
int rq_bytes_sent;
int rq_iovlen;
- struct iovec rq_iov[4];
+ struct kvec rq_iov[4];
int (*rq_setup_read) (struct smb_request *);
void (*rq_callback) (struct smb_request *);
static int
_recvfrom(struct socket *socket, unsigned char *ubuf, int size, unsigned flags)
{
- struct iovec iov;
- struct msghdr msg;
- mm_segment_t fs;
-
- fs = get_fs();
- set_fs(get_ds());
- flags |= MSG_DONTWAIT | MSG_NOSIGNAL;
-
- msg.msg_flags = flags;
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- iov.iov_base = ubuf;
- iov.iov_len = size;
-
- size = sock_recvmsg(socket, &msg, size, flags);
-
- set_fs(fs);
- return size;
+ struct kvec iov = {ubuf, size};
+ struct msghdr msg = {.msg_flags = flags};
+ msg.msg_flags |= MSG_DONTWAIT | MSG_NOSIGNAL;
+ return kernel_recvmsg(socket, &msg, &iov, 1, size, msg.msg_flags);
}
/*
}
/*
- * Adjust the iovec to move on 'n' bytes (from nfs/sunrpc)
+ * Adjust the kvec to move on 'n' bytes (from nfs/sunrpc)
*/
static int
-smb_move_iov(struct msghdr *msg, struct iovec *niv, unsigned amount)
+smb_move_iov(struct kvec **data, size_t *num, struct kvec *vec, unsigned amount)
{
- struct iovec *iv = msg->msg_iov;
+ struct kvec *iv = *data;
int i;
int len;
/*
- * Eat any sent iovecs
+ * Eat any sent kvecs
*/
while (iv->iov_len <= amount) {
amount -= iv->iov_len;
iv++;
- msg->msg_iovlen--;
+ (*num)--;
}
/*
* And chew down the partial one
*/
- niv[0].iov_len = iv->iov_len-amount;
- niv[0].iov_base =((unsigned char *)iv->iov_base)+amount;
+ vec[0].iov_len = iv->iov_len-amount;
+ vec[0].iov_base =((unsigned char *)iv->iov_base)+amount;
iv++;
- len = niv[0].iov_len;
+ len = vec[0].iov_len;
/*
* And copy any others
*/
- for (i = 1; i < msg->msg_iovlen; i++) {
- niv[i] = *iv++;
- len += niv[i].iov_len;
+ for (i = 1; i < *num; i++) {
+ vec[i] = *iv++;
+ len += vec[i].iov_len;
}
- msg->msg_iov = niv;
+ *data = vec;
return len;
}
{
struct socket *sock;
unsigned int flags;
- struct iovec iov;
+ struct kvec iov;
struct msghdr msg;
- mm_segment_t fs;
int rlen = smb_len(server->header) - server->smb_read + 4;
int result = -EIO;
+ if (rlen > PAGE_SIZE)
+ rlen = PAGE_SIZE;
+
sock = server_sock(server);
if (!sock)
goto out;
if (sock->sk->sk_state != TCP_ESTABLISHED)
goto out;
- fs = get_fs();
- set_fs(get_ds());
-
flags = MSG_DONTWAIT | MSG_NOSIGNAL;
iov.iov_base = drop_buffer;
iov.iov_len = PAGE_SIZE;
msg.msg_flags = flags;
msg.msg_name = NULL;
msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
msg.msg_control = NULL;
- if (rlen > PAGE_SIZE)
- rlen = PAGE_SIZE;
-
- result = sock_recvmsg(sock, &msg, rlen, flags);
-
- set_fs(fs);
+ result = kernel_recvmsg(sock, &msg, &iov, 1, rlen, flags);
VERBOSE("read: %d\n", result);
if (result < 0) {
{
struct socket *sock;
unsigned int flags;
- struct iovec iov[4];
+ struct kvec iov[4];
+ struct kvec *p = req->rq_iov;
+ size_t num = req->rq_iovlen;
struct msghdr msg;
- mm_segment_t fs;
int rlen;
int result = -EIO;
if (sock->sk->sk_state != TCP_ESTABLISHED)
goto out;
- fs = get_fs();
- set_fs(get_ds());
-
flags = MSG_DONTWAIT | MSG_NOSIGNAL;
msg.msg_flags = flags;
msg.msg_name = NULL;
msg.msg_namelen = 0;
- msg.msg_iov = req->rq_iov;
- msg.msg_iovlen = req->rq_iovlen;
msg.msg_control = NULL;
/* Dont repeat bytes and count available bufferspace */
- rlen = smb_move_iov(&msg, iov, req->rq_bytes_recvd);
+ rlen = smb_move_iov(&p, &num, iov, req->rq_bytes_recvd);
if (req->rq_rlen < rlen)
rlen = req->rq_rlen;
- result = sock_recvmsg(sock, &msg, rlen, flags);
-
- set_fs(fs);
+ result = kernel_recvmsg(sock, &msg, p, num, rlen, flags);
VERBOSE("read: %d\n", result);
if (result < 0) {
int
smb_send_request(struct smb_request *req)
{
- mm_segment_t fs;
struct smb_sb_info *server = req->rq_server;
struct socket *sock;
- struct msghdr msg;
+ struct msghdr msg = {.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT};
int slen = req->rq_slen - req->rq_bytes_sent;
int result = -EIO;
- struct iovec iov[4];
+ struct kvec iov[4];
+ struct kvec *p = req->rq_iov;
+ size_t num = req->rq_iovlen;
sock = server_sock(server);
if (!sock)
if (sock->sk->sk_state != TCP_ESTABLISHED)
goto out;
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_iov = req->rq_iov;
- msg.msg_iovlen = req->rq_iovlen;
- msg.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT;
-
/* Dont repeat bytes */
if (req->rq_bytes_sent)
- smb_move_iov(&msg, iov, req->rq_bytes_sent);
+ smb_move_iov(&p, &num, iov, req->rq_bytes_sent);
- fs = get_fs();
- set_fs(get_ds());
- result = sock_sendmsg(sock, &msg, slen);
- set_fs(fs);
+ result = kernel_sendmsg(sock, &msg, p, num, slen);
if (result >= 0) {
req->rq_bytes_sent += result;
int write,
struct file *filp,
void *buffer,
- size_t *lenp)
+ size_t *lenp,
+ loff_t *ppos)
{
int c, ret, *valp = ctl->data;
__uint32_t vn_active;
- ret = proc_dointvec_minmax(ctl, write, filp, buffer, lenp);
+ ret = proc_dointvec_minmax(ctl, write, filp, buffer, lenp, ppos);
if (!ret && write && *valp) {
printk("XFS Clearing xfsstats\n");
#define O_DIRECTORY 0100000 /* must be a directory */
#define O_NOFOLLOW 0200000 /* don't follow links */
#define O_LARGEFILE 0400000 /* will be set by the kernel on every open */
+#define O_ATOMICLOOKUP 01000000 /* do atomic file lookup */
#define O_DIRECT 02000000 /* direct disk access - should check with OSF/1 */
#define O_NOATIME 04000000
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define devmem_is_allowed(x) 1
#endif /* __KERNEL__ */
#endif /* _ALPHA_PAGE_H */
{INR_OPEN, INR_OPEN}, /* RLIMIT_NOFILE */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_AS */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_NPROC */ \
- {PAGE_SIZE, PAGE_SIZE}, /* RLIMIT_MEMLOCK */ \
+ {32768, 32768 }, /* RLIMIT_MEMLOCK */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_LOCKS */ \
{MAX_SIGPENDING, MAX_SIGPENDING}, /* RLIMIT_SIGPENDING */ \
{MQ_BYTES_MAX, MQ_BYTES_MAX}, /* RLIMIT_MSGQUEUE */ \
#define __get_user_check(x,ptr,size,segment) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
- const __typeof__(*(ptr)) *__gu_addr = (ptr); \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (__access_ok((unsigned long)__gu_addr,size,segment)) { \
__gu_err = 0; \
#define __put_user_check(x,ptr,size,segment) \
({ \
long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (__access_ok((unsigned long)__pu_addr,size,segment)) { \
__pu_err = 0; \
extern unsigned long s3c2410_hclk;
extern unsigned long s3c2410_fclk;
+/* external functions for GPIO support
+ *
+ * These allow various different clients to access the same GPIO
+ * registers without conflicting. If your driver only owns the entire
+ * GPIO register, then it is safe to ioremap/__raw_{read|write} to it.
+*/
+
+/* s3c2410_gpio_cfgpin
+ *
+ * set the configuration of the given pin to the value passed.
+ *
+ * eg:
+ * s3c2410_gpio_cfgpin(S3C2410_GPA0, S3C2410_GPA0_ADDR0);
+ * s3c2410_gpio_cfgpin(S3C2410_GPE8, S3C2410_GPE8_SDDAT1);
+*/
+
+extern void s3c2410_gpio_cfgpin(unsigned int pin, unsigned int function);
+
+/* s3c2410_gpio_pullup
+ *
+ * configure the pull-up control on the given pin
+ *
+ * to = 1 => disable the pull-up
+ * 0 => enable the pull-up
+ *
+ * eg;
+ *
+ * s3c2410_gpio_pullup(S3C2410_GPB0, 0);
+ * s3c2410_gpio_pullup(S3C2410_GPE8, 0);
+*/
+
+extern void s3c2410_gpio_pullup(unsigned int pin, unsigned int to);
+
+extern void s3c2410_gpio_setpin(unsigned int pin, unsigned int to);
+
#endif /* __ASSEMBLY__ */
#include <asm/sizes.h>
/* linux/include/asm/hardware/s3c2410/
*
- * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
- * http://www.simtec.co.uk/products/SWLINUX/
+ * Copyright (c) 2003,2004 Simtec Electronics <linux@simtec.co.uk>
+ * http://www.simtec.co.uk/products/SWLINUX/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* 19-06-2003 BJD Created file
* 23-06-2003 BJD Updated GSTATUS registers
* 12-03-2004 BJD Updated include protection
+ * 20-07-2004 BJD Added GPIO pin numbers, added Port A definitions
*/
#ifndef __ASM_ARCH_REGS_GPIO_H
#define __ASM_ARCH_REGS_GPIO_H "$Id: gpio.h,v 1.5 2003/05/19 12:51:08 ben Exp $"
+#define S3C2410_GPIONO(bank,offset) ((bank) + (offset))
+
+#define S3C2410_GPIO_BANKA (32*0)
+#define S3C2410_GPIO_BANKB (32*1)
+#define S3C2410_GPIO_BANKC (32*2)
+#define S3C2410_GPIO_BANKD (32*3)
+#define S3C2410_GPIO_BANKE (32*4)
+#define S3C2410_GPIO_BANKF (32*5)
+#define S3C2410_GPIO_BANKG (32*6)
+#define S3C2410_GPIO_BANKH (32*7)
+
+#define S3C2410_GPIO_BASE(pin) ((((pin) & ~31) >> 1) + S3C2410_VA_GPIO)
+#define S3C2410_GPIO_OFFSET(pin) ((pin) & 31)
+
+/* general configuration options */
+
+#define S3C2410_GPIO_LEAVE (0xFFFFFFFF)
+
/* configure GPIO ports A..G */
#define S3C2410_GPIOREG(x) ((x) + S3C2410_VA_GPIO)
#define S3C2410_GPACON S3C2410_GPIOREG(0x00)
#define S3C2410_GPADAT S3C2410_GPIOREG(0x04)
+#define S3C2410_GPA0 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 0)
+#define S3C2410_GPA0_OUT (0<<0)
+#define S3C2410_GPA0_ADDR0 (1<<0)
+
+#define S3C2410_GPA1 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 1)
+#define S3C2410_GPA1_OUT (0<<1)
+#define S3C2410_GPA1_ADDR16 (1<<1)
+
+#define S3C2410_GPA2 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 2)
+#define S3C2410_GPA2_OUT (0<<2)
+#define S3C2410_GPA2_ADDR17 (1<<2)
+
+#define S3C2410_GPA3 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 3)
+#define S3C2410_GPA3_OUT (0<<3)
+#define S3C2410_GPA3_ADDR18 (1<<3)
+
+#define S3C2410_GPA4 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 4)
+#define S3C2410_GPA4_OUT (0<<4)
+#define S3C2410_GPA4_ADDR19 (1<<4)
+
+#define S3C2410_GPA5 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 5)
+#define S3C2410_GPA5_OUT (0<<5)
+#define S3C2410_GPA5_ADDR20 (1<<5)
+
+#define S3C2410_GPA6 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 6)
+#define S3C2410_GPA6_OUT (0<<6)
+#define S3C2410_GPA6_ADDR21 (1<<6)
+
+#define S3C2410_GPA7 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 7)
+#define S3C2410_GPA7_OUT (0<<7)
+#define S3C2410_GPA7_ADDR22 (1<<7)
+
+#define S3C2410_GPA8 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 8)
+#define S3C2410_GPA8_OUT (0<<8)
+#define S3C2410_GPA8_ADDR23 (1<<8)
+
+#define S3C2410_GPA9 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 9)
+#define S3C2410_GPA9_OUT (0<<9)
+#define S3C2410_GPA9_ADDR24 (1<<9)
+
+#define S3C2410_GPA10 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 10)
+#define S3C2410_GPA10_OUT (0<<10)
+#define S3C2410_GPA10_ADDR25 (1<<10)
+
+#define S3C2410_GPA11 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 11)
+#define S3C2410_GPA11_OUT (0<<11)
+#define S3C2410_GPA11_ADDR26 (1<<11)
+
+#define S3C2410_GPA12 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 12)
+#define S3C2410_GPA12_OUT (0<<12)
+#define S3C2410_GPA12_nGCS1 (1<<12)
+
+#define S3C2410_GPA13 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 13)
+#define S3C2410_GPA13_OUT (0<<13)
+#define S3C2410_GPA13_nGCS2 (1<<13)
+
+#define S3C2410_GPA14 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 14)
+#define S3C2410_GPA14_OUT (0<<14)
+#define S3C2410_GPA14_nGCS3 (1<<14)
+
+#define S3C2410_GPA15 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 15)
+#define S3C2410_GPA15_OUT (0<<15)
+#define S3C2410_GPA15_nGCS4 (1<<15)
+
+#define S3C2410_GPA16 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 16)
+#define S3C2410_GPA16_OUT (0<<16)
+#define S3C2410_GPA16_nGCS5 (1<<16)
+
+#define S3C2410_GPA17 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 17)
+#define S3C2410_GPA17_OUT (0<<17)
+#define S3C2410_GPA17_CLE (1<<17)
+
+#define S3C2410_GPA18 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 18)
+#define S3C2410_GPA18_OUT (0<<18)
+#define S3C2410_GPA18_ALE (1<<18)
+
+#define S3C2410_GPA19 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 19)
+#define S3C2410_GPA19_OUT (0<<19)
+#define S3C2410_GPA19_nFWE (1<<19)
+
+#define S3C2410_GPA20 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 20)
+#define S3C2410_GPA20_OUT (0<<20)
+#define S3C2410_GPA20_nFRE (1<<20)
+
+#define S3C2410_GPA21 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 21)
+#define S3C2410_GPA21_OUT (0<<21)
+#define S3C2410_GPA21_nRSTOUT (1<<21)
+
+#define S3C2410_GPA22 S3C2410_GPIONO(S3C2410_GPIO_BANKA, 22)
+#define S3C2410_GPA22_OUT (0<<22)
+#define S3C2410_GPA22_nFCE (1<<22)
+
/* 0x08 and 0x0c are reserved */
/* GPB is 10 IO pins, each configured by 2 bits each in GPBCON.
/* no i/o pin in port b can have value 3! */
+#define S3C2410_GPB0 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 0)
#define S3C2410_GPB0_INP (0x00 << 0)
#define S3C2410_GPB0_OUTP (0x01 << 0)
#define S3C2410_GPB0_TOUT0 (0x02 << 0)
+#define S3C2410_GPB1 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 1)
#define S3C2410_GPB1_INP (0x00 << 2)
#define S3C2410_GPB1_OUTP (0x01 << 2)
#define S3C2410_GPB1_TOUT1 (0x02 << 2)
+#define S3C2410_GPB2 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 2)
#define S3C2410_GPB2_INP (0x00 << 4)
#define S3C2410_GPB2_OUTP (0x01 << 4)
#define S3C2410_GPB2_TOUT2 (0x02 << 4)
+#define S3C2410_GPB3 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 3)
#define S3C2410_GPB3_INP (0x00 << 6)
#define S3C2410_GPB3_OUTP (0x01 << 6)
#define S3C2410_GPB3_TOUT3 (0x02 << 6)
+#define S3C2410_GPB4 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 4)
#define S3C2410_GPB4_INP (0x00 << 8)
#define S3C2410_GPB4_OUTP (0x01 << 8)
#define S3C2410_GPB4_TCLK0 (0x02 << 8)
#define S3C2410_GPB4_MASK (0x03 << 8)
+#define S3C2410_GPB5 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 5)
#define S3C2410_GPB5_INP (0x00 << 10)
#define S3C2410_GPB5_OUTP (0x01 << 10)
#define S3C2410_GPB5_nXBACK (0x02 << 10)
+#define S3C2410_GPB6 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 6)
#define S3C2410_GPB6_INP (0x00 << 12)
#define S3C2410_GPB6_OUTP (0x01 << 12)
#define S3C2410_GPB6_nXBREQ (0x02 << 12)
+#define S3C2410_GPB7 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 7)
#define S3C2410_GPB7_INP (0x00 << 14)
#define S3C2410_GPB7_OUTP (0x01 << 14)
#define S3C2410_GPB7_nXDACK1 (0x02 << 14)
+#define S3C2410_GPB8 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 8)
#define S3C2410_GPB8_INP (0x00 << 16)
#define S3C2410_GPB8_OUTP (0x01 << 16)
#define S3C2410_GPB8_nXDREQ1 (0x02 << 16)
+#define S3C2410_GPB9 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 9)
#define S3C2410_GPB9_INP (0x00 << 18)
#define S3C2410_GPB9_OUTP (0x01 << 18)
#define S3C2410_GPB9_nXDACK0 (0x02 << 18)
-#define S3C2410_GPB10_INP (0x00 << 18)
-#define S3C2410_GPB10_OUTP (0x01 << 18)
+#define S3C2410_GPB10 S3C2410_GPIONO(S3C2410_GPIO_BANKB, 10)
+#define S3C2410_GPB10_INP (0x00 << 18)
+#define S3C2410_GPB10_OUTP (0x01 << 18)
#define S3C2410_GPB10_nXDRE0 (0x02 << 18)
/* Port C consits of 16 GPIO/Special function
#define S3C2410_GPCDAT S3C2410_GPIOREG(0x24)
#define S3C2410_GPCUP S3C2410_GPIOREG(0x28)
+#define S3C2410_GPC0 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 0)
#define S3C2410_GPC0_INP (0x00 << 0)
#define S3C2410_GPC0_OUTP (0x01 << 0)
#define S3C2410_GPC0_LEND (0x02 << 0)
+#define S3C2410_GPC1 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 1)
#define S3C2410_GPC1_INP (0x00 << 2)
#define S3C2410_GPC1_OUTP (0x01 << 2)
#define S3C2410_GPC1_VCLK (0x02 << 2)
+#define S3C2410_GPC2 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 2)
#define S3C2410_GPC2_INP (0x00 << 4)
#define S3C2410_GPC2_OUTP (0x01 << 4)
#define S3C2410_GPC2_VLINE (0x02 << 4)
+#define S3C2410_GPC3 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 3)
#define S3C2410_GPC3_INP (0x00 << 6)
#define S3C2410_GPC3_OUTP (0x01 << 6)
#define S3C2410_GPC3_VFRAME (0x02 << 6)
+#define S3C2410_GPC4 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 4)
#define S3C2410_GPC4_INP (0x00 << 8)
#define S3C2410_GPC4_OUTP (0x01 << 8)
#define S3C2410_GPC4_VM (0x02 << 8)
+#define S3C2410_GPC5 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 5)
#define S3C2410_GPC5_INP (0x00 << 10)
#define S3C2410_GPC5_OUTP (0x01 << 10)
#define S3C2410_GPC5_LCDVF0 (0x02 << 10)
+#define S3C2410_GPC6 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 6)
#define S3C2410_GPC6_INP (0x00 << 12)
#define S3C2410_GPC6_OUTP (0x01 << 12)
#define S3C2410_GPC6_LCDVF1 (0x02 << 12)
+#define S3C2410_GPC7 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 7)
#define S3C2410_GPC7_INP (0x00 << 14)
#define S3C2410_GPC7_OUTP (0x01 << 14)
#define S3C2410_GPC7_LCDVF2 (0x02 << 14)
+#define S3C2410_GPC8 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 8)
#define S3C2410_GPC8_INP (0x00 << 16)
#define S3C2410_GPC8_OUTP (0x01 << 16)
#define S3C2410_GPC8_VD0 (0x02 << 16)
+#define S3C2410_GPC9 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 9)
#define S3C2410_GPC9_INP (0x00 << 18)
#define S3C2410_GPC9_OUTP (0x01 << 18)
#define S3C2410_GPC9_VD1 (0x02 << 18)
+#define S3C2410_GPC10 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 10)
#define S3C2410_GPC10_INP (0x00 << 20)
#define S3C2410_GPC10_OUTP (0x01 << 20)
#define S3C2410_GPC10_VD2 (0x02 << 20)
+#define S3C2410_GPC11 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 11)
#define S3C2410_GPC11_INP (0x00 << 22)
#define S3C2410_GPC11_OUTP (0x01 << 22)
#define S3C2410_GPC11_VD3 (0x02 << 22)
+#define S3C2410_GPC12 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 12)
#define S3C2410_GPC12_INP (0x00 << 24)
#define S3C2410_GPC12_OUTP (0x01 << 24)
#define S3C2410_GPC12_VD4 (0x02 << 24)
+#define S3C2410_GPC13 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 13)
#define S3C2410_GPC13_INP (0x00 << 26)
#define S3C2410_GPC13_OUTP (0x01 << 26)
#define S3C2410_GPC13_VD5 (0x02 << 26)
+#define S3C2410_GPC14 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 14)
#define S3C2410_GPC14_INP (0x00 << 28)
#define S3C2410_GPC14_OUTP (0x01 << 28)
#define S3C2410_GPC14_VD6 (0x02 << 28)
+#define S3C2410_GPC15 S3C2410_GPIONO(S3C2410_GPIO_BANKC, 15)
#define S3C2410_GPC15_INP (0x00 << 30)
#define S3C2410_GPC15_OUTP (0x01 << 30)
#define S3C2410_GPC15_VD7 (0x02 << 30)
#define S3C2410_GPDDAT S3C2410_GPIOREG(0x34)
#define S3C2410_GPDUP S3C2410_GPIOREG(0x38)
+#define S3C2410_GPD0 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 0)
#define S3C2410_GPD0_INP (0x00 << 0)
#define S3C2410_GPD0_OUTP (0x01 << 0)
#define S3C2410_GPD0_VD8 (0x02 << 0)
+#define S3C2410_GPD1 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 1)
#define S3C2410_GPD1_INP (0x00 << 2)
#define S3C2410_GPD1_OUTP (0x01 << 2)
#define S3C2410_GPD1_VD9 (0x02 << 2)
+#define S3C2410_GPD2 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 2)
#define S3C2410_GPD2_INP (0x00 << 4)
#define S3C2410_GPD2_OUTP (0x01 << 4)
#define S3C2410_GPD2_VD10 (0x02 << 4)
+#define S3C2410_GPD3 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 3)
#define S3C2410_GPD3_INP (0x00 << 6)
#define S3C2410_GPD3_OUTP (0x01 << 6)
#define S3C2410_GPD3_VD11 (0x02 << 6)
+#define S3C2410_GPD4 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 4)
#define S3C2410_GPD4_INP (0x00 << 8)
#define S3C2410_GPD4_OUTP (0x01 << 8)
#define S3C2410_GPD4_VD12 (0x02 << 8)
+#define S3C2410_GPD5 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 5)
#define S3C2410_GPD5_INP (0x00 << 10)
#define S3C2410_GPD5_OUTP (0x01 << 10)
#define S3C2410_GPD5_VD13 (0x02 << 10)
+#define S3C2410_GPD6 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 6)
#define S3C2410_GPD6_INP (0x00 << 12)
#define S3C2410_GPD6_OUTP (0x01 << 12)
#define S3C2410_GPD6_VD14 (0x02 << 12)
+#define S3C2410_GPD7 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 7)
#define S3C2410_GPD7_INP (0x00 << 14)
#define S3C2410_GPD7_OUTP (0x01 << 14)
#define S3C2410_GPD7_VD15 (0x02 << 14)
+#define S3C2410_GPD8 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 8)
#define S3C2410_GPD8_INP (0x00 << 16)
#define S3C2410_GPD8_OUTP (0x01 << 16)
#define S3C2410_GPD8_VD16 (0x02 << 16)
+#define S3C2410_GPD9 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 9)
#define S3C2410_GPD9_INP (0x00 << 18)
#define S3C2410_GPD9_OUTP (0x01 << 18)
#define S3C2410_GPD9_VD17 (0x02 << 18)
+#define S3C2410_GPD10 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 10)
#define S3C2410_GPD10_INP (0x00 << 20)
#define S3C2410_GPD10_OUTP (0x01 << 20)
#define S3C2410_GPD10_VD18 (0x02 << 20)
+#define S3C2410_GPD11 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 11)
#define S3C2410_GPD11_INP (0x00 << 22)
#define S3C2410_GPD11_OUTP (0x01 << 22)
#define S3C2410_GPD11_VD19 (0x02 << 22)
+#define S3C2410_GPD12 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 12)
#define S3C2410_GPD12_INP (0x00 << 24)
#define S3C2410_GPD12_OUTP (0x01 << 24)
#define S3C2410_GPD12_VD20 (0x02 << 24)
+#define S3C2410_GPD13 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 13)
#define S3C2410_GPD13_INP (0x00 << 26)
#define S3C2410_GPD13_OUTP (0x01 << 26)
#define S3C2410_GPD13_VD21 (0x02 << 26)
+#define S3C2410_GPD14 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 14)
#define S3C2410_GPD14_INP (0x00 << 28)
#define S3C2410_GPD14_OUTP (0x01 << 28)
#define S3C2410_GPD14_VD22 (0x02 << 28)
+#define S3C2410_GPD15 S3C2410_GPIONO(S3C2410_GPIO_BANKD, 15)
#define S3C2410_GPD15_INP (0x00 << 30)
#define S3C2410_GPD15_OUTP (0x01 << 30)
#define S3C2410_GPD15_VD23 (0x02 << 30)
#define S3C2410_GPEDAT S3C2410_GPIOREG(0x44)
#define S3C2410_GPEUP S3C2410_GPIOREG(0x48)
+#define S3C2410_GPE0 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 0)
#define S3C2410_GPE0_INP (0x00 << 0)
#define S3C2410_GPE0_OUTP (0x01 << 0)
#define S3C2410_GPE0_I2SLRCK (0x02 << 0)
#define S3C2410_GPE0_MASK (0x03 << 0)
+#define S3C2410_GPE1 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 1)
#define S3C2410_GPE1_INP (0x00 << 2)
#define S3C2410_GPE1_OUTP (0x01 << 2)
#define S3C2410_GPE1_I2SSCLK (0x02 << 2)
#define S3C2410_GPE1_MASK (0x03 << 2)
+#define S3C2410_GPE2 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 2)
#define S3C2410_GPE2_INP (0x00 << 4)
#define S3C2410_GPE2_OUTP (0x01 << 4)
#define S3C2410_GPE2_CDCLK (0x02 << 4)
+#define S3C2410_GPE3 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 3)
#define S3C2410_GPE3_INP (0x00 << 6)
#define S3C2410_GPE3_OUTP (0x01 << 6)
#define S3C2410_GPE3_I2SSDI (0x02 << 6)
#define S3C2410_GPE3_MASK (0x03 << 6)
+#define S3C2410_GPE4 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 4)
#define S3C2410_GPE4_INP (0x00 << 8)
#define S3C2410_GPE4_OUTP (0x01 << 8)
#define S3C2410_GPE4_I2SSDO (0x02 << 8)
#define S3C2410_GPE4_MASK (0x03 << 8)
+#define S3C2410_GPE5 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 5)
#define S3C2410_GPE5_INP (0x00 << 10)
#define S3C2410_GPE5_OUTP (0x01 << 10)
#define S3C2410_GPE5_SDCLK (0x02 << 10)
+#define S3C2410_GPE6 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 6)
#define S3C2410_GPE6_INP (0x00 << 12)
#define S3C2410_GPE6_OUTP (0x01 << 12)
#define S3C2410_GPE6_SDCLK (0x02 << 12)
+#define S3C2410_GPE7 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 7)
#define S3C2410_GPE7_INP (0x00 << 14)
#define S3C2410_GPE7_OUTP (0x01 << 14)
#define S3C2410_GPE7_SDCMD (0x02 << 14)
+#define S3C2410_GPE8 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 8)
#define S3C2410_GPE8_INP (0x00 << 16)
#define S3C2410_GPE8_OUTP (0x01 << 16)
#define S3C2410_GPE8_SDDAT1 (0x02 << 16)
+#define S3C2410_GPE9 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 9)
#define S3C2410_GPE9_INP (0x00 << 18)
#define S3C2410_GPE9_OUTP (0x01 << 18)
#define S3C2410_GPE9_SDDAT2 (0x02 << 18)
+#define S3C2410_GPE10 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 10)
#define S3C2410_GPE10_INP (0x00 << 20)
#define S3C2410_GPE10_OUTP (0x01 << 20)
#define S3C2410_GPE10_SDDAT3 (0x02 << 20)
+#define S3C2410_GPE11 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 11)
#define S3C2410_GPE11_INP (0x00 << 22)
#define S3C2410_GPE11_OUTP (0x01 << 22)
#define S3C2410_GPE11_SPIMISO0 (0x02 << 22)
+#define S3C2410_GPE12 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 12)
#define S3C2410_GPE12_INP (0x00 << 24)
#define S3C2410_GPE12_OUTP (0x01 << 24)
#define S3C2410_GPE12_SPIMOSI0 (0x02 << 24)
+#define S3C2410_GPE13 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 13)
#define S3C2410_GPE13_INP (0x00 << 26)
#define S3C2410_GPE13_OUTP (0x01 << 26)
#define S3C2410_GPE13_SPICLK0 (0x02 << 26)
+#define S3C2410_GPE14 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 14)
#define S3C2410_GPE14_INP (0x00 << 28)
#define S3C2410_GPE14_OUTP (0x01 << 28)
#define S3C2410_GPE14_IICSCL (0x02 << 28)
#define S3C2410_GPE14_MASK (0x03 << 28)
+#define S3C2410_GPE15 S3C2410_GPIONO(S3C2410_GPIO_BANKE, 15)
#define S3C2410_GPE15_INP (0x00 << 30)
#define S3C2410_GPE15_OUTP (0x01 << 30)
#define S3C2410_GPE15_IICSDA (0x02 << 30)
#define S3C2410_GPFDAT S3C2410_GPIOREG(0x54)
#define S3C2410_GPFUP S3C2410_GPIOREG(0x58)
-
+#define S3C2410_GPF0 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 0)
#define S3C2410_GPF0_INP (0x00 << 0)
#define S3C2410_GPF0_OUTP (0x01 << 0)
#define S3C2410_GPF0_EINT0 (0x02 << 0)
+#define S3C2410_GPF1 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 1)
#define S3C2410_GPF1_INP (0x00 << 2)
#define S3C2410_GPF1_OUTP (0x01 << 2)
#define S3C2410_GPF1_EINT1 (0x02 << 2)
+#define S3C2410_GPF2 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 2)
#define S3C2410_GPF2_INP (0x00 << 4)
#define S3C2410_GPF2_OUTP (0x01 << 4)
#define S3C2410_GPF2_EINT2 (0x02 << 4)
+#define S3C2410_GPF3 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 3)
#define S3C2410_GPF3_INP (0x00 << 6)
#define S3C2410_GPF3_OUTP (0x01 << 6)
#define S3C2410_GPF3_EINT3 (0x02 << 6)
+#define S3C2410_GPF4 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 4)
#define S3C2410_GPF4_INP (0x00 << 8)
#define S3C2410_GPF4_OUTP (0x01 << 8)
#define S3C2410_GPF4_EINT4 (0x02 << 8)
+#define S3C2410_GPF5 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 5)
#define S3C2410_GPF5_INP (0x00 << 10)
#define S3C2410_GPF5_OUTP (0x01 << 10)
#define S3C2410_GPF5_EINT5 (0x02 << 10)
+#define S3C2410_GPF6 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 6)
#define S3C2410_GPF6_INP (0x00 << 12)
#define S3C2410_GPF6_OUTP (0x01 << 12)
#define S3C2410_GPF6_EINT6 (0x02 << 12)
+#define S3C2410_GPF7 S3C2410_GPIONO(S3C2410_GPIO_BANKF, 7)
#define S3C2410_GPF7_INP (0x00 << 14)
#define S3C2410_GPF7_OUTP (0x01 << 14)
#define S3C2410_GPF7_EINT7 (0x02 << 14)
#define S3C2410_GPGDAT S3C2410_GPIOREG(0x64)
#define S3C2410_GPGUP S3C2410_GPIOREG(0x68)
+#define S3C2410_GPG0 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 0)
#define S3C2410_GPG0_INP (0x00 << 0)
#define S3C2410_GPG0_OUTP (0x01 << 0)
#define S3C2410_GPG0_EINT8 (0x02 << 0)
+#define S3C2410_GPG1 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 1)
#define S3C2410_GPG1_INP (0x00 << 2)
#define S3C2410_GPG1_OUTP (0x01 << 2)
#define S3C2410_GPG1_EINT9 (0x02 << 2)
+#define S3C2410_GPG2 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 2)
#define S3C2410_GPG2_INP (0x00 << 4)
#define S3C2410_GPG2_OUTP (0x01 << 4)
#define S3C2410_GPG2_EINT10 (0x02 << 4)
+#define S3C2410_GPG3 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 3)
#define S3C2410_GPG3_INP (0x00 << 6)
#define S3C2410_GPG3_OUTP (0x01 << 6)
#define S3C2410_GPG3_EINT11 (0x02 << 6)
+#define S3C2410_GPG4 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 4)
#define S3C2410_GPG4_INP (0x00 << 8)
#define S3C2410_GPG4_OUTP (0x01 << 8)
#define S3C2410_GPG4_EINT12 (0x02 << 8)
#define S3C2410_GPG4_LCDPWREN (0x03 << 8)
+#define S3C2410_GPG5 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 5)
#define S3C2410_GPG5_INP (0x00 << 10)
#define S3C2410_GPG5_OUTP (0x01 << 10)
#define S3C2410_GPG5_EINT13 (0x02 << 10)
#define S3C2410_GPG5_SPIMISO1 (0x03 << 10)
+#define S3C2410_GPG6 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 6)
#define S3C2410_GPG6_INP (0x00 << 12)
#define S3C2410_GPG6_OUTP (0x01 << 12)
#define S3C2410_GPG6_EINT14 (0x02 << 12)
#define S3C2410_GPG6_SPIMOSI1 (0x03 << 12)
+#define S3C2410_GPG7 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 7)
#define S3C2410_GPG7_INP (0x00 << 14)
#define S3C2410_GPG7_OUTP (0x01 << 14)
#define S3C2410_GPG7_EINT15 (0x02 << 14)
#define S3C2410_GPG7_SPICLK1 (0x03 << 14)
+#define S3C2410_GPG8 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 8)
#define S3C2410_GPG8_INP (0x00 << 16)
#define S3C2410_GPG8_OUTP (0x01 << 16)
#define S3C2410_GPG8_EINT16 (0x02 << 16)
+#define S3C2410_GPG9 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 9)
#define S3C2410_GPG9_INP (0x00 << 18)
#define S3C2410_GPG9_OUTP (0x01 << 18)
#define S3C2410_GPG9_EINT17 (0x02 << 18)
+#define S3C2410_GPG10 S3C2410_GPIONO(S3C2410_GPIO_BANKG, 10)
#define S3C2410_GPG10_INP (0x00 << 20)
#define S3C2410_GPG10_OUTP (0x01 << 20)
#define S3C2410_GPG10_EINT18 (0x02 << 20)
+#define S3C2410_GPG11   S3C2410_GPIONO(S3C2410_GPIO_BANKG, 11)
#define S3C2410_GPG11_INP (0x00 << 22)
#define S3C2410_GPG11_OUTP (0x01 << 22)
#define S3C2410_GPG11_EINT19 (0x02 << 22)
#define S3C2410_GPG11_TCLK1 (0x03 << 22)
+#define S3C2410_GPG12   S3C2410_GPIONO(S3C2410_GPIO_BANKG, 12)
#define S3C2410_GPG12_INP (0x00 << 24)
#define S3C2410_GPG12_OUTP (0x01 << 24)
#define S3C2410_GPG12_EINT18 (0x02 << 24)
#define S3C2410_GPG12_XMON (0x03 << 24)
+#define S3C2410_GPG13   S3C2410_GPIONO(S3C2410_GPIO_BANKG, 13)
#define S3C2410_GPG13_INP (0x00 << 26)
#define S3C2410_GPG13_OUTP (0x01 << 26)
#define S3C2410_GPG13_EINT18 (0x02 << 26)
#define S3C2410_GPG13_nXPON (0x03 << 26)
+#define S3C2410_GPG14   S3C2410_GPIONO(S3C2410_GPIO_BANKG, 14)
#define S3C2410_GPG14_INP (0x00 << 28)
#define S3C2410_GPG14_OUTP (0x01 << 28)
#define S3C2410_GPG14_EINT18 (0x02 << 28)
#define S3C2410_GPG14_YMON (0x03 << 28)
+#define S3C2410_GPG15   S3C2410_GPIONO(S3C2410_GPIO_BANKG, 15)
#define S3C2410_GPG15_INP (0x00 << 30)
#define S3C2410_GPG15_OUTP (0x01 << 30)
#define S3C2410_GPG15_EINT18 (0x02 << 30)
#define S3C2410_GPHDAT S3C2410_GPIOREG(0x74)
#define S3C2410_GPHUP S3C2410_GPIOREG(0x78)
+#define S3C2410_GPH0 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 0)
#define S3C2410_GPH0_INP (0x00 << 0)
#define S3C2410_GPH0_OUTP (0x01 << 0)
#define S3C2410_GPH0_nCTS0 (0x02 << 0)
+#define S3C2410_GPH1 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 1)
#define S3C2410_GPH1_INP (0x00 << 2)
#define S3C2410_GPH1_OUTP (0x01 << 2)
#define S3C2410_GPH1_nRTS0 (0x02 << 2)
+#define S3C2410_GPH2 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 2)
#define S3C2410_GPH2_INP (0x00 << 4)
#define S3C2410_GPH2_OUTP (0x01 << 4)
#define S3C2410_GPH2_TXD0 (0x02 << 4)
+#define S3C2410_GPH3 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 3)
#define S3C2410_GPH3_INP (0x00 << 6)
#define S3C2410_GPH3_OUTP (0x01 << 6)
#define S3C2410_GPH3_RXD0 (0x02 << 6)
+#define S3C2410_GPH4 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 4)
#define S3C2410_GPH4_INP (0x00 << 8)
#define S3C2410_GPH4_OUTP (0x01 << 8)
#define S3C2410_GPH4_TXD1 (0x02 << 8)
+#define S3C2410_GPH5 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 5)
#define S3C2410_GPH5_INP (0x00 << 10)
#define S3C2410_GPH5_OUTP (0x01 << 10)
#define S3C2410_GPH5_RXD1 (0x02 << 10)
+#define S3C2410_GPH6 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 6)
#define S3C2410_GPH6_INP (0x00 << 12)
#define S3C2410_GPH6_OUTP (0x01 << 12)
#define S3C2410_GPH6_TXD2 (0x02 << 12)
#define S3C2410_GPH6_nRTS1 (0x03 << 12)
+#define S3C2410_GPH7 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 7)
#define S3C2410_GPH7_INP (0x00 << 14)
#define S3C2410_GPH7_OUTP (0x01 << 14)
#define S3C2410_GPH7_RXD2 (0x02 << 14)
#define S3C2410_GPH7_nCTS1 (0x03 << 14)
+#define S3C2410_GPH8 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 8)
#define S3C2410_GPH8_INP (0x00 << 16)
#define S3C2410_GPH8_OUTP (0x01 << 16)
#define S3C2410_GPH8_UCLK (0x02 << 16)
-#define S3C2410_GPH9_INP (0x00 << 18)
-#define S3C2410_GPH9_OUTP (0x01 << 18)
-#define S3C2410_GPH9_CLKOUT0 (0x02 << 18)
+#define S3C2410_GPH9 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 9)
+#define S3C2410_GPH9_INP (0x00 << 18)
+#define S3C2410_GPH9_OUTP (0x01 << 18)
+#define S3C2410_GPH9_CLKOUT0 (0x02 << 18)
-#define S3C2410_GPH10_INP (0x00 << 20)
-#define S3C2410_GPH10_OUTP (0x01 << 20)
-#define S3C2410_GPH10_CLKOUT1 (0x02 << 20)
+#define S3C2410_GPH10 S3C2410_GPIONO(S3C2410_GPIO_BANKH, 10)
+#define S3C2410_GPH10_INP (0x00 << 20)
+#define S3C2410_GPH10_OUTP (0x01 << 20)
+#define S3C2410_GPH10_CLKOUT1 (0x02 << 20)
/* miscellaneous control */
extern int _find_first_zero_bit_be(void * p, unsigned size);
extern int _find_next_zero_bit_be(void * p, int size, int offset);
extern int _find_first_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_bit_be(unsigned long *p, int size, int offset);
+extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
/*
* The __* form of bitops are non-atomic and may be reordered.
* Start addresses are inclusive and end addresses are exclusive;
* start addresses should be rounded down, end addresses up.
*
- * See linux/Documentation/cachetlb.txt for more information.
+ * See Documentation/cachetlb.txt for more information.
* Please note that the implementation of these, and the required
* effects are cache-type (VIVT/VIPT/PIPT) specific.
*
* ioremap and friends.
*
* ioremap takes a PCI memory address, as specified in
- * linux/Documentation/IO-mapping.txt.
+ * Documentation/IO-mapping.txt.
*/
extern void * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
extern void __iounmap(void *addr);
* See arch/arm/kernel/sys-arm.c for ugly details..
*/
struct ipc_kludge {
- struct msgbuf *msgp;
+ struct msgbuf __user *msgp;
long msgtyp;
};
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define devmem_is_allowed(x) 1
+
#endif /* __KERNEL__ */
#endif
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { PAGE_SIZE, PAGE_SIZE }, \
+ { 32768, 32768 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING}, \
* published by the Free Software Foundation.
*
* Structure passed to kernel to tell it about the
- * hardware it's running on. See linux/Documentation/arm/Setup
+ * hardware it's running on. See Documentation/arm/Setup
* for more info.
*/
#ifndef __ASMARM_SETUP_H
#define SIG_SETMASK 2 /* for setting the signal mask */
/* Type of a signal handler. */
-typedef void (*__sighandler_t)(int);
+typedef void __signalfn_t(int);
+typedef __signalfn_t __user *__sighandler_t;
+
+typedef void __restorefn_t(void);
+typedef __restorefn_t __user *__sigrestore_t;
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
__sighandler_t sa_handler;
old_sigset_t sa_mask;
unsigned long sa_flags;
- void (*sa_restorer)(void);
+ __sigrestore_t sa_restorer;
};
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
- void (*sa_restorer)(void);
+ __sigrestore_t sa_restorer;
sigset_t sa_mask; /* mask last for extensibility */
};
#endif /* __KERNEL__ */
typedef struct sigaltstack {
- void *ss_sp;
+ void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
/* We use 33-bit arithmetic here... */
#define __range_ok(addr,size) ({ \
unsigned long flag, sum; \
+ __chk_user_ptr(addr); \
__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
: "=&r" (flag), "=&r" (sum) \
: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
#define get_user(x,p) \
({ \
- const register typeof(*(p)) *__p asm("r0") = (p); \
+ const register typeof(*(p)) __user *__p asm("r0") = (p);\
register typeof(*(p)) __r1 asm("r1"); \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
+ __chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
#define put_user(x,p) \
({ \
const register typeof(*(p)) __r1 asm("r1") = (x); \
- const register typeof(*(p)) *__p asm("r0") = (p); \
+ const register typeof(*(p)) __user *__p asm("r0") = (p);\
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
__typeof__(*(ptr)) __pu_val = (x); \
+ __chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
* ioremap and friends.
*
* ioremap takes a PCI memory address, as specified in
- * linux/Documentation/IO-mapping.txt.
+ * Documentation/IO-mapping.txt.
*/
extern void * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
extern void __iounmap(void *addr);
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define devmem_is_allowed(x) 1
+
#endif /* __KERNEL__ */
#endif
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { PAGE_SIZE, PAGE_SIZE }, \
+ { 32768, 32768 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING}, \
* published by the Free Software Foundation.
*
* Structure passed to kernel to tell it about the
- * hardware it's running on. See linux/Documentation/arm/Setup
+ * hardware it's running on. See Documentation/arm/Setup
* for more info.
*/
#ifndef __ASMARM_SETUP_H
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define devmem_is_allowed(x) 1
+
#endif /* __KERNEL__ */
#endif /* _CRIS_PAGE_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { PAGE_SIZE, PAGE_SIZE }, \
+ { 32768, 32768 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#define page_test_and_clear_young(page) (0)
#endif
+#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
+#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
+#endif
+
#endif /* _ASM_GENERIC_PGTABLE_H */
#endif /* __ASSEMBLY__ */
+#define devmem_is_allowed(x) 1
+
#endif /* __KERNEL__ */
#endif /* _H8300_PAGE_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { PAGE_SIZE, PAGE_SIZE }, \
+ { 32768, 32768 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#define AT_SYSINFO_EHDR 33
#ifdef __KERNEL__
-/* child inherits the personality of the parent */
#define SET_PERSONALITY(ex, ibcs2) do { } while (0)
+/*
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically.
+ */
+#define elf_read_implies_exec_binary(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
+
extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct *);
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
+#define O_ATOMICLOOKUP 02000000 /* do atomic file lookup */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
#ifdef CONFIG_X86_IO_APIC
-#ifdef CONFIG_PCI_USE_VECTOR
+#ifdef CONFIG_PCI_MSI
static inline int use_pci_vector(void) {return 1;}
static inline void disable_edge_ioapic_vector(unsigned int vector) { }
static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
#ifndef _ASM_IRQ_VECTORS_LIMITS_H
#define _ASM_IRQ_VECTORS_LIMITS_H
-#ifdef CONFIG_PCI_USE_VECTOR
+#ifdef CONFIG_PCI_MSI
#define NR_IRQS FIRST_SYSTEM_VECTOR
#define NR_IRQ_VECTORS NR_IRQS
#else
return order;
}
+extern int devmem_is_allowed(unsigned long pagenr);
+
#endif /* __ASSEMBLY__ */
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS \
+ (VM_READ | VM_WRITE | \
+ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+
#endif /* __KERNEL__ */
#define check_pgt_cache() do { } while (0)
-#define HAVE_ARCH_UNMAPPED_AREA 1
-
#endif /* _I386_PGALLOC_H */
*/
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3)
-#define SHLIB_BASE 0x00111000
-
#define __HAVE_ARCH_ALIGN_STACK
extern unsigned long arch_align_stack(unsigned long sp);
-#define __HAVE_ARCH_MMAP_TOP
-extern unsigned long mmap_top(void);
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
/*
* Size of io_bitmap, covering ports 0 to 0x3ff.
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { PAGE_SIZE, PAGE_SIZE }, \
+ { 32768, 32768 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
*/
#if !defined(IN_STRING_C)
+#define __HAVE_ARCH_STRCPY
static inline char * strcpy(char * dest,const char *src)
{
int d0, d1, d2;
return dest;
}
+#define __HAVE_ARCH_STRNCPY
static inline char * strncpy(char * dest,const char *src,size_t count)
{
int d0, d1, d2, d3;
return count;
}
+#define __HAVE_ARCH_STRCAT
static inline char * strcat(char * dest,const char * src)
{
int d0, d1, d2, d3;
return dest;
}
+#define __HAVE_ARCH_STRNCAT
static inline char * strncat(char * dest,const char * src,size_t count)
{
int d0, d1, d2, d3;
return dest;
}
+#define __HAVE_ARCH_STRCMP
static inline int strcmp(const char * cs,const char * ct)
{
int d0, d1;
return __res;
}
+#define __HAVE_ARCH_STRNCMP
static inline int strncmp(const char * cs,const char * ct,size_t count)
{
register int __res;
return __res;
}
+#define __HAVE_ARCH_STRCHR
static inline char * strchr(const char * s, int c)
{
int d0;
return __res;
}
+#define __HAVE_ARCH_STRRCHR
static inline char * strrchr(const char * s, int c)
{
int d0, d1;
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
-#include <linux/compiler.h>
#include <asm/page.h>
#define VERIFY_READ 0
#define __put_user_check(x,ptr,size) \
({ \
long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
might_sleep(); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
: "m"(__m(addr)), "i"(errret), "0"(err))
-unsigned long __must_check __copy_to_user_ll(void __user *to, const void *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll(void *to, const void __user *from, unsigned long n);
+unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n);
+unsigned long __copy_from_user_ll(void *to, const void __user *from, unsigned long n);
/*
* Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
-static inline unsigned long __must_check
+static inline unsigned long
__direct_copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
-static inline unsigned long __must_check
+static inline unsigned long
__direct_copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
-static inline unsigned long __must_check
+static inline unsigned long
direct_copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_sleep();
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
-static inline unsigned long __must_check
+static inline unsigned long
direct_copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_sleep();
#define NR_syscalls 284
+#ifndef __KERNEL_SYSCALLS_NO_ERRNO__
/* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */
#define __syscall_return(type, res) \
return (type) (res); \
} while (0)
+#else
+# define __syscall_return(type, res) return (type) (res)
+#endif
+
/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
#define _syscall0(type,name) \
type name(void) \
* won't be any messing with the stack from main(), but we define
* some others too.
*/
-static inline _syscall0(pid_t,setsid)
-static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
-static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
-static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
-static inline _syscall1(int,dup,int,fd)
static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
-static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
-static inline _syscall1(int,close,int,fd)
-static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount);
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
#ifdef CONFIG_IA64_CYCLONE
extern int use_cyclone;
-extern int __init cyclone_setup(char*);
+extern void __init cyclone_setup(void);
#else /* CONFIG_IA64_CYCLONE */
#define use_cyclone 0
-static inline void cyclone_setup(char* s)
+static inline void cyclone_setup(void)
{
printk(KERN_ERR "Cyclone Counter: System not configured"
" w/ CONFIG_IA64_CYCLONE.\n");
#define AT_SYSINFO_EHDR 33
#ifdef __KERNEL__
-struct elf64_hdr;
-extern void ia64_set_personality (struct elf64_hdr *elf_ex, int ibcs2_interpreter);
-#define SET_PERSONALITY(ex, ibcs2) ia64_set_personality(&(ex), ibcs2)
+#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX)
+#define elf_read_implies_exec(ex, have_pt_gnu_stack) \
+ (!(have_pt_gnu_stack) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
struct task_struct;
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
+#define O_ATOMICLOOKUP 02000000 /* do atomic file lookup */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);
typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
typedef struct irq_desc *ia64_mv_irq_desc (unsigned int);
-typedef u8 ia64_mv_irq_to_vector (u8);
-typedef unsigned int ia64_mv_local_vector_to_irq (u8 vector);
+typedef u8 ia64_mv_irq_to_vector (unsigned int);
+typedef unsigned int ia64_mv_local_vector_to_irq (u8);
/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
#ifdef CONFIG_IA64_DIG
/* Max 8 Nodes */
#define NODES_SHIFT 3
+#elif defined(CONFIG_IA64_HP_ZX1)
+/* Max 32 Nodes */
+#define NODES_SHIFT 5
#elif defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
/* Max 256 Nodes */
#define NODES_SHIFT 8
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \
- (((current->thread.flags & IA64_THREAD_XSTACK) != 0) \
+ (((current->personality & READ_IMPLIES_EXEC) != 0) \
? VM_EXEC : 0))
+#define devmem_is_allowed(x) 1
+
#endif /* _ASM_IA64_PAGE_H */
static inline pgd_t*
pgd_alloc_one_fast (struct mm_struct *mm)
{
- unsigned long *ret = pgd_quicklist;
+ unsigned long *ret = NULL;
+ preempt_disable();
+
+ ret = pgd_quicklist;
if (likely(ret != NULL)) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
--pgtable_cache_size;
} else
ret = NULL;
+
+ preempt_enable();
+
return (pgd_t *) ret;
}
static inline void
pgd_free (pgd_t *pgd)
{
+ preempt_disable();
*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
++pgtable_cache_size;
+ preempt_enable();
}
static inline void
static inline pmd_t*
pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
{
- unsigned long *ret = (unsigned long *)pmd_quicklist;
+ unsigned long *ret = NULL;
+ preempt_disable();
+
+ ret = (unsigned long *)pmd_quicklist;
if (likely(ret != NULL)) {
pmd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
--pgtable_cache_size;
}
+
+ preempt_enable();
+
return (pmd_t *)ret;
}
static inline void
pmd_free (pmd_t *pmd)
{
+ preempt_disable();
*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
pmd_quicklist = (unsigned long *) pmd;
++pgtable_cache_size;
+ preempt_enable();
}
#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
#define pgd_offset_k(addr) \
(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
+/* Look up a pgd entry in the gate area. On IA-64, the gate-area
+ resides in the kernel-mapped segment, hence we use pgd_offset_k()
+ here. */
+#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
+
/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
+#define __HAVE_ARCH_PGD_OFFSET_GATE
#include <asm-generic/pgtable.h>
#endif /* _ASM_IA64_PGTABLE_H */
#define _ASM_IA64_PROCESSOR_H
/*
- * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * Copyright (C) 1998-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
/* bit 5 is currently unused */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6) /* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE (__IA64_UL(1) << 7) /* send a SIGFPE for fpswa faults */
-#define IA64_THREAD_XSTACK (__IA64_UL(1) << 8) /* stack executable by default? */
#define IA64_THREAD_UAC_SHIFT 3
#define IA64_THREAD_UAC_MASK (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { PAGE_SIZE, PAGE_SIZE }, \
+ { 32768, 32768 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#ifndef _ASM_SN_SN2_IO_H
#define _ASM_SN_SN2_IO_H
+#include <linux/compiler.h>
+#include <asm/intrinsics.h>
-extern void * sn_io_addr(unsigned long port); /* Forward definition */
+extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward definition */
extern void sn_mmiob(void); /* Forward definition */
-#include <asm/intrinsics.h>
#define __sn_mf_a() ia64_mfa()
* Specify the minimum PROM revsion required for this kernel.
* Note that they're stored in hex format...
*/
-#define SN_SAL_MIN_MAJOR 0x1 /* SN2 kernels need at least PROM 1.0 */
-#define SN_SAL_MIN_MINOR 0x0
+#define SN_SAL_MIN_MAJOR 0x3 /* SN2 kernels need at least PROM 3.40 */
+#define SN_SAL_MIN_MINOR 0x40
u64 ia64_sn_probe_io_slot(long paddr, long size, void *data_ptr);
#define __NR_syslog 1117
#define __NR_setitimer 1118
#define __NR_getitimer 1119
-/* 1120 was __NR_old_stat */
+#define __NR_tux 1120 /* was __NR_old_stat */
/* 1121 was __NR_old_lstat */
/* 1122 was __NR_old_fstat */
#define __NR_vhangup 1123
#define __set_bit(nr,vaddr) set_bit(nr,vaddr)
-static inline void __constant_set_bit(int nr, unsigned long *vaddr)
+static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bset %1,%0"
: "+m" (*p) : "di" (nr & 7));
}
-static inline void __generic_set_bit(int nr, unsigned long *vaddr)
+static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bfset %1{%0:#1}"
: : "d" (nr^31), "o" (*vaddr) : "memory");
__generic_clear_bit(nr, vaddr))
#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)
-static inline void __constant_clear_bit(int nr, unsigned long *vaddr)
+static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bclr %1,%0"
: "+m" (*p) : "di" (nr & 7));
}
-static inline void __generic_clear_bit(int nr, unsigned long *vaddr)
+static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bfclr %1{%0:#1}"
: : "d" (nr^31), "o" (*vaddr) : "memory");
#ifndef __M68K_HARDIRQ_H
#define __M68K_HARDIRQ_H
+#include <linux/config.h>
#include <linux/threads.h>
#include <linux/cache.h>
struct fp_ext temp[2];
};
-#if FPU_EMU_DEBUG
+#ifdef FPU_EMU_DEBUG
extern unsigned int fp_debugprint;
#define dprint(bit, fmt, args...) ({ \
#define _MOTOROLA_PGALLOC_H
#include <asm/tlb.h>
+#include <asm/tlbflush.h>
extern pmd_t *get_pointer_table(void);
extern int free_pointer_table(pmd_t *);
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define devmem_is_allowed(x) 1
+
#endif /* __KERNEL__ */
#endif /* _M68K_PAGE_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { PAGE_SIZE, PAGE_SIZE }, \
+ { 32768, 32768 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
atomic_t count;
atomic_t waking;
wait_queue_head_t wait;
-#if WAITQUEUE_DEBUG
+#ifdef WAITQUEUE_DEBUG
long __magic;
#endif
};
-#if WAITQUEUE_DEBUG
+#ifdef WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name) \
, (long)&(name).__magic
#else
{
register struct semaphore *sem1 __asm__ ("%a1") = sem;
-#if WAITQUEUE_DEBUG
+#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
might_sleep();
register struct semaphore *sem1 __asm__ ("%a1") = sem;
register int result __asm__ ("%d0");
-#if WAITQUEUE_DEBUG
+#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
might_sleep();
register struct semaphore *sem1 __asm__ ("%a1") = sem;
register int result __asm__ ("%d0");
-#if WAITQUEUE_DEBUG
+#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
{
register struct semaphore *sem1 __asm__ ("%a1") = sem;
-#if WAITQUEUE_DEBUG
+#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
__free_page(page);
}
-static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page)
-{
- tlb_remove_page(tlb, page);
-}
+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
#endif /* __ASSEMBLY__ */
+#define devmem_is_allowed(x) 1
+
#endif /* __KERNEL__ */
#endif /* _M68KNOMMU_PAGE_H */
#define WANT_PAGE_VIRTUAL
#endif
+#define devmem_is_allowed(x) 1
+
#endif /* _ASM_PAGE_H */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define devmem_is_allowed(x) 1
+
#endif /* __KERNEL__ */
#endif /* _PARISC_PAGE_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { PAGE_SIZE, PAGE_SIZE }, \
+ { 32768, 32768 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
int *src_err, int *dst_err);
#define csum_partial_copy_from_user(src, dst, len, sum, errp) \
- csum_partial_copy_generic((src), (dst), (len), (sum), (errp), 0)
+ csum_partial_copy_generic((src), (dst), (len), (sum), (errp), NULL)
/* FIXME: this needs to be written to really do no check -- Cort */
#define csum_partial_copy_nocheck(src, dst, len, sum) \
- csum_partial_copy_generic((src), (dst), (len), (sum), 0, 0)
+ csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
/*
* turns a 32-bit partial checksum (e.g. from csum_partial) into a
#define CPM_DATAONLY_SIZE ((uint)0x0700)
#define CPM_DP_NOSPACE ((uint)0x7fffffff)
+static inline long IS_DPERR(const uint offset)
+{
+ return (uint)offset > (uint)-1000L;
+}
+
/* Export the base address of the communication processor registers
* and dual port ram.
*/
extern cpm8xx_t *cpmp; /* Pointer to comm processor */
-extern void *m8xx_cpm_dpalloc(int size);
-extern int m8xx_cpm_dpfree(void *addr);
-extern void *m8xx_cpm_dpalloc_fixed(void *addr, int size);
-extern void m8xx_cpm_dpdump(void);
-extern int m8xx_cpm_dpram_offset(void *addr);
-extern void *m8xx_cpm_dpram_addr(int offset);
+extern uint cpm_dpalloc(uint size, uint align);
+extern int cpm_dpfree(uint offset);
+extern uint cpm_dpalloc_fixed(uint offset, uint size, uint align);
+extern void cpm_dpdump(void);
+extern void *cpm_dpram_addr(uint offset);
+extern void cpm_setbrg(uint brg, uint rate);
+
uint m8xx_cpm_hostalloc(uint size);
-void m8xx_cpm_setbrg(uint brg, uint rate);
/* Buffer descriptors used by many of the CPM protocols.
*/
*/
#define NUM_CPM_HOST_PAGES 2
+static inline long IS_DPERR(const uint offset)
+{
+ return (uint)offset > (uint)-1000L;
+}
/* Export the base address of the communication processor registers
* and dual port ram.
*/
extern cpm_cpm2_t *cpmp; /* Pointer to comm processor */
-extern void *cpm2_dpalloc(uint size, uint align);
-extern int cpm2_dpfree(void *addr);
-extern void *cpm2_dpalloc_fixed(void *addr, uint size, uint allign);
-extern void cpm2_dpdump(void);
-extern unsigned int cpm2_dpram_offset(void *addr);
-extern void *cpm2_dpram_addr(int offset);
-extern void cpm2_setbrg(uint brg, uint rate);
+extern uint cpm_dpalloc(uint size, uint align);
+extern int cpm_dpfree(uint offset);
+extern uint cpm_dpalloc_fixed(uint offset, uint size, uint align);
+extern void cpm_dpdump(void);
+extern void *cpm_dpram_addr(uint offset);
+extern void cpm_setbrg(uint brg, uint rate);
extern void cpm2_fastbrg(uint brg, uint rate, int div16);
/* Buffer descriptors used by many of the CPM protocols.
#define CPU_FTR_NO_DPM 0x00008000
#define CPU_FTR_HAS_HIGH_BATS 0x00010000
#define CPU_FTR_NEED_COHERENT 0x00020000
+#define CPU_FTR_NO_BTIC 0x00040000
#ifdef __ASSEMBLY__
#define O_LARGEFILE 0200000
#define O_DIRECT 0400000 /* direct disk access hint */
#define O_NOATIME 01000000
+#define O_ATOMICLOOKUP 01000000 /* tux hack */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
BUG_ON(!pte_none(*(kmap_pte+idx)));
#endif
set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
- flush_tlb_page(0, vaddr);
+ flush_tlb_page(NULL, vaddr);
return (void*) vaddr;
}
* this pte without first remap it
*/
pte_clear(kmap_pte+idx);
- flush_tlb_page(0, vaddr);
+ flush_tlb_page(NULL, vaddr);
#endif
dec_preempt_count();
preempt_check_resched();
{
#ifndef CONFIG_APUS
if (address == 0)
- return 0;
+ return NULL;
return (void *)(address - PCI_DRAM_OFFSET + KERNELBASE);
#else
return (void*) mm_ptov (address);
return irq;
}
+#elif defined(CONFIG_CPM2) && defined(CONFIG_85xx)
+/* Now include the board configuration specific associations.
+*/
+#include <asm/mpc85xx.h>
+
+/* The MPC8560 openpic has 32 internal interrupts and 12 external
+ * interrupts.
+ *
+ * We are "flattening" the interrupt vectors of the cascaded CPM
+ * so that we can uniquely identify any interrupt source with a
+ * single integer.
+ */
+#define NR_CPM_INTS 64
+#define NR_EPIC_INTS 44
+#ifndef NR_8259_INTS
+#define NR_8259_INTS 0
+#endif
+#define NUM_8259_INTERRUPTS NR_8259_INTS
+
+#ifndef CPM_IRQ_OFFSET
+#define CPM_IRQ_OFFSET 0
+#endif
+
+#define NR_IRQS (NR_EPIC_INTS + NR_CPM_INTS + NR_8259_INTS)
+
+/* These values must be zero-based and map 1:1 with the EPIC configuration.
+ * They are used throughout the 8560 I/O subsystem to generate
+ * interrupt masks, flags, and other control patterns. This is why the
+ * current kernel assumption of the 8259 as the base controller is such
+ * a pain in the butt.
+ */
+
+#define SIU_INT_ERROR ((uint)0x00+CPM_IRQ_OFFSET)
+#define SIU_INT_I2C ((uint)0x01+CPM_IRQ_OFFSET)
+#define SIU_INT_SPI ((uint)0x02+CPM_IRQ_OFFSET)
+#define SIU_INT_RISC ((uint)0x03+CPM_IRQ_OFFSET)
+#define SIU_INT_SMC1 ((uint)0x04+CPM_IRQ_OFFSET)
+#define SIU_INT_SMC2 ((uint)0x05+CPM_IRQ_OFFSET)
+#define SIU_INT_TIMER1 ((uint)0x0c+CPM_IRQ_OFFSET)
+#define SIU_INT_TIMER2 ((uint)0x0d+CPM_IRQ_OFFSET)
+#define SIU_INT_TIMER3 ((uint)0x0e+CPM_IRQ_OFFSET)
+#define SIU_INT_TIMER4 ((uint)0x0f+CPM_IRQ_OFFSET)
+#define SIU_INT_FCC1 ((uint)0x20+CPM_IRQ_OFFSET)
+#define SIU_INT_FCC2 ((uint)0x21+CPM_IRQ_OFFSET)
+#define SIU_INT_FCC3 ((uint)0x22+CPM_IRQ_OFFSET)
+#define SIU_INT_MCC1 ((uint)0x24+CPM_IRQ_OFFSET)
+#define SIU_INT_MCC2 ((uint)0x25+CPM_IRQ_OFFSET)
+#define SIU_INT_SCC1 ((uint)0x28+CPM_IRQ_OFFSET)
+#define SIU_INT_SCC2 ((uint)0x29+CPM_IRQ_OFFSET)
+#define SIU_INT_SCC3 ((uint)0x2a+CPM_IRQ_OFFSET)
+#define SIU_INT_SCC4 ((uint)0x2b+CPM_IRQ_OFFSET)
+#define SIU_INT_PC15 ((uint)0x30+CPM_IRQ_OFFSET)
+#define SIU_INT_PC14 ((uint)0x31+CPM_IRQ_OFFSET)
+#define SIU_INT_PC13 ((uint)0x32+CPM_IRQ_OFFSET)
+#define SIU_INT_PC12 ((uint)0x33+CPM_IRQ_OFFSET)
+#define SIU_INT_PC11 ((uint)0x34+CPM_IRQ_OFFSET)
+#define SIU_INT_PC10 ((uint)0x35+CPM_IRQ_OFFSET)
+#define SIU_INT_PC9 ((uint)0x36+CPM_IRQ_OFFSET)
+#define SIU_INT_PC8 ((uint)0x37+CPM_IRQ_OFFSET)
+#define SIU_INT_PC7 ((uint)0x38+CPM_IRQ_OFFSET)
+#define SIU_INT_PC6 ((uint)0x39+CPM_IRQ_OFFSET)
+#define SIU_INT_PC5 ((uint)0x3a+CPM_IRQ_OFFSET)
+#define SIU_INT_PC4 ((uint)0x3b+CPM_IRQ_OFFSET)
+#define SIU_INT_PC3 ((uint)0x3c+CPM_IRQ_OFFSET)
+#define SIU_INT_PC2 ((uint)0x3d+CPM_IRQ_OFFSET)
+#define SIU_INT_PC1 ((uint)0x3e+CPM_IRQ_OFFSET)
+#define SIU_INT_PC0 ((uint)0x3f+CPM_IRQ_OFFSET)
+
+static __inline__ int irq_canonicalize(int irq)
+{
+ return irq;
+}
+
#else /* CONFIG_40x + CONFIG_8xx */
/*
* this is the # irq's for all ppc arch's (pmac/chrp/prep)
#define LAST_CONTEXT 255
#define FIRST_CONTEXT 1
-#elif CONFIG_E500
+#elif defined(CONFIG_E500)
#define NO_CONTEXT 256
#define LAST_CONTEXT 255
#define FIRST_CONTEXT 1
#include <platforms/sbs8260.h>
#endif
-#ifdef CONFIG_RPX6
-#include <platforms/rpxsuper.h>
+#ifdef CONFIG_RPX8260
+#include <platforms/rpx8260.h>
#endif
#ifdef CONFIG_WILLOW
#ifdef CONFIG_MPC8540_ADS
#include <platforms/85xx/mpc8540_ads.h>
#endif
+#ifdef CONFIG_MPC8555_CDS
+#include <platforms/85xx/mpc8555_cds.h>
+#endif
+#ifdef CONFIG_MPC8560_ADS
+#include <platforms/85xx/mpc8560_ads.h>
+#endif
#ifdef CONFIG_SBC8560
#include <platforms/85xx/sbc8560.h>
#endif
#define _IO_BASE isa_io_base
#define _ISA_MEM_BASE isa_mem_base
+#ifdef CONFIG_PCI
#define PCI_DRAM_OFFSET pci_dram_offset
+#else
+#define PCI_DRAM_OFFSET 0
+#endif
/*
* The "residual" board information structure the boot loader passes
#define OCP_VENDOR_ARM 0x0004
#define OCP_VENDOR_FREESCALE 0x1057
#define OCP_VENDOR_IBM 0x1014
-#define OCP_VENDOR_MARVELL 0x11ab
#define OCP_VENDOR_MOTOROLA OCP_VENDOR_FREESCALE
#define OCP_VENDOR_XILINX 0x10ee
#define OCP_VENDOR_UNKNOWN 0xFFFF
#define OCP_FUNC_16550 0x0031
#define OCP_FUNC_IIC 0x0032
#define OCP_FUNC_USB 0x0033
-#define OCP_FUNC_MPSC 0x0034
-#define OCP_FUNC_COMM_MPSC 0x0035
-#define OCP_FUNC_SDMA 0x0036
+#define OCP_FUNC_PSC_UART 0x0034
/* Memory devices 0x0090 - 0x009F */
#define OCP_FUNC_MAL 0x0090
#define OCP_FUNC_PERFMON 0x00D2 /* Performance Monitor */
#define OCP_FUNC_RGMII 0x00D3
#define OCP_FUNC_TAH 0x00D4
-#define OCP_FUNC_I2C 0x00D5 /* I2C Controller */
-#define OCP_FUNC_BRG 0x00D6 /* Baud Rate Generator */
-#define OCP_FUNC_PIC 0x00D7 /* Programmable Interrupt Controller */
/* Network 0x0200 - 0x02FF */
#define OCP_FUNC_EMAC 0x0200
-#define OCP_FUNC_ENET 0x0201 /* TSEC & FEC */
-#define OCP_FUNC_COMM_EMAC 0x0202
-#define OCP_FUNC_GFAR 0x0203 /* TSEC & FEC */
+#define OCP_FUNC_GFAR 0x0201 /* TSEC & FEC */
/* Bridge devices 0xE00 - 0xEFF */
#define OCP_FUNC_OPB 0x0E00
-#define OCP_FUNC_HB 0x0E01 /* Host bridge */
#define OCP_FUNC_UNKNOWN 0xFFFF
extern int openpic_get_irq(struct pt_regs *regs);
extern void openpic_reset_processor_phys(u_int cpumask);
extern void openpic_setup_ISU(int isu_num, unsigned long addr);
-extern void openpic_cause_IPI(u_int ipi, u_int cpumask);
+extern void openpic_cause_IPI(u_int ipi, cpumask_t cpumask);
extern void smp_openpic_message_pass(int target, int msg, unsigned long data,
int wait);
extern void openpic_set_k2_cascade(int irq);
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define devmem_is_allowed(x) 1
+
#endif /* __KERNEL__ */
#endif /* _PPC_PAGE_H */
unsigned long bi_sramsize; /* size of SRAM memory */
#if defined(CONFIG_8xx) || defined(CONFIG_CPM2) || defined(CONFIG_85xx)
unsigned long bi_immr_base; /* base of IMMR register */
+#endif
+#if defined(CONFIG_PPC_MPC52xx)
+ unsigned long bi_mbar_base; /* base of internal registers */
#endif
unsigned long bi_bootflags; /* boot / reboot flag (for LynxOS) */
unsigned long bi_ip_addr; /* IP Address */
unsigned long bi_brgfreq; /* BRG_CLK Freq, in MHz */
unsigned long bi_sccfreq; /* SCC_CLK Freq, in MHz */
unsigned long bi_vco; /* VCO Out from PLL, in MHz */
+#endif
+#if defined(CONFIG_PPC_MPC52xx)
+ unsigned long bi_ipbfreq; /* IPB Bus Freq, in MHz */
+ unsigned long bi_pcifreq; /* PCI Bus Freq, in MHz */
#endif
unsigned long bi_baudrate; /* Console Baudrate */
#if defined(CONFIG_405GP)
#define DBAT6U SPRN_DBAT6U /* Data BAT 6 Upper Register */
#define DBAT7L SPRN_DBAT7L /* Data BAT 7 Lower Register */
#define DBAT7U SPRN_DBAT7U /* Data BAT 7 Upper Register */
-#define DEC SPRN_DEC /* Decrement Register */
+//#define DEC SPRN_DEC /* Decrement Register */
#define DMISS SPRN_DMISS /* Data TLB Miss Register */
#define DSISR SPRN_DSISR /* Data Storage Interrupt Status Register */
#define EAR SPRN_EAR /* External Address Register */
#define IMMR SPRN_IMMR /* PPC 860/821 Internal Memory Map Register */
#define L2CR SPRN_L2CR /* Classic PPC L2 cache control register */
#define L3CR SPRN_L3CR /* PPC 745x L3 cache control register */
-#define LR SPRN_LR
+//#define LR SPRN_LR
#define PVR SPRN_PVR /* Processor Version */
-#define RPA SPRN_RPA /* Required Physical Address Register */
+//#define RPA SPRN_RPA /* Required Physical Address Register */
#define SDR1 SPRN_SDR1 /* MMU hash base register */
#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
#define SPR1 SPRN_SPRG1
#define SVR_8555E 0x80790000
#define SVR_8560 0x80700000
+#if 0
/* Segment Registers */
#define SR0 0
#define SR1 1
#define SR13 13
#define SR14 14
#define SR15 15
+#endif
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { PAGE_SIZE, PAGE_SIZE }, \
+ { 32768, 32768 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#if defined(CONFIG_EV64260)
#include <platforms/ev64260.h>
-#elif defined(CONFIG_DMV182)
-#include <platforms/dmv182_serial.h>
#elif defined(CONFIG_GEMINI)
#include <platforms/gemini_serial.h>
#elif defined(CONFIG_POWERPMC250)
#define SIG_SETMASK 2 /* for setting the signal mask */
/* Type of a signal handler. */
-typedef void (*__sighandler_t)(int);
+typedef void __signalfn_t(int);
+typedef __signalfn_t __user *__sighandler_t;
+
+typedef void __restorefn_t(void);
+typedef __restorefn_t __user *__sigrestore_t;
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
__sighandler_t sa_handler;
old_sigset_t sa_mask;
unsigned long sa_flags;
- void (*sa_restorer)(void);
+ __sigrestore_t sa_restorer;
};
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
- void (*sa_restorer)(void);
+ __sigrestore_t sa_restorer;
sigset_t sa_mask; /* mask last for extensibility */
};
};
typedef struct sigaltstack {
- void *ss_sp;
+ void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
((addr) <= current->thread.fs.seg \
&& ((size) == 0 || (size) - 1 <= current->thread.fs.seg - (addr)))
-#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))
+#define access_ok(type, addr, size) \
+ (__chk_user_ptr(addr),__access_ok((unsigned long)(addr),(size)))
extern inline int verify_area(int type, const void __user * addr, unsigned long size)
{
#define __put_user_nocheck(x,ptr,size) \
({ \
long __pu_err; \
+ __chk_user_ptr(ptr); \
__put_user_size((x),(ptr),(size),__pu_err); \
__pu_err; \
})
#define __put_user_check(x,ptr,size) \
({ \
long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
__pu_err; \
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err, __gu_val; \
+ __chk_user_ptr(ptr); \
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
({ \
long __gu_err; \
long long __gu_val; \
+ __chk_user_ptr(ptr); \
__get_user_size64(__gu_val, (ptr), (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
#define __get_user_check(x, ptr, size) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
- const __typeof__(*(ptr)) *__gu_addr = (ptr); \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
({ \
long __gu_err = -EFAULT; \
long long __gu_val = 0; \
- const __typeof__(*(ptr)) *__gu_addr = (ptr); \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size64(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
struct ucontext {
unsigned long uc_flags;
- struct ucontext *uc_link;
+ struct ucontext __user *uc_link;
stack_t uc_stack;
int uc_pad[7];
- struct mcontext *uc_regs; /* points to uc_mcontext field */
+ struct mcontext __user *uc_regs;/* points to uc_mcontext field */
sigset_t uc_sigmask;
/* glibc has 1024-bit signal masks, ours are 64-bit */
int uc_maskext[30];
out_be64(vaddr, val);
}
+#define EEH_CHECK_ALIGN(v,a) \
+ ((((unsigned long)(v)) & ((a) - 1)) == 0)
+
static inline void eeh_memset_io(void *addr, int c, unsigned long n) {
void *vaddr = (void *)IO_TOKEN_TO_ADDR(addr);
- memset(vaddr, c, n);
+ u32 lc = c;
+ lc |= lc << 8;
+ lc |= lc << 16;
+
+ while(n && !EEH_CHECK_ALIGN(vaddr, 4)) {
+ *((volatile u8 *)vaddr) = c;
+ vaddr = (void *)((unsigned long)vaddr + 1);
+ n--;
+ }
+ while(n >= 4) {
+ *((volatile u32 *)vaddr) = lc;
+ vaddr = (void *)((unsigned long)vaddr + 4);
+ n -= 4;
+ }
+ while(n) {
+ *((volatile u8 *)vaddr) = c;
+ vaddr = (void *)((unsigned long)vaddr + 1);
+ n--;
+ }
+ __asm__ __volatile__ ("sync" : : : "memory");
}
static inline void eeh_memcpy_fromio(void *dest, void *src, unsigned long n) {
void *vsrc = (void *)IO_TOKEN_TO_ADDR(src);
- memcpy(dest, vsrc, n);
+ void *vsrcsave = vsrc, *destsave = dest, *srcsave = src;
+ unsigned long nsave = n;
+
+ while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
+ *((u8 *)dest) = *((volatile u8 *)vsrc);
+ __asm__ __volatile__ ("eieio" : : : "memory");
+ vsrc = (void *)((unsigned long)vsrc + 1);
+ dest = (void *)((unsigned long)dest + 1);
+ n--;
+ }
+ while(n > 4) {
+ *((u32 *)dest) = *((volatile u32 *)vsrc);
+ __asm__ __volatile__ ("eieio" : : : "memory");
+ vsrc = (void *)((unsigned long)vsrc + 4);
+ dest = (void *)((unsigned long)dest + 4);
+ n -= 4;
+ }
+ while(n) {
+ *((u8 *)dest) = *((volatile u8 *)vsrc);
+ __asm__ __volatile__ ("eieio" : : : "memory");
+ vsrc = (void *)((unsigned long)vsrc + 1);
+ dest = (void *)((unsigned long)dest + 1);
+ n--;
+ }
+ __asm__ __volatile__ ("sync" : : : "memory");
+
/* Look for ffff's here at dest[n]. Assume that at least 4 bytes
* were copied. Check all four bytes.
*/
- if ((n >= 4) &&
- (EEH_POSSIBLE_ERROR(src, vsrc, (*((u32 *) dest+n-4)), u32))) {
- eeh_check_failure(src, (*((u32 *) dest+n-4)));
+ if ((nsave >= 4) &&
+ (EEH_POSSIBLE_ERROR(srcsave, vsrcsave, (*((u32 *) destsave+nsave-4)),
+ u32))) {
+ eeh_check_failure(srcsave, (*((u32 *) destsave+nsave-4)));
}
}
static inline void eeh_memcpy_toio(void *dest, void *src, unsigned long n) {
void *vdest = (void *)IO_TOKEN_TO_ADDR(dest);
- memcpy(vdest, src, n);
+
+ while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
+ *((volatile u8 *)vdest) = *((u8 *)src);
+ src = (void *)((unsigned long)src + 1);
+ vdest = (void *)((unsigned long)vdest + 1);
+ n--;
+ }
+ while(n > 4) {
+ *((volatile u32 *)vdest) = *((volatile u32 *)src);
+ src = (void *)((unsigned long)src + 4);
+ vdest = (void *)((unsigned long)vdest + 4);
+ n-=4;
+ }
+ while(n) {
+ *((volatile u8 *)vdest) = *((u8 *)src);
+ src = (void *)((unsigned long)src + 1);
+ vdest = (void *)((unsigned long)vdest + 1);
+ n--;
+ }
+ __asm__ __volatile__ ("sync" : : : "memory");
}
+#undef EEH_CHECK_ALIGN
+
#define MAX_ISA_PORT 0x10000
extern unsigned long io_page_mask;
#define _IO_IS_VALID(port) ((port) >= MAX_ISA_PORT || (1 << (port>>PAGE_SHIFT)) & io_page_mask)
#define O_LARGEFILE 0200000
#define O_DIRECT 0400000 /* direct disk access hint */
#define O_NOATIME 01000000
+#define O_ATOMICLOOKUP 02000000 /* do atomic file lookup */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/config.h>
#include <linux/threads.h>
-#include <asm/atomic.h>
/*
* Maximum number of interrupt sources that we can handle.
return virt_irq_to_real_map[virt_irq];
}
+extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
+
/*
* Because many systems have two overlapping names spaces for
* interrupts (ISA and XICS for example), and the ISA interrupts
mm_context_t ctx = { .id = REGION_ID(ea), KERNEL_LOW_HPAGES}; \
ctx; })
-/*
- * Hardware Segment Lookaside Buffer Entry
- * This structure has been padded out to two 64b doublewords (actual SLBE's are
- * 94 bits). This padding facilites use by the segment management
- * instructions.
- */
typedef struct {
unsigned long esid: 36; /* Effective segment ID */
unsigned long resv0:20; /* Reserved */
} dw1;
} STE;
-typedef struct {
- unsigned long esid: 36; /* Effective segment ID */
- unsigned long v: 1; /* Entry valid (v=1) or invalid */
- unsigned long null1:15; /* padding to a 64b boundary */
- unsigned long index:12; /* Index to select SLB entry. Used by slbmte */
-} slb_dword0;
-
-typedef struct {
- unsigned long vsid: 52; /* Virtual segment ID */
- unsigned long ks: 1; /* Supervisor (privileged) state storage key */
- unsigned long kp: 1; /* Problem state storage key */
- unsigned long n: 1; /* No-execute if n=1 */
- unsigned long l: 1; /* Virt pages are large (l=1) or 4KB (l=0) */
- unsigned long c: 1; /* Class */
- unsigned long resv0: 7; /* Padding to a 64b boundary */
-} slb_dword1;
-
-typedef struct {
- union {
- unsigned long dword0;
- slb_dword0 dw0;
- } dw0;
-
- union {
- unsigned long dword1;
- slb_dword1 dw1;
- } dw1;
-} SLBE;
-
/* Hardware Page Table Entry */
#define HPTES_PER_GROUP 8
#define STAB0_PHYS_ADDR (STAB0_PAGE<<PAGE_SHIFT)
#define STAB0_VIRT_ADDR (KERNELBASE+STAB0_PHYS_ADDR)
+#define SLB_NUM_BOLTED 2
+#define SLB_CACHE_ENTRIES 8
+
+/* Bits in the SLB ESID word */
+#define SLB_ESID_V 0x0000000008000000 /* entry is valid */
+
+/* Bits in the SLB VSID word */
+#define SLB_VSID_SHIFT 12
+#define SLB_VSID_KS 0x0000000000000800
+#define SLB_VSID_KP 0x0000000000000400
+#define SLB_VSID_N 0x0000000000000200 /* no-execute */
+#define SLB_VSID_L 0x0000000000000100 /* largepage (4M) */
+#define SLB_VSID_C 0x0000000000000080 /* class */
+
+#define SLB_VSID_KERNEL (SLB_VSID_KP|SLB_VSID_C)
+#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS)
+
+#define VSID_RANDOMIZER ASM_CONST(42470972311)
+#define VSID_MASK 0xfffffffffUL
+/* Because we never access addresses below KERNELBASE as kernel
+ * addresses, this VSID is never used for anything real, and will
+ * never have pages hashed into it */
+#define BAD_VSID ASM_CONST(0)
+
/* Block size masks */
#define BL_128K 0x000
#define BL_256K 0x001
}
extern void flush_stab(struct task_struct *tsk, struct mm_struct *mm);
-extern void flush_slb(struct task_struct *tsk, struct mm_struct *mm);
+extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
/*
* switch_mm is the entry point called from the architecture independent
return;
if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
- flush_slb(tsk, next);
+ switch_slb(tsk, next);
else
flush_stab(tsk, next);
}
local_irq_restore(flags);
}
-#define VSID_RANDOMIZER 42470972311UL
-#define VSID_MASK 0xfffffffffUL
-
-
/* This is only valid for kernel (including vmalloc, imalloc and bolted) EA's
*/
static inline unsigned long
u64 exmc[8]; /* used for machine checks */
u64 exslb[8]; /* used for SLB/segment table misses
* on the linear mapping */
- u64 exdsi[8]; /* used for linear mapping hash table misses */
+ u64 slb_r3; /* spot to save R3 on SLB miss */
+ mm_context_t context;
+ u16 slb_cache[SLB_CACHE_ENTRIES];
+ u16 slb_cache_ptr;
/*
* then miscellaneous read-write fields
*/
struct task_struct *__current; /* Pointer to current */
u64 kstack; /* Saved Kernel stack addr */
- u64 stab_next_rr; /* stab/slb round-robin counter */
+ u64 stab_rr; /* stab/slb round-robin counter */
u64 next_jiffy_update_tb; /* TB value for next jiffy update */
u64 saved_r1; /* r1 save for RTAS calls */
u64 saved_msr; /* MSR saved here by enter_rtas */
u32 lpevent_count; /* lpevents processed */
u8 proc_enabled; /* irq soft-enable flag */
+ /* not yet used */
+ u64 exdsi[8]; /* used for linear mapping hash table misses */
+
/*
* iSeries structues which the hypervisor knows about - Not
* sure if these particularly need to be cacheline aligned.
#define SID_SHIFT 28
#define SID_MASK 0xfffffffffUL
+#define ESID_MASK 0xfffffffff0000000UL
#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
/* For 64-bit processes the hugepage range is 1T-1.5T */
-#define TASK_HPAGE_BASE (0x0000010000000000UL)
-#define TASK_HPAGE_END (0x0000018000000000UL)
+#define TASK_HPAGE_BASE ASM_CONST(0x0000010000000000)
+#define TASK_HPAGE_END ASM_CONST(0x0000018000000000)
#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \
- (1U << GET_ESID(addr))) & 0xffff)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define devmem_is_allowed(x) 1
+
#endif /* __KERNEL__ */
#endif /* _PPC64_PAGE_H */
* the PCI memory space in the CPU bus space
*/
unsigned long pci_mem_offset;
- unsigned long pci_io_offset;
struct pci_ops *ops;
volatile unsigned int *cfg_addr;
#define PVR SPRN_PVR /* Processor Version */
#define PIR SPRN_PIR /* Processor ID */
#define PURR SPRN_PURR /* Processor Utilization of Resource Register */
-#define RPA SPRN_RPA /* Required Physical Address Register */
+//#define RPA SPRN_RPA /* Required Physical Address Register */
#define SDR1 SPRN_SDR1 /* MMU hash base register */
#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
#define SPR1 SPRN_SPRG1
extern void print_properties(struct device_node *node);
extern int prom_n_addr_cells(struct device_node* np);
extern int prom_n_size_cells(struct device_node* np);
+extern int prom_n_intr_cells(struct device_node* np);
extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
extern void prom_add_property(struct device_node* np, struct property* prop);
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { PAGE_SIZE, PAGE_SIZE }, \
+ { 32768, 32768 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#define __get_user_check(x,ptr,size) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
- const __typeof__(*(ptr)) *__gu_addr = (ptr); \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ,__gu_addr,size)) \
__get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT);\
(x) = (__typeof__(*(ptr)))__gu_val; \
void xics_setup_cpu(void);
void xics_cause_IPI(int cpu);
+/* first argument is ignored for now*/
+void pSeriesLP_cppr_info(int n_cpu, u8 value);
+
struct xics_ipi_struct {
volatile unsigned long value;
} ____cacheline_aligned;
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
+#define O_ATOMICLOOKUP 02000000 /* do atomic file lookup (tux) */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define devmem_is_allowed(x) 1
+
#endif /* __KERNEL__ */
#endif /* _S390_PAGE_H */
#define MM_VM_SIZE(mm) DEFAULT_TASK_SIZE
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+
typedef struct {
__u32 ar4;
} mm_segment_t;
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { PAGE_SIZE, PAGE_SIZE }, \
+ { 32768, 32768 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#endif
+#define devmem_is_allowed(x) 1
+
#endif /* __KERNEL__ */
#endif /* __ASM_SH_PAGE_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { PAGE_SIZE, PAGE_SIZE }, \
+ { 32768, 32768 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
#endif
+#define devmem_is_allowed(x) 1
+
#endif /* __KERNEL__ */
#endif /* __ASM_SH64_PAGE_H */
#define O_DIRECTORY 0x10000 /* must be a directory */
#define O_NOFOLLOW 0x20000 /* don't follow links */
#define O_LARGEFILE 0x40000
+#define O_ATOMICLOOKUP 0x80000 /* do atomic file lookup */
#define O_DIRECT 0x100000 /* direct disk access hint */
#define O_NOATIME 0x200000
#ifndef _SPARC_OPENPROMIO_H
#define _SPARC_OPENPROMIO_H
+#include <linux/compiler.h>
#include <linux/ioctl.h>
#include <linux/types.h>
{
int op_nodeid; /* PROM Node ID (value-result) */
int op_namelen; /* Length of op_name. */
- char *op_name; /* Pointer to the property name. */
+ char __user *op_name; /* Pointer to the property name. */
int op_buflen; /* Length of op_buf (value-result) */
- char *op_buf; /* Pointer to buffer. */
+ char __user *op_buf; /* Pointer to buffer. */
};
#define OPIOCGET _IOWR('O', 1, struct opiocdesc)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define devmem_is_allowed(x) 1
+
#endif /* __KERNEL__ */
#endif /* _SPARC_PAGE_H */
extern void pci_unmap_page(struct pci_dev *hwdev,
dma_addr_t dma_address, size_t size, int direction);
-/* map_page and map_single cannot fail */
-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
-{
- return 0;
-}
-
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scather-gather version of the
* above pci_map_single interface. Here the scatter gather list
{
}
+#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
+
+static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+ return (dma_addr == PCI_DMA_ERROR_CODE);
+}
+
#endif /* __KERNEL__ */
/* generic pci stuff */
{ 0, RLIM_INFINITY}, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{INR_OPEN, INR_OPEN}, {0, 0}, \
- {PAGE_SIZE, PAGE_SIZE}, \
+ {32768, 32768}, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{MAX_SIGPENDING, MAX_SIGPENDING}, \
#ifndef __LINUX_FBIO_H
#define __LINUX_FBIO_H
+#include <linux/compiler.h>
+
/* Constants used for fbio SunOS compatibility */
/* (C) 1996 Miguel de Icaza */
struct fbcmap {
int index; /* first element (0 origin) */
int count;
- unsigned char *red;
- unsigned char *green;
- unsigned char *blue;
+ unsigned char __user *red;
+ unsigned char __user *green;
+ unsigned char __user *blue;
};
#ifdef __KERNEL__
#define O_DIRECTORY 0x10000 /* must be a directory */
#define O_NOFOLLOW 0x20000 /* don't follow links */
#define O_LARGEFILE 0x40000
+#define O_ATOMICLOOKUP 0x80000 /* do atomic file lookup */
#define O_DIRECT 0x100000 /* direct disk access hint */
#define O_NOATIME 0x200000
volatile int doing_pdma = 0;
/* This is software state */
-char *pdma_base = 0;
+char *pdma_base = NULL;
unsigned long pdma_areasize;
/* Common routines to all controller types on the Sparc. */
doing_pdma = 0;
if (pdma_base) {
mmu_unlockarea(pdma_base, pdma_areasize);
- pdma_base = 0;
+ pdma_base = NULL;
}
}
} else {
#ifdef CONFIG_PCI
struct linux_ebus *ebus;
- struct linux_ebus_device *edev = 0;
+ struct linux_ebus_device *edev = NULL;
unsigned long config = 0;
unsigned long auxio_reg;
#ifndef _SPARC64_OPENPROMIO_H
#define _SPARC64_OPENPROMIO_H
+#include <linux/compiler.h>
#include <linux/ioctl.h>
#include <linux/types.h>
{
int op_nodeid; /* PROM Node ID (value-result) */
int op_namelen; /* Length of op_name. */
- char *op_name; /* Pointer to the property name. */
+ char __user *op_name; /* Pointer to the property name. */
int op_buflen; /* Length of op_buf (value-result) */
- char *op_buf; /* Pointer to buffer. */
+ char __user *op_buf; /* Pointer to buffer. */
};
#define OPIOCGET _IOWR('O', 1, struct opiocdesc)
#define clear_page(X) _clear_page((void *)(X))
struct page;
extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
-#define copy_page(X,Y) __memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
+#define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
/* GROSS, defining this makes gcc pass these types as aggregates,
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define devmem_is_allowed(x) 1
+
#endif /* !(__KERNEL__) */
#endif /* !(_SPARC64_PAGE_H) */
{ 0, RLIM_INFINITY}, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{INR_OPEN, INR_OPEN}, {0, 0}, \
- {PAGE_SIZE, PAGE_SIZE }, \
+ {32768, 32768 }, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{RLIM_INFINITY, RLIM_INFINITY}, \
{MAX_SIGPENDING, MAX_SIGPENDING}, \
#ifdef __KERNEL__
+#include <linux/config.h>
#include <linux/compat.h>
+#ifdef CONFIG_COMPAT
+
typedef union sigval32 {
int sival_int;
u32 sival_ptr;
} _sigpoll;
} _sifields;
} siginfo_t32;
+#endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */
#ifdef __KERNEL__
+#ifdef CONFIG_COMPAT
+
typedef struct sigevent32 {
sigval_t32 sigev_value;
int sigev_signo;
extern int copy_siginfo_to_user32(siginfo_t32 __user *to, siginfo_t *from);
+#endif /* CONFIG_COMPAT */
+
#endif /* __KERNEL__ */
#endif
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
+#include <linux/config.h>
#include <linux/personality.h>
#include <linux/types.h>
#include <linux/compat.h>
};
#ifdef __KERNEL__
+
+#ifdef CONFIG_COMPAT
struct __new_sigaction32 {
unsigned sa_handler;
unsigned int sa_flags;
unsigned sa_restorer; /* not used by Linux/SPARC yet */
compat_sigset_t sa_mask;
};
+#endif
struct k_sigaction {
struct __new_sigaction sa;
};
#ifdef __KERNEL__
+
+#ifdef CONFIG_COMPAT
struct __old_sigaction32 {
unsigned sa_handler;
compat_old_sigset_t sa_mask;
};
#endif
+#endif
+
typedef struct sigaltstack {
void __user *ss_sp;
int ss_flags;
} stack_t;
#ifdef __KERNEL__
+
+#ifdef CONFIG_COMPAT
typedef struct sigaltstack32 {
u32 ss_sp;
int ss_flags;
compat_size_t ss_size;
} stack_t32;
+#endif
struct signal_deliver_cookie {
int restart_syscall;
do { membar("#LoadLoad"); \
} while(*((volatile unsigned char *)lock))
-static __inline__ void _raw_spin_lock(spinlock_t *lock)
-{
- __asm__ __volatile__(
-"1: ldstub [%0], %%g7\n"
-" brnz,pn %%g7, 2f\n"
-" membar #StoreLoad | #StoreStore\n"
-" .subsection 2\n"
-"2: ldub [%0], %%g7\n"
-" brnz,pt %%g7, 2b\n"
-" membar #LoadLoad\n"
-" b,a,pt %%xcc, 1b\n"
-" .previous\n"
- : /* no outputs */
- : "r" (lock)
- : "g7", "memory");
-}
+/* arch/sparc64/lib/spinlock.S */
+extern void _raw_spin_lock(spinlock_t *lock);
static __inline__ int _raw_spin_trylock(spinlock_t *lock)
{
#include <asm/asi.h>
extern void __memmove(void *,const void *,__kernel_size_t);
-extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
extern void *__memset(void *,int,__kernel_size_t);
-extern void *__builtin_memcpy(void *,const void *,__kernel_size_t);
extern void *__builtin_memset(void *,int,__kernel_size_t);
#ifndef EXPORT_SYMTAB_STROPS
#define __HAVE_ARCH_MEMCPY
-static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
-{
- if(n) {
- if(n <= 32) {
- __builtin_memcpy(to, from, n);
- } else {
- __memcpy(to, from, n);
- }
- }
- return to;
-}
-
-static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
-{
- __memcpy(to, from, n);
- return to;
-}
-
-#undef memcpy
-#define memcpy(t, f, n) \
-(__builtin_constant_p(n) ? \
- __constant_memcpy((t),(f),(n)) : \
- __nonconstant_memcpy((t),(f),(n)))
+extern void * memcpy(void *,const void *,__kernel_size_t);
#define __HAVE_ARCH_MEMSET
#else
#define SUNOS_SYSCALL_TRAP TRAP(sunos_syscall)
#endif
+#ifdef CONFIG_COMPAT
#define LINUX_32BIT_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall32, sys_call_table32)
+#else
+#define LINUX_32BIT_SYSCALL_TRAP BTRAP(0x110)
+#endif
#define LINUX_64BIT_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall, sys_call_table64)
#define GETCC_TRAP TRAP(getcc)
#define SETCC_TRAP TRAP(setcc)
extern struct page *arch_validate(struct page *page, int mask, int order);
#define HAVE_ARCH_VALIDATE
+#define devmem_is_allowed(x) 1
#endif
return result + generic_ffs_for_find_next_bit(tmp);
}
+/*
+ * find_first_bit - find the first set bit in a memory region
+ */
+#define find_first_bit(addr, size) \
+ find_next_bit((addr), (size), 0)
+
#define ffs(x) generic_ffs (x)
#define fls(x) generic_fls (x)
#define __va(x) ((void *)__phys_to_virt ((unsigned long)(x)))
+#define devmem_is_allowed(x) 1
+
#endif /* KERNEL */
#endif /* __V850_PAGE_H__ */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { PAGE_SIZE, PAGE_SIZE }, \
+ { 32768, 32768 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
-#define COMPAT_LOFF_T_MAX 0x7fffffffffffffff
+#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
struct compat_ipc64_perm {
compat_key_t key;
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
+#define O_ATOMICLOOKUP 02000000 /* TUX */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
#ifdef CONFIG_X86_IO_APIC
-#ifdef CONFIG_PCI_USE_VECTOR
+#ifdef CONFIG_PCI_MSI
static inline int use_pci_vector(void) {return 1;}
static inline void disable_edge_ioapic_vector(unsigned int vector) { }
static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */
-#ifdef CONFIG_PCI_USE_VECTOR
+#ifdef CONFIG_PCI_MSI
#define NR_IRQS FIRST_SYSTEM_VECTOR
#define NR_IRQ_VECTORS NR_IRQS
#else
};
extern unsigned char mp_bus_id_to_type [MAX_MP_BUSSES];
extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
-extern cpumask_t mp_bus_to_cpumask [MAX_MP_BUSSES];
+extern cpumask_t pci_bus_to_cpumask [256];
extern unsigned int boot_cpu_physical_apicid;
extern int smp_found_config;
struct task_struct;
struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
int in_gate_area(struct task_struct *task, unsigned long addr);
+extern int devmem_is_allowed(unsigned long pagenr);
#endif
#endif /* __KERNEL__ */
#define MCA_bus__is_a_macro
-/*
- * User space process size: 512GB - 1GB (default).
- */
-#define TASK_SIZE (0x0000007fc0000000UL)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
#define TASK_UNMAPPED_BASE \
(test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
+
+/*
+ * User space process size: 512GB - 1GB (default).
+ */
+#define TASK_SIZE_64 (0x0000007fc0000000UL)
+
+#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE_64)
+
/*
* Size of io_bitmap, covering ports 0 to 0x3ff.
*/
#define ARCH_HAS_SCHED_WAKE_IDLE
#endif
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+
#endif /* __ASM_X86_64_PROCESSOR_H */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ INR_OPEN, INR_OPEN }, \
- { PAGE_SIZE , PAGE_SIZE }, \
+ { 32768, 32768 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ MAX_SIGPENDING, MAX_SIGPENDING }, \
static inline cpumask_t pcibus_to_cpumask(int bus)
{
- cpumask_t tmp;
- cpus_and(tmp, mp_bus_to_cpumask[bus], cpu_online_map);
- return tmp;
+ cpumask_t res;
+ cpus_and(res, pci_bus_to_cpumask[bus], cpu_online_map);
+ return res;
}
#define NODE_BALANCE_RATE 30 /* CHECKME */
#define __put_user_check(x,ptr,size) \
({ \
int __pu_err = -EFAULT; \
- __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
if (likely(access_ok(VERIFY_WRITE,__pu_addr,size))) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
__pu_err; \
#include <linux/types.h>
#include <asm/param.h>
+#include <asm/byteorder.h>
/*
* comp_t is a 16-bit "floating" point number with a 3-bit base 8
#define ACOMPAT 0x04 /* ... used compatibility mode (VAX only not used) */
#define ACORE 0x08 /* ... dumped core */
#define AXSIG 0x10 /* ... was killed by a signal */
-#define ABYTESEX 0x80 /* always set, allows to detect byteorder */
+
+#ifdef __BIG_ENDIAN
+#define ACCT_BYTEORDER 0x80 /* accounting file is big endian */
+#else
+#define ACCT_BYTEORDER 0x00 /* accounting file is little endian */
+#endif
#ifdef __KERNEL__
#define ata_id_has_lba48(dev) ((dev)->id[83] & (1 << 10))
#define ata_id_has_wcache(dev) ((dev)->id[82] & (1 << 5))
#define ata_id_has_pm(dev) ((dev)->id[82] & (1 << 3))
-#define ata_id_has_lba(dev) ((dev)->id[49] & (1 << 8))
-#define ata_id_has_dma(dev) ((dev)->id[49] & (1 << 9))
+#define ata_id_has_lba(dev) ((dev)->id[49] & (1 << 9))
+#define ata_id_has_dma(dev) ((dev)->id[49] & (1 << 8))
#define ata_id_removeable(dev) ((dev)->id[0] & (1 << 7))
#define ata_id_u32(dev,n) \
(((u32) (dev)->id[(n) + 1] << 16) | ((u32) (dev)->id[(n)]))
struct atm_iobuf {
int length;
- void *buffer;
+ void __user *buffer;
};
/* for ATM_GETCIRANGE / ATM_SETCIRANGE */
#define ATM_CI_MAX -1 /* use maximum range of VPI/VCI */
struct atm_cirange {
- char vpi_bits; /* 1..8, ATM_CI_MAX (-1) for maximum */
- char vci_bits; /* 1..16, ATM_CI_MAX (-1) for maximum */
+ signed char vpi_bits; /* 1..8, ATM_CI_MAX (-1) for maximum */
+ signed char vci_bits; /* 1..16, ATM_CI_MAX (-1) for maximum */
};
/* for ATM_SETSC; actually taken from the ATM_VF number space */
*
* ATM Lan Emulation Daemon vs. driver interface
*
- * carnil@cs.tut.fi
+ * mkiiskila@yahoo.com
*
*/
*/
#define CONFIG_EXPERIMENTAL 1
#define CONFIG_CLEAN_COMPILE 1
-#define CONFIG_STANDALONE 1
#define CONFIG_BROKEN_ON_SMP 1
/*
/*
* Generic Driver Options
*/
+#define CONFIG_STANDALONE 1
#define CONFIG_PREVENT_FIRMWARE_BUILD 1
#define CONFIG_FW_LOADER 1
#undef CONFIG_DEBUG_DRIVER
#define CONFIG_MTD_PARTITIONS_MODULE 1
#define CONFIG_MTD_CONCAT_MODULE 1
#define CONFIG_MTD_REDBOOT_PARTS_MODULE 1
-#define CONFIG_MTD_CMDLINE_PARTS_MODULE 1
+#undef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+#undef CONFIG_MTD_REDBOOT_PARTS_READONLY
/*
* User Modules And Translation Layers
#define CONFIG_MTD_JEDECPROBE_MODULE 1
#define CONFIG_MTD_GEN_PROBE_MODULE 1
#undef CONFIG_MTD_CFI_ADV_OPTIONS
+#define CONFIG_MTD_MAP_BANK_WIDTH_1 1
+#define CONFIG_MTD_MAP_BANK_WIDTH_2 1
+#define CONFIG_MTD_MAP_BANK_WIDTH_4 1
+#undef CONFIG_MTD_MAP_BANK_WIDTH_8
+#undef CONFIG_MTD_MAP_BANK_WIDTH_16
+#undef CONFIG_MTD_MAP_BANK_WIDTH_32
+#define CONFIG_MTD_CFI_I1 1
+#define CONFIG_MTD_CFI_I2 1
+#undef CONFIG_MTD_CFI_I4
+#undef CONFIG_MTD_CFI_I8
#define CONFIG_MTD_CFI_INTELEXT_MODULE 1
#define CONFIG_MTD_CFI_AMDSTD_MODULE 1
+#define CONFIG_MTD_CFI_AMDSTD_RETRY 3
#define CONFIG_MTD_CFI_STAA_MODULE 1
+#define CONFIG_MTD_CFI_UTIL_MODULE 1
#define CONFIG_MTD_RAM_MODULE 1
#define CONFIG_MTD_ROM_MODULE 1
#define CONFIG_MTD_ABSENT_MODULE 1
-#undef CONFIG_MTD_OBSOLETE_CHIPS
/*
* Mapping drivers for chip access
#define CONFIG_MTD_ELAN_104NC_MODULE 1
#define CONFIG_MTD_SCx200_DOCFLASH_MODULE 1
#define CONFIG_MTD_AMD76XROM_MODULE 1
-#define CONFIG_MTD_ICH2ROM_MODULE 1
+#undef CONFIG_MTD_ICHXROM
#define CONFIG_MTD_SCB2_FLASH_MODULE 1
#undef CONFIG_MTD_NETtel
#undef CONFIG_MTD_DILNETPC
#undef CONFIG_MTD_PMC551_BUGFIX
#undef CONFIG_MTD_PMC551_DEBUG
#undef CONFIG_MTD_SLRAM
+#undef CONFIG_MTD_PHRAM
#define CONFIG_MTD_MTDRAM_MODULE 1
#define CONFIG_MTDRAM_TOTAL_SIZE 4096
#define CONFIG_MTDRAM_ERASE_SIZE 128
#undef CONFIG_MTD_DOC2001
#define CONFIG_MTD_DOC2001PLUS_MODULE 1
#define CONFIG_MTD_DOCPROBE_MODULE 1
+#define CONFIG_MTD_DOCECC_MODULE 1
#undef CONFIG_MTD_DOCPROBE_ADVANCED
#define CONFIG_MTD_DOCPROBE_ADDRESS 0x0
#define CONFIG_MTD_NAND_MODULE 1
#undef CONFIG_MTD_NAND_VERIFY_WRITE
#define CONFIG_MTD_NAND_IDS_MODULE 1
+#undef CONFIG_MTD_NAND_DISKONCHIP
/*
* Parallel port support
#define CONFIG_BLK_DEV_IDECD 1
#define CONFIG_BLK_DEV_IDETAPE_MODULE 1
#define CONFIG_BLK_DEV_IDEFLOPPY 1
-#undef CONFIG_BLK_DEV_IDESCSI
+#define CONFIG_BLK_DEV_IDESCSI_MODULE 1
#undef CONFIG_IDE_TASK_IOCTL
#undef CONFIG_IDE_TASKFILE_IO
* QoS and/or fair queueing
*/
#define CONFIG_NET_SCHED 1
+#define CONFIG_NET_SCH_CLK_JIFFIES 1
+#undef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+#undef CONFIG_NET_SCH_CLK_CPU
#define CONFIG_NET_SCH_CBQ_MODULE 1
#define CONFIG_NET_SCH_HTB_MODULE 1
#define CONFIG_NET_SCH_HFSC_MODULE 1
#define CONFIG_BT_HCIBLUECARD_MODULE 1
#define CONFIG_BT_HCIBTUART_MODULE 1
#define CONFIG_BT_HCIVHCI_MODULE 1
+#define CONFIG_TUX_MODULE 1
+
+/*
+ * TUX options
+ */
+#define CONFIG_TUX_EXTCGI 1
+#undef CONFIG_TUX_EXTENDED_LOG
+#undef CONFIG_TUX_DEBUG
#define CONFIG_NETDEVICES 1
#define CONFIG_DUMMY_MODULE 1
#define CONFIG_BONDING_MODULE 1
#define CONFIG_FB_HGA_ACCEL 1
#define CONFIG_FB_RIVA_MODULE 1
#undef CONFIG_FB_RIVA_I2C
+#undef CONFIG_FB_RIVA_DEBUG
#define CONFIG_FB_I810_MODULE 1
#define CONFIG_FB_I810_GTF 1
#define CONFIG_FB_MATROX_MODULE 1
#define CONFIG_JFFS2_FS_MODULE 1
#define CONFIG_JFFS2_FS_DEBUG 0
#define CONFIG_JFFS2_FS_NAND 1
+#undef CONFIG_JFFS2_COMPRESSION_OPTIONS
+#define CONFIG_JFFS2_ZLIB 1
+#define CONFIG_JFFS2_RTIME 1
+#undef CONFIG_JFFS2_RUBIN
#define CONFIG_CRAMFS_MODULE 1
#define CONFIG_VXFS_FS_MODULE 1
#undef CONFIG_HPFS_FS
#undef CONFIG_SMB_NLS_DEFAULT
#define CONFIG_CIFS_MODULE 1
#undef CONFIG_CIFS_STATS
+#define CONFIG_CIFS_XATTR 1
#define CONFIG_CIFS_POSIX 1
#define CONFIG_NCP_FS_MODULE 1
#define CONFIG_NCPFS_PACKET_SIGNING 1
#define CONFIG_EARLY_PRINTK 1
#define CONFIG_DEBUG_STACKOVERFLOW 1
#undef CONFIG_DEBUG_STACK_USAGE
-#define CONFIG_DEBUG_SLAB 1
+#undef CONFIG_DEBUG_SLAB
#define CONFIG_MAGIC_SYSRQ 1
-#define CONFIG_DEBUG_SPINLOCK 1
+#undef CONFIG_DEBUG_SPINLOCK
#undef CONFIG_DEBUG_PAGEALLOC
-#define CONFIG_DEBUG_HIGHMEM 1
+#undef CONFIG_DEBUG_HIGHMEM
#define CONFIG_DEBUG_INFO 1
#define CONFIG_DEBUG_SPINLOCK_SLEEP 1
#undef CONFIG_FRAME_POINTER
#define CONFIG_CRYPTO_HMAC 1
#define CONFIG_CRYPTO_NULL_MODULE 1
#define CONFIG_CRYPTO_MD4_MODULE 1
-#define CONFIG_CRYPTO_MD5 1
+#define CONFIG_CRYPTO_MD5_MODULE 1
#define CONFIG_CRYPTO_SHA1 1
#define CONFIG_CRYPTO_SHA256_MODULE 1
#define CONFIG_CRYPTO_SHA512_MODULE 1
#define CONFIG_CRYPTO_BLOWFISH_MODULE 1
#define CONFIG_CRYPTO_TWOFISH_MODULE 1
#define CONFIG_CRYPTO_SERPENT_MODULE 1
-#define CONFIG_CRYPTO_AES_MODULE 1
+#define CONFIG_CRYPTO_AES_586_MODULE 1
#define CONFIG_CRYPTO_CAST5_MODULE 1
#define CONFIG_CRYPTO_CAST6_MODULE 1
#define CONFIG_CRYPTO_TEA_MODULE 1
#define BIO_SEG_VALID 3 /* nr_hw_seg valid */
#define BIO_CLONED 4 /* doesn't own data */
#define BIO_BOUNCED 5 /* bio is a bounce bio */
+#define BIO_USER_MAPPED 6 /* contains user pages */
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
/*
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
unsigned long, unsigned int, int);
-extern void bio_unmap_user(struct bio *, int);
+extern void bio_unmap_user(struct bio *);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
+extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
+extern int bio_uncopy_user(struct bio *);
#ifdef CONFIG_HIGHMEM
/*
extern void blk_run_queue(request_queue_t *);
extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
extern struct request *blk_rq_map_user(request_queue_t *, int, void __user *, unsigned int);
-extern int blk_rq_unmap_user(struct request *, void __user *, struct bio *, unsigned int);
+extern int blk_rq_unmap_user(struct request *, struct bio *, unsigned int);
extern int blk_execute_rq(request_queue_t *, struct gendisk *, struct request *);
static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
int generic_cont_expand(struct inode *inode, loff_t size) ;
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_sync_page(struct page *);
+void flush_inode_pages (struct inode * inode);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
* data structure for /proc/sys/... files
*/
int do_reset_coda_vfs_stats( ctl_table * table, int write, struct file * filp,
- void __user * buffer, size_t * lenp );
+ void __user * buffer, size_t * lenp, loff_t * ppos );
int do_reset_coda_cache_inv_stats( ctl_table * table, int write,
struct file * filp, void __user * buffer,
- size_t * lenp );
+ size_t * lenp, loff_t * ppos );
/* these functions are called to form the content of /proc/fs/coda/... files */
int coda_vfs_stats_get_info( char * buffer, char ** start, off_t offset,
COMPATIBLE_IOCTL(DVD_AUTH)
/* Big L */
ULONG_IOCTL(LOOP_SET_FD)
+ULONG_IOCTL(LOOP_CHANGE_FD)
COMPATIBLE_IOCTL(LOOP_CLR_FD)
COMPATIBLE_IOCTL(LOOP_GET_STATUS64)
COMPATIBLE_IOCTL(LOOP_SET_STATUS64)
struct vc_data;
struct console_font_op;
+struct console_font;
struct module;
/*
void (*con_bmove)(struct vc_data *, int, int, int, int, int, int);
int (*con_switch)(struct vc_data *);
int (*con_blank)(struct vc_data *, int, int);
- int (*con_font_op)(struct vc_data *, struct console_font_op *);
+ int (*con_font_set)(struct vc_data *, struct console_font *, unsigned);
+ int (*con_font_get)(struct vc_data *, struct console_font *);
+ int (*con_font_default)(struct vc_data *, struct console_font *, char *);
+ int (*con_font_copy)(struct vc_data *, int);
int (*con_resize)(struct vc_data *, unsigned int, unsigned int);
int (*con_set_palette)(struct vc_data *, unsigned char *);
int (*con_scrolldelta)(struct vc_data *, int);
unsigned long vc_pos; /* Cursor address */
/* fonts */
unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */
- struct console_font_op vc_font; /* Current VC font set */
+ struct console_font vc_font; /* Current VC font set */
unsigned short vc_video_erase_char; /* Background erase character */
/* VT terminal data */
unsigned int vc_state; /* Escape sequence parser state */
struct super_block *d_sb; /* The root of the dentry tree */
int d_mounted;
void *d_fsdata; /* fs-specific data */
+ void * d_extra_attributes; /* TUX-specific data */
struct rcu_head d_rcu;
struct dcookie_struct *d_cookie; /* cookie, if any */
struct hlist_node d_hash; /* lookup hash list */
extern void shrink_dcache_parent(struct dentry *);
extern void shrink_dcache_anon(struct hlist_head *);
extern int d_invalidate(struct dentry *);
+extern void flush_dentry_attributes(void);
/* only used at mount-time */
extern struct dentry * d_alloc_root(struct inode *);
/* validate "insecure" dentry pointer */
extern int d_validate(struct dentry *, struct dentry *);
+char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
+ struct dentry *root, struct vfsmount *rootmnt,
+ char *buffer, int buflen);
+
extern char * d_path(struct dentry *, struct vfsmount *, char *, int);
-
+
/* Allocation counts.. */
/**
#ifndef _DVBOSD_H_
#define _DVBOSD_H_
+#include <linux/compiler.h>
+
typedef enum {
// All functions return -2 on "not open"
OSD_Close=1, // ()
#ifndef _DVBVIDEO_H_
#define _DVBVIDEO_H_
+#include <linux/compiler.h>
+
#ifdef __KERNEL__
#include <linux/types.h>
#else
#include <linux/types.h>
#include <asm/elf.h>
+#ifndef elf_read_implies_exec
+ /* Executables for which elf_read_implies_exec() returns TRUE will
+ have the READ_IMPLIES_EXEC personality flag set automatically.
+ Override in asm/elf.h as needed. */
+# define elf_read_implies_exec(ex, have_pt_gnu_stack) 0
+#endif
+
/* 32-bit ELF base types. */
typedef __u32 Elf32_Addr;
typedef __u16 Elf32_Half;
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
+/* Defined for TUX async IO */
+#define EWOULDBLOCKIO 530 /* Would block due to block-IO */
+
#endif
#endif
#define FBIOGETCMAP 0x4604
#define FBIOPUTCMAP 0x4605
#define FBIOPAN_DISPLAY 0x4606
+#ifdef __KERNEL__
+#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor_user)
+#else
#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor)
+#endif
/* 0x4607-0x460B are defined below */
/* #define FBIOGET_MONITORSPEC 0x460C */
/* #define FBIOPUT_MONITORSPEC 0x460D */
struct device;
struct file;
+struct fb_cmap_user {
+ __u32 start; /* First entry */
+ __u32 len; /* Number of entries */
+ __u16 __user *red; /* Red values */
+ __u16 __user *green;
+ __u16 __user *blue;
+ __u16 __user *transp; /* transparency, can be NULL */
+};
+
+struct fb_image_user {
+ __u32 dx; /* Where to place image */
+ __u32 dy;
+ __u32 width; /* Size of image */
+ __u32 height;
+ __u32 fg_color; /* Only used when a mono bitmap */
+ __u32 bg_color;
+ __u8 depth; /* Depth of the image */
+ const char __user *data; /* Pointer to image data */
+ struct fb_cmap_user cmap; /* color map info */
+};
+
+struct fb_cursor_user {
+ __u16 set; /* what to set */
+ __u16 enable; /* cursor on/off */
+ __u16 rop; /* bitop operation */
+ const char __user *mask; /* cursor mask bits */
+ struct fbcurpos hot; /* cursor hot spot */
+ struct fb_image_user image; /* Cursor image */
+};
+
/*
* Register/unregister for framebuffer events
*/
/* drivers/video/fbcmap.c */
extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp);
extern void fb_dealloc_cmap(struct fb_cmap *cmap);
-extern int fb_copy_cmap(struct fb_cmap *from, struct fb_cmap *to, int fsfromto);
-extern int fb_set_cmap(struct fb_cmap *cmap, int kspc, struct fb_info *fb_info);
+extern int fb_copy_cmap(struct fb_cmap *from, struct fb_cmap *to);
+extern int fb_cmap_to_user(struct fb_cmap *from, struct fb_cmap_user *to);
+extern int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *fb_info);
+extern int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *fb_info);
extern struct fb_cmap *fb_default_cmap(int len);
extern void fb_invert_cmaps(void);
struct files_struct *get_files_struct(struct task_struct *);
void FASTCALL(put_files_struct(struct files_struct *fs));
+extern int dupfd(struct file *file, unsigned int start);
+
#endif /* __LINUX_FILE_H */
#define FMODE_READ 1
#define FMODE_WRITE 2
+/* Internal kernel extensions */
+#define FMODE_LSEEK 4
+#define FMODE_PREAD 8
+#define FMODE_PWRITE FMODE_PREAD /* These go hand in hand */
+
#define RW_MASK 1
#define RWA_MASK 2
#define READ 0
struct block_device * bd_contains;
unsigned bd_block_size;
struct hd_struct * bd_part;
+ /* number of times partitions within this device have been opened. */
unsigned bd_part_count;
int bd_invalidated;
struct gendisk * bd_disk;
extern ssize_t generic_file_sendfile(struct file *, loff_t *, size_t, read_actor_t, void *);
extern void do_generic_mapping_read(struct address_space *mapping,
struct file_ra_state *, struct file *,
- loff_t *, read_descriptor_t *, read_actor_t);
+ loff_t *, read_descriptor_t *, read_actor_t, int);
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern ssize_t generic_file_direct_IO(int rw, struct kiocb *iocb,
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
extern loff_t remote_llseek(struct file *file, loff_t offset, int origin);
extern int generic_file_open(struct inode * inode, struct file * filp);
+extern int nonseekable_open(struct inode * inode, struct file * filp);
static inline void do_generic_file_read(struct file * filp, loff_t *ppos,
read_descriptor_t * desc,
- read_actor_t actor)
+ read_actor_t actor, int nonblock)
{
do_generic_mapping_read(filp->f_mapping,
&filp->f_ra,
filp,
ppos,
desc,
- actor);
+ actor,
+ nonblock);
}
ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
extern int simple_pin_fs(char *name, struct vfsmount **mount, int *count);
extern void simple_release_fs(struct vfsmount **mount, int *count);
+extern ssize_t simple_read_from_buffer(void __user *, size_t, loff_t *, const void *, size_t);
+
extern int inode_change_ok(struct inode *, struct iattr *);
extern int __must_check inode_setattr(struct inode *, struct iattr *);
void gs_set_termios (struct tty_struct * tty,
struct termios * old_termios);
int gs_init_port(struct gs_port *port);
-int gs_setserial(struct gs_port *port, struct serial_struct *sp);
-int gs_getserial(struct gs_port *port, struct serial_struct *sp);
+int gs_setserial(struct gs_port *port, struct serial_struct __user *sp);
+int gs_getserial(struct gs_port *port, struct serial_struct __user *sp);
void gs_got_break(struct gs_port *port);
extern int gs_debug;
struct gendisk {
int major; /* major number of driver */
int first_minor;
- int minors;
+ int minors; /* maximum number of minors, =1 for
+ * disks that can't be partitioned. */
char disk_name[32]; /* name of major driver */
struct hd_struct **part; /* [indexed by minor] */
struct block_device_operations *fops;
return vma->vm_flags & VM_HUGETLB;
}
-int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *);
+int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
void zap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
#define ICMPV6_MGM_REPORT 131
#define ICMPV6_MGM_REDUCTION 132
-/* definitions for MLDv2 */
-
-#define MLD2_MODE_IS_INCLUDE 1
-#define MLD2_MODE_IS_EXCLUDE 2
-#define MLD2_CHANGE_TO_INCLUDE 3
-#define MLD2_CHANGE_TO_EXCLUDE 4
-#define MLD2_ALLOW_NEW_SOURCES 5
-#define MLD2_BLOCK_OLD_SOURCES 6
+#define ICMPV6_NI_QUERY 139
+#define ICMPV6_NI_REPLY 140
#define ICMPV6_MLD2_REPORT 143
-#define MLD2_ALL_MCR_INIT { { { 0xff,0x02,0,0,0,0,0,0,0,0,0,0,0,0,0,0x16 } } }
+
+#define ICMPV6_DHAAD_REQUEST 144
+#define ICMPV6_DHAAD_REPLY 145
+#define ICMPV6_MOBILE_PREFIX_SOL 146
+#define ICMPV6_MOBILE_PREFIX_ADV 147
/*
* Codes for Destination Unreachable
__u32 data[8];
};
+/*
+ * Definitions for MLDv2
+ */
+#define MLD2_MODE_IS_INCLUDE 1
+#define MLD2_MODE_IS_EXCLUDE 2
+#define MLD2_CHANGE_TO_INCLUDE 3
+#define MLD2_CHANGE_TO_EXCLUDE 4
+#define MLD2_ALLOW_NEW_SOURCES 5
+#define MLD2_BLOCK_OLD_SOURCES 6
+
+#define MLD2_ALL_MCR_INIT { { { 0xff,0x02,0,0,0,0,0,0,0,0,0,0,0,0,0,0x16 } } }
+
#ifdef __KERNEL__
#include <linux/netdevice.h>
* out).
*/
struct ipmi_msg
+{
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned short data_len;
+ unsigned char __user *data;
+};
+
+struct kernel_ipmi_msg
{
unsigned char netfn;
unsigned char cmd;
ipmi_user_t user;
struct ipmi_addr addr;
long msgid;
- struct ipmi_msg msg;
+ struct kernel_ipmi_msg msg;
/* The user_msg_data is the data supplied when a message was
sent, if this is a response to a sent message. If this is
int ipmi_request(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct ipmi_msg *msg,
+ struct kernel_ipmi_msg *msg,
void *user_msg_data,
int priority);
int ipmi_request_settime(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct ipmi_msg *msg,
+ struct kernel_ipmi_msg *msg,
void *user_msg_data,
int priority,
int max_retries,
int ipmi_request_with_source(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct ipmi_msg *msg,
+ struct kernel_ipmi_msg *msg,
void *user_msg_data,
int priority,
unsigned char source_address,
int ipmi_request_supply_msgs(ipmi_user_t user,
struct ipmi_addr *addr,
long msgid,
- struct ipmi_msg *msg,
+ struct kernel_ipmi_msg *msg,
void *user_msg_data,
void *supplied_smi,
struct ipmi_recv_msg *supplied_recv,
struct console_font_op {
unsigned int op; /* operation code KD_FONT_OP_* */
unsigned int flags; /* KD_FONT_FLAG_* */
+ unsigned int width, height; /* font size */
+ unsigned int charcount;
+ unsigned char __user *data; /* font data with height fixed to 32 */
+};
+
+struct console_font {
unsigned int width, height; /* font size */
unsigned int charcount;
unsigned char *data; /* font data with height fixed to 32 */
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
-/* Cannot easily do prefetch unfortunately */
#define hlist_for_each(pos, head) \
for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
pos = pos->next)
#define hlist_for_each_safe(pos, n, head) \
- for (pos = (head)->first; n = pos ? pos->next : 0, pos; \
+ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
pos = n)
/**
pos && ({ n = pos->next; 1; }) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = n)
+
+/**
+ * hlist_for_each_entry_rcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop counter.
+ * @pos: the &struct hlist_node to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
+ for (pos = (head)->first; \
+ pos && ({ prefetch(pos->next); 1;}) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next, ({ smp_read_barrier_depends(); 0; }) )
+
#else
#warning "don't include kernel headers in userspace"
#endif /* __KERNEL__ */
extern unsigned long vmalloc_earlyreserve;
extern int page_cluster;
+extern int sysctl_legacy_va_layout;
+
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
unsigned long addr);
struct file *shmem_file_setup(char * name, loff_t size, unsigned long flags);
-int shmem_lock(struct file * file, int lock, struct user_struct *);
+int shmem_lock(struct file *file, int lock, struct user_struct *user);
int shmem_zero_setup(struct vm_area_struct *);
static inline int can_do_mlock(void)
return 1;
return 0;
}
-
+extern int user_shm_lock(size_t, struct user_struct *);
+extern void user_shm_unlock(size_t, struct user_struct *);
/*
* Parameter block passed down to zap_pte_range in exceptional cases.
unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);
-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+extern unsigned long get_unmapped_area_prot(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, int);
+
+
+static inline unsigned long get_unmapped_area(struct file * file, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
+}
extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
-extern unsigned int nr_used_zone_pages(void);
-
extern struct page * vmalloc_to_page(void *addr);
extern struct page * follow_page(struct mm_struct *mm, unsigned long address,
int write);
#define for_each_zone(zone) \
for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
+static inline int is_highmem_idx(int idx)
+{
+ return (idx == ZONE_HIGHMEM);
+}
+
+static inline int is_normal_idx(int idx)
+{
+ return (idx == ZONE_NORMAL);
+}
/**
* is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
*/
static inline int is_highmem(struct zone *zone)
{
- return (zone - zone->zone_pgdat->node_zones == ZONE_HIGHMEM);
+ return (is_highmem_idx(zone - zone->zone_pgdat->node_zones));
}
static inline int is_normal(struct zone *zone)
{
- return (zone - zone->zone_pgdat->node_zones == ZONE_NORMAL);
+ return (is_normal_idx(zone - zone->zone_pgdat->node_zones));
}
/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
- void __user *, size_t *);
+ void __user *, size_t *, loff_t *);
int lower_zone_protection_sysctl_handler(struct ctl_table *, int, struct file *,
- void __user *, size_t *);
+ void __user *, size_t *, loff_t *);
#include <linux/topology.h>
/* Returns the number of the current Node. */
/* Common Flash Interface structures
* See http://support.intel.com/design/flash/technote/index.htm
- * $Id: cfi.h,v 1.44 2004/07/13 22:32:52 dwmw2 Exp $
+ * $Id: cfi.h,v 1.45 2004/07/20 02:44:27 dwmw2 Exp $
*/
#ifndef __MTD_CFI_H__
static inline void cfi_udelay(int us)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
unsigned long t = us * HZ / 1000000;
if (t) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(t);
return;
}
-#endif
udelay(us);
cond_resched();
}
unsigned mt_segno; /* the segment to read or write */
unsigned mt_mode; /* modes for read/write (sync/async etc.) */
int mt_result; /* result of r/w request, not of the ioctl */
- void *mt_data; /* User space buffer: must be 29kb */
+ void __user *mt_data; /* User space buffer: must be 29kb */
};
/* get tape capacity (ftape/zftape)
int create_mode;
};
-enum { MAX_NESTED_LINKS = 5 };
+enum { MAX_NESTED_LINKS = 8 };
struct nameidata {
struct dentry *dentry;
#define LOOKUP_CONTINUE 4
#define LOOKUP_PARENT 16
#define LOOKUP_NOALT 32
+#define LOOKUP_ATOMIC 64
+
/*
* Intent data
*/
};
struct iovec;
+struct kvec;
extern int sock_wake_async(struct socket *sk, int how, int band);
extern int sock_register(struct net_proto_family *fam);
extern unsigned long net_random(void);
extern void net_srandom(unsigned long);
+extern int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
+ struct kvec *vec, size_t num, size_t len);
+extern int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
+ struct kvec *vec, size_t num,
+ size_t len, int flags);
+
#ifndef CONFIG_SMP
#define SOCKOPS_WRAPPED(name) name
#define SOCKOPS_WRAP(name, fam)
struct Qdisc *qdisc;
struct Qdisc *qdisc_sleeping;
- struct Qdisc *qdisc_list;
struct Qdisc *qdisc_ingress;
+ struct list_head qdisc_list;
unsigned long tx_queue_len; /* Max frames per queue allowed */
/* ingress path synchronizer */
/* bridge stuff */
struct net_bridge_port *br_port;
-#ifdef CONFIG_NET_FASTROUTE
-#define NETDEV_FASTROUTE_HMASK 0xF
- /* Semi-private data. Keep it at the end of device struct. */
- rwlock_t fastpath_lock;
- struct dst_entry *fastpath[NETDEV_FASTROUTE_HMASK+1];
-#endif
#ifdef CONFIG_NET_DIVERT
/* this will get initialized at each interface type init routine */
struct divert_blk *divert;
extern atomic_t netdev_dropping;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff **pskb, int inward);
-#ifdef CONFIG_NET_FASTROUTE
-extern int netdev_fastroute;
-extern int netdev_fastroute_obstacles;
-extern void dev_clear_fastroute(struct net_device *dev);
-#endif
#ifdef CONFIG_SYSCTL
extern char *net_sysctl_strdup(const char *s);
u32 c_vers;
unsigned long c_timestamp;
union {
- struct iovec u_vec;
+ struct kvec u_vec;
u32 u_status;
} c_u;
};
int, struct file *);
void nfsd_close(struct file *);
int nfsd_read(struct svc_rqst *, struct svc_fh *,
- loff_t, struct iovec *,int, unsigned long *);
+ loff_t, struct kvec *,int, unsigned long *);
int nfsd_write(struct svc_rqst *, struct svc_fh *,
- loff_t, struct iovec *,int, unsigned long, int *);
+ loff_t, struct kvec *,int, unsigned long, int *);
int nfsd_readlink(struct svc_rqst *, struct svc_fh *,
char *, int *);
int nfsd_symlink(struct svc_rqst *, struct svc_fh *,
struct svc_fh fh;
__u32 offset;
__u32 count;
- struct iovec vec[RPCSVC_MAXPAGES];
+ struct kvec vec[RPCSVC_MAXPAGES];
int vlen;
};
svc_fh fh;
__u32 offset;
int len;
- struct iovec vec[RPCSVC_MAXPAGES];
+ struct kvec vec[RPCSVC_MAXPAGES];
int vlen;
};
struct svc_fh fh;
__u64 offset;
__u32 count;
- struct iovec vec[RPCSVC_MAXPAGES];
+ struct kvec vec[RPCSVC_MAXPAGES];
int vlen;
};
__u32 count;
int stable;
int len;
- struct iovec vec[RPCSVC_MAXPAGES];
+ struct kvec vec[RPCSVC_MAXPAGES];
int vlen;
};
stateid_t rd_stateid; /* request */
u64 rd_offset; /* request */
u32 rd_length; /* request */
- struct iovec rd_iov[RPCSVC_MAXPAGES];
+ struct kvec rd_iov[RPCSVC_MAXPAGES];
int rd_vlen;
struct svc_rqst *rd_rqstp; /* response */
u64 wr_offset; /* request */
u32 wr_stable_how; /* request */
u32 wr_buflen; /* request */
- struct iovec wr_vec[RPCSVC_MAXPAGES]; /* request */
+ struct kvec wr_vec[RPCSVC_MAXPAGES]; /* request */
int wr_vlen;
u32 wr_bytes_written; /* response */
extern struct pci_dev *isa_bridge;
#endif
-#ifndef CONFIG_PCI_USE_VECTOR
+struct msix_entry {
+ u16 vector; /* kernel uses to write allocated vector */
+ u16 entry; /* driver uses to specify entry, OS writes */
+};
+
+#ifndef CONFIG_PCI_MSI
static inline void pci_scan_msi_device(struct pci_dev *dev) {}
static inline int pci_enable_msi(struct pci_dev *dev) {return -1;}
+static inline void pci_disable_msi(struct pci_dev *dev) {}
+static inline int pci_enable_msix(struct pci_dev* dev,
+ struct msix_entry *entries, int nvec) {return -1;}
+static inline void pci_disable_msix(struct pci_dev *dev) {}
static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
#else
extern void pci_scan_msi_device(struct pci_dev *dev);
extern int pci_enable_msi(struct pci_dev *dev);
+extern void pci_disable_msi(struct pci_dev *dev);
+extern int pci_enable_msix(struct pci_dev* dev,
+ struct msix_entry *entries, int nvec);
+extern void pci_disable_msix(struct pci_dev *dev);
extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
-extern int msi_alloc_vectors(struct pci_dev* dev, int *vector, int nvec);
-extern int msi_free_vectors(struct pci_dev* dev, int *vector, int nvec);
#endif
#endif /* CONFIG_PCI */
#define PCI_DEVICE_ID_TTI_HPT302 0x0006
#define PCI_DEVICE_ID_TTI_HPT371 0x0007
#define PCI_DEVICE_ID_TTI_HPT374 0x0008
+#define PCI_DEVICE_ID_TTI_HPT372N 0x0009 // apparently a 372N variant?
#define PCI_VENDOR_ID_VIA 0x1106
#define PCI_DEVICE_ID_VIA_8763_0 0x0198
#define PCI_DEVICE_ID_VIA_8380_0 0x0204
+#define PCI_DEVICE_ID_VIA_3238_0 0x0238
#define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259
+#define PCI_DEVICE_ID_VIA_3269_0 0x0269
#define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282
#define PCI_DEVICE_ID_VIA_8363_0 0x0305
#define PCI_DEVICE_ID_VIA_8371_0 0x0391
#define PCI_DEVICE_ID_VIA_82C686_6 0x3068
#define PCI_DEVICE_ID_VIA_8233_0 0x3074
#define PCI_DEVICE_ID_VIA_8633_0 0x3091
-#define PCI_DEVICE_ID_VIA_8367_0 0x3099
+#define PCI_DEVICE_ID_VIA_8367_0 0x3099
#define PCI_DEVICE_ID_VIA_8653_0 0x3101
-#define PCI_DEVICE_ID_VIA_8622 0x3102
+#define PCI_DEVICE_ID_VIA_8622 0x3102
#define PCI_DEVICE_ID_VIA_8233C_0 0x3109
#define PCI_DEVICE_ID_VIA_8361 0x3112
#define PCI_DEVICE_ID_VIA_XM266 0x3116
#define PCI_DEVICE_ID_VIA_PT880 0x3258
#define PCI_DEVICE_ID_VIA_P4M400 0x3209
#define PCI_DEVICE_ID_VIA_8237 0x3227
+#define PCI_DEVICE_ID_VIA_3296_0 0x0296
#define PCI_DEVICE_ID_VIA_86C100A 0x6100
#define PCI_DEVICE_ID_VIA_8231 0x8231
#define PCI_DEVICE_ID_VIA_8231_4 0x8235
#define PCI_DEVICE_ID_INTEL_82865_IG 0x2572
#define PCI_DEVICE_ID_INTEL_82875_HB 0x2578
#define PCI_DEVICE_ID_INTEL_82875_IG 0x257b
+#define PCI_DEVICE_ID_INTEL_82915G_HB 0x2580
+#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582
#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640
#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641
#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642
*/
enum {
MMAP_PAGE_ZERO = 0x0100000,
+ ADDR_COMPAT_LAYOUT = 0x0200000,
+ READ_IMPLIES_EXEC = 0x0400000,
ADDR_LIMIT_32BIT = 0x0800000,
SHORT_INODE = 0x1000000,
WHOLE_SECONDS = 0x2000000,
ADDR_LIMIT_3GB = 0x8000000,
};
+/*
+ * Security-relevant compatibility flags that must be
+ * cleared upon setuid or setgid exec:
+ */
+#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC)
+
/*
* Personality types.
*
struct tc_police
{
__u32 index;
-#ifdef CONFIG_NET_CLS_ACT
- int refcnt;
- int bindcnt;
-#endif
-/* Turned off because it requires new tc
- * to work (for now maintain ABI)
- *
-#ifdef CONFIG_NET_CLS_ACT
- __u32 capab;
-#endif
-*/
int action;
#define TC_POLICE_UNSPEC TC_ACT_UNSPEC
#define TC_POLICE_OK TC_ACT_OK
__u32 mtu;
struct tc_ratespec rate;
struct tc_ratespec peakrate;
+ int refcnt;
+ int bindcnt;
+ __u32 capab;
};
struct tcf_t
TCA_U32_DIVISOR,
TCA_U32_SEL,
TCA_U32_POLICE,
-#ifdef CONFIG_NET_CLS_ACT
TCA_U32_ACT,
-#endif
-#ifdef CONFIG_NET_CLS_IND
TCA_U32_INDEV,
-#endif
+ TCA_U32_PCNT,
__TCA_U32_MAX
};
__u32 val;
int off;
int offmask;
-#ifdef CONFIG_CLS_U32_PERF
- unsigned long kcnt;
-#endif
};
struct tc_u32_sel
short hoff;
__u32 hmask;
-#ifdef CONFIG_CLS_U32_PERF
- unsigned long rcnt;
- unsigned long rhit;
-#endif
struct tc_u32_key keys[0];
};
+#ifdef CONFIG_CLS_U32_PERF
+struct tc_u32_pcnt
+{
+ __u64 rcnt;
+ __u64 rhit;
+ __u64 kcnts[0];
+};
+#endif
/* Flags */
#define TC_U32_TERMINAL 1
TCA_FW_UNSPEC,
TCA_FW_CLASSID,
TCA_FW_POLICE,
-#ifdef CONFIG_NET_CLS_IND
- TCA_FW_INDEV,
-#endif
-#ifdef CONFIG_NET_CLS_ACT
- TCA_FW_ACT,
-#endif
+ TCA_FW_INDEV, /* used by CONFIG_NET_CLS_IND */
+ TCA_FW_ACT, /* used by CONFIG_NET_CLS_ACT */
__TCA_FW_MAX
};
}
static inline
-int set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
+void set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
- int ret = 0;
if (ufdset)
- ret = __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
- if (ret)
- return -EFAULT;
- return 0;
+ __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
}
static inline
extern struct file_operations random_fops, urandom_fops;
#endif
+unsigned int get_random_int(void);
+unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
+
#endif /* __KERNEL___ */
#endif /* _LINUX_RANDOM_H */
static inline void set_le_key_k_offset (int version, struct key * key, loff_t offset)
{
(version == KEY_FORMAT_3_5) ?
- (key->u.k_offset_v1.k_offset = cpu_to_le32 (offset)) : /* jdm check */
- (set_offset_v2_k_offset( &(key->u.k_offset_v2), offset ));
+ (void)(key->u.k_offset_v1.k_offset = cpu_to_le32 (offset)) : /* jdm check */
+ (void)(set_offset_v2_k_offset( &(key->u.k_offset_v2), offset ));
}
static inline void set_le_key_k_type (int version, struct key * key, int type)
{
(version == KEY_FORMAT_3_5) ?
- (key->u.k_offset_v1.k_uniqueness = cpu_to_le32(type2uniqueness(type))):
- (set_offset_v2_k_type( &(key->u.k_offset_v2), type ));
+ (void)(key->u.k_offset_v1.k_uniqueness = cpu_to_le32(type2uniqueness(type))):
+ (void)(set_offset_v2_k_type( &(key->u.k_offset_v2), type ));
}
static inline void set_le_ih_k_type (struct item_head * ih, int type)
{
extern struct semaphore rtnl_sem;
-#define rtnl_exlock() do { } while(0)
-#define rtnl_exunlock() do { } while(0)
-#define rtnl_exlock_nowait() (0)
-
#define rtnl_shlock() down(&rtnl_sem)
#define rtnl_shlock_nowait() down_trylock(&rtnl_sem)
#include <linux/aio.h>
+extern unsigned long
+arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+ unsigned long, unsigned long);
+
+extern unsigned long
+arch_get_unmapped_exec_area(struct file *, unsigned long, unsigned long,
+ unsigned long, unsigned long);
+extern unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+extern void arch_unmap_area(struct vm_area_struct *area);
+extern void arch_unmap_area_topdown(struct vm_area_struct *area);
+
+
struct mm_struct {
struct vm_area_struct * mmap; /* list of VMAs */
struct rb_root mm_rb;
struct vm_area_struct * mmap_cache; /* last find_vma result */
+ unsigned long (*get_unmapped_area) (struct file *filp,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags);
+ unsigned long (*get_unmapped_exec_area) (struct file *filp,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags);
+ void (*unmap_area) (struct vm_area_struct *area);
+ unsigned long mmap_base; /* base of mmap area */
unsigned long free_area_cache; /* first hole */
- unsigned long non_executable_cache; /* last hole top */
- unsigned long mmap_top; /* top of mmap area */
pgd_t * pgd;
atomic_t mm_users; /* How many users with user space? */
atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
/* Architecture-specific MM context */
mm_context_t context;
+ /* Token based thrashing protection. */
+ unsigned long swap_token_time;
+ char recent_pagein;
+
/* coredumping support */
int core_waiters;
struct completion *core_startup_done, core_done;
unsigned int time_slice, first_time_slice;
struct list_head tasks;
+ /*
+ * ptrace_list/ptrace_children forms the list of my children
+ * that were stolen by a ptracer.
+ */
struct list_head ptrace_children;
struct list_head ptrace_list;
*/
struct task_struct *real_parent; /* real parent process (when being debugged) */
struct task_struct *parent; /* parent process */
+ /*
+ * children/sibling forms the list of my children plus the
+ * tasks I'm ptracing.
+ */
struct list_head children; /* list of my children */
struct list_head sibling; /* linkage in my parent's children list */
struct task_struct *group_leader; /* threadgroup leader */
int (*notifier)(void *priv);
void *notifier_data;
sigset_t *notifier_mask;
+
+ /* TUX state */
+ void *tux_info;
+ void (*tux_exit)(void);
+
void *security;
struct audit_context *audit_context;
#endif /* CONFIG_SMP */
+#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
+extern void arch_pick_mmap_layout(struct mm_struct *mm);
+#else
+static inline void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+ mm->mmap_base = TASK_UNMAPPED_BASE;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+}
+#endif
+
#endif /* __KERNEL__ */
#endif
/* PPC CPM type number */
#define PORT_CPM 58
-/* Marvell MPSC for PPC & MIPS */
-#define PORT_MPSC 59
+/* MPC52xx type numbers */
+#define PORT_MPC52xx 59
#ifdef __KERNEL__
time_t shm_ctim;
pid_t shm_cprid;
pid_t shm_lprid;
- struct user_struct * mlock_user;
+ struct user_struct *mlock_user;
};
/* shm_mode upper byte flags */
* want to keep them across layers you have to do a skb_clone()
* first. This is owned by whoever has the skb queued ATM.
*/
- char cb[48];
+ char cb[40];
unsigned int len,
data_len,
extern void skb_init(void);
extern void skb_add_mtu(int mtu);
+struct skb_iter {
+ /* Iteration functions set these */
+ unsigned char *data;
+ unsigned int len;
+
+ /* Private to iteration */
+ unsigned int nextfrag;
+ struct sk_buff *fraglist;
+};
+
+/* Keep iterating until skb_iter_next returns false. */
+extern void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i);
+extern int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i);
+/* Call this if aborting loop before !skb_iter_next */
+extern void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i);
+
+struct tux_req_struct;
+
#ifdef CONFIG_NETFILTER
static inline void nf_conntrack_put(struct nf_ct_info *nfct)
{
extern int move_addr_to_kernel(void __user *uaddr, int ulen, void *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
+struct socket;
+struct file * sock_map_file(struct socket *sock);
+extern int sock_map_fd(struct socket *sock);
+extern struct socket *sockfd_lookup(int fd, int *err);
+
#endif
#endif /* not kernel and not glibc */
#endif /* _LINUX_SOCKET_H */
* read responses (that have a header, and some data pages, and possibly
* a tail) and means we can share some client side routines.
*
- * The xdr_buf.head iovec always points to the first page in the rq_*pages
+ * The xdr_buf.head kvec always points to the first page in the rq_*pages
* list. The xdr_buf.pages pointer points to the second page on that
* list. xdr_buf.tail points to the end of the first page.
* This assumes that the non-page part of an rpc reply will fit
*/
#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE + 2)
-static inline u32 svc_getu32(struct iovec *iov)
+static inline u32 svc_getu32(struct kvec *iov)
{
u32 val, *vp;
vp = iov->iov_base;
iov->iov_len -= sizeof(u32);
return val;
}
-static inline void svc_putu32(struct iovec *iov, u32 val)
+static inline void svc_putu32(struct kvec *iov, u32 val)
{
u32 *vp = iov->iov_base + iov->iov_len;
*vp = val;
xdr_argsize_check(struct svc_rqst *rqstp, u32 *p)
{
char *cp = (char *)p;
- struct iovec *vec = &rqstp->rq_arg.head[0];
+ struct kvec *vec = &rqstp->rq_arg.head[0];
return cp - (char*)vec->iov_base <= vec->iov_len;
}
static inline int
xdr_ressize_check(struct svc_rqst *rqstp, u32 *p)
{
- struct iovec *vec = &rqstp->rq_res.head[0];
+ struct kvec *vec = &rqstp->rq_res.head[0];
char *cp = (char*)p;
vec->iov_len = cp - (char*)vec->iov_base;
* operations and/or has a need for scatter/gather involving pages.
*/
struct xdr_buf {
- struct iovec head[1], /* RPC header + non-page data */
+ struct kvec head[1], /* RPC header + non-page data */
tail[1]; /* Appended after page data */
struct page ** pages; /* Array of contiguous pages */
}
/*
- * Adjust iovec to reflect end of xdr'ed data (RPC client XDR)
+ * Adjust kvec to reflect end of xdr'ed data (RPC client XDR)
*/
static inline int
-xdr_adjust_iovec(struct iovec *iov, u32 *p)
+xdr_adjust_iovec(struct kvec *iov, u32 *p)
{
return iov->iov_len = ((u8 *) p - (u8 *) iov->iov_base);
}
-void xdr_shift_iovec(struct iovec *, int, size_t);
+void xdr_shift_iovec(struct kvec *, int, size_t);
/*
* Maximum number of iov's we use.
/*
* XDR buffer helper functions
*/
-extern int xdr_kmap(struct iovec *, struct xdr_buf *, size_t);
+extern int xdr_kmap(struct kvec *, struct xdr_buf *, size_t);
extern void xdr_kunmap(struct xdr_buf *, size_t);
extern void xdr_shift_buf(struct xdr_buf *, size_t);
extern void _copy_from_pages(char *, struct page **, size_t, size_t);
-extern void xdr_buf_from_iov(struct iovec *, struct xdr_buf *);
+extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, int, int);
extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, int);
extern int read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len);
struct xdr_buf *buf; /* XDR buffer to read/write */
uint32_t *end; /* end of available buffer space */
- struct iovec *iov; /* pointer to the current iovec */
+ struct kvec *iov; /* pointer to the current kvec */
};
extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p);
#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
/* linux/mm/oom_kill.c */
-extern void out_of_memory(void);
+extern void out_of_memory(int gfp_mask);
/* linux/mm/memory.c */
extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *);
extern struct page * lookup_swap_cache(swp_entry_t);
extern struct page * read_swap_cache_async(swp_entry_t, struct vm_area_struct *vma,
unsigned long addr);
+/* linux/mm/thrash.c */
+#ifdef CONFIG_SWAP
+extern struct mm_struct * swap_token_mm;
+extern void grab_swap_token(void);
+extern void __put_swap_token(struct mm_struct *);
+
+static inline int has_swap_token(struct mm_struct *mm)
+{
+ return (mm == swap_token_mm);
+}
+
+static inline void put_swap_token(struct mm_struct *mm)
+{
+ if (has_swap_token(mm))
+ __put_swap_token(mm);
+}
+#else /* CONFIG_SWAP */
+#define put_swap_token(x) do { } while(0)
+#define grab_swap_token() do { } while(0)
+#define has_swap_token(x) 0
+#endif /* CONFIG_SWAP */
/* linux/mm/swapfile.c */
extern long total_swap_pages;
VM_BLOCK_DUMP=24, /* block dump mode */
VM_HUGETLB_GROUP=25, /* permitted hugetlb group */
VM_VFS_CACHE_PRESSURE=26, /* dcache/icache reclaim pressure */
+ VM_LEGACY_VA_LAYOUT=27, /* legacy/compatibility virtual address space layout */
};
NET_DECNET=15,
NET_ECONET=16,
NET_SCTP=17,
+ NET_TUX=18,
};
/* /proc/sys/kernel/random */
NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4,
};
+/* /proc/sys/net/tux/ */
+enum {
+ NET_TUX_DOCROOT = 1,
+ NET_TUX_LOGFILE = 2,
+ NET_TUX_EXTCGI = 3,
+ NET_TUX_STOP = 4,
+ NET_TUX_CLIENTPORT = 5,
+ NET_TUX_LOGGING = 6,
+ NET_TUX_SERVERPORT = 7,
+ NET_TUX_THREADS = 8,
+ NET_TUX_KEEPALIVE_TIMEOUT = 9,
+ NET_TUX_MAX_KEEPALIVE_BW = 10,
+ NET_TUX_DEFER_ACCEPT = 11,
+ NET_TUX_MAX_FREE_REQUESTS = 12,
+ NET_TUX_MAX_CONNECT = 13,
+ NET_TUX_MAX_BACKLOG = 14,
+ NET_TUX_MODE_FORBIDDEN = 15,
+ NET_TUX_MODE_ALLOWED = 16,
+ NET_TUX_MODE_USERSPACE = 17,
+ NET_TUX_MODE_CGI = 18,
+ NET_TUX_CGI_UID = 19,
+ NET_TUX_CGI_GID = 20,
+ NET_TUX_CGIROOT = 21,
+ NET_TUX_LOGENTRY_ALIGN_ORDER = 22,
+ NET_TUX_NONAGLE = 23,
+ NET_TUX_ACK_PINGPONG = 24,
+ NET_TUX_PUSH_ALL = 25,
+ NET_TUX_ZEROCOPY_PARSE = 26,
+ NET_CONFIG_TUX_DEBUG_BLOCKING = 27,
+ NET_TUX_PAGE_AGE_START = 28,
+ NET_TUX_PAGE_AGE_ADV = 29,
+ NET_TUX_PAGE_AGE_MAX = 30,
+ NET_TUX_VIRTUAL_SERVER = 31,
+ NET_TUX_MAX_OBJECT_SIZE = 32,
+ NET_TUX_COMPRESSION = 33,
+ NET_TUX_NOID = 34,
+ NET_TUX_CGI_INHERIT_CPU = 35,
+ NET_TUX_CGI_CPU_MASK = 36,
+ NET_TUX_ZEROCOPY_HEADER = 37,
+ NET_TUX_ZEROCOPY_SENDFILE = 38,
+ NET_TUX_ALL_USERSPACE = 39,
+ NET_TUX_REDIRECT_LOGGING = 40,
+ NET_TUX_REFERER_LOGGING = 41,
+ NET_TUX_MAX_HEADER_LEN = 42,
+ NET_TUX_404_PAGE = 43,
+ NET_TUX_MAX_KEEPALIVES = 44,
+ NET_TUX_IGNORE_QUERY = 45,
+};
+
/* CTL_PROC names: */
/* CTL_FS names: */
void **context);
typedef int proc_handler (ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp);
+ void __user *buffer, size_t *lenp, loff_t *ppos);
extern int proc_dostring(ctl_table *, int, struct file *,
- void __user *, size_t *);
+ void __user *, size_t *, loff_t *);
extern int proc_dointvec(ctl_table *, int, struct file *,
- void __user *, size_t *);
+ void __user *, size_t *, loff_t *);
extern int proc_dointvec_bset(ctl_table *, int, struct file *,
- void __user *, size_t *);
+ void __user *, size_t *, loff_t *);
extern int proc_dointvec_minmax(ctl_table *, int, struct file *,
- void __user *, size_t *);
+ void __user *, size_t *, loff_t *);
extern int proc_dointvec_jiffies(ctl_table *, int, struct file *,
- void __user *, size_t *);
+ void __user *, size_t *, loff_t *);
extern int proc_dointvec_userhz_jiffies(ctl_table *, int, struct file *,
- void __user *, size_t *);
+ void __user *, size_t *, loff_t *);
extern int proc_doulongvec_minmax(ctl_table *, int, struct file *,
- void __user *, size_t *);
+ void __user *, size_t *, loff_t *);
extern int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int,
- struct file *, void __user *, size_t *);
+ struct file *, void __user *, size_t *, loff_t *);
extern int do_sysctl (int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
__u32 cnt; /* increase cwnd by 1 after this number of ACKs */
__u32 last_max_cwnd; /* last maximium snd_cwnd */
__u32 last_cwnd; /* the last snd_cwnd */
+ __u32 last_stamp; /* time when updated last_cwnd */
} bictcp;
};
void unblank_screen(void);
void poke_blanked_console(void);
int con_font_op(int currcons, struct console_font_op *op);
+int con_font_set(int currcons, struct console_font_op *op);
+int con_font_get(int currcons, struct console_font_op *op);
+int con_font_default(int currcons, struct console_font_op *op);
+int con_font_copy(int currcons, struct console_font_op *op);
int con_set_cmap(unsigned char __user *cmap);
int con_get_cmap(unsigned char __user *cmap);
void scrollback(int);
struct ctl_table;
struct file;
int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *,
- void __user *, size_t *);
+ void __user *, size_t *, loff_t *);
void page_writeback_init(void);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
struct mtd_oob_buf {
uint32_t start;
uint32_t length;
- unsigned char *ptr;
+ unsigned char __user *ptr;
};
#define MTD_ABSENT 0
inet6_ifa_finish_destroy(ifp);
}
-#define __in6_ifa_put(idev) atomic_dec(&(idev)->refcnt)
-#define in6_ifa_hold(idev) atomic_inc(&(idev)->refcnt)
+#define __in6_ifa_put(ifp) atomic_dec(&(ifp)->refcnt)
+#define in6_ifa_hold(ifp) atomic_inc(&(ifp)->refcnt)
extern void addrconf_forwarding_on(void);
* This will include the IEEE address token on links that support it.
*/
- word = addr->s6_addr[2] ^ addr->s6_addr32[3];
- word ^= (word>>16);
+ word = addr->s6_addr32[2] ^ addr->s6_addr32[3];
+ word ^= (word >> 16);
word ^= (word >> 8);
return ((word ^ (word >> 4)) & 0x0f);
__u8 pscan_period_mode;
__u8 dev_class[3];
__u16 clock_offset;
- __u8 rssi;
+ __s8 rssi;
} __attribute__ ((packed));
#define HCI_EV_CONN_COMPLETE 0x03
#define ICMP_INC_STATS(field) SNMP_INC_STATS(icmp_statistics, field)
#define ICMP_INC_STATS_BH(field) SNMP_INC_STATS_BH(icmp_statistics, field)
#define ICMP_INC_STATS_USER(field) SNMP_INC_STATS_USER(icmp_statistics, field)
-#define ICMP_INC_STATS_FIELD(offt) \
- (*((unsigned long *) ((void *) \
- per_cpu_ptr(icmp_statistics[!in_softirq()],\
- smp_processor_id()) + offt)))++
-#define ICMP_INC_STATS_BH_FIELD(offt) \
- (*((unsigned long *) ((void *) \
- per_cpu_ptr(icmp_statistics[0], \
- smp_processor_id()) + offt)))++
-#define ICMP_INC_STATS_USER_FIELD(offt) \
- (*((unsigned long *) ((void *) \
- per_cpu_ptr(icmp_statistics[1], \
- smp_processor_id()) + offt)))++
extern void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info);
extern int icmp_rcv(struct sk_buff *skb);
#include <linux/ip.h>
+enum {
+ INET_ECN_NOT_ECT = 0,
+ INET_ECN_ECT_1 = 1,
+ INET_ECN_ECT_0 = 2,
+ INET_ECN_CE = 3,
+ INET_ECN_MASK = 3,
+};
+
static inline int INET_ECN_is_ce(__u8 dsfield)
{
- return (dsfield&3) == 3;
+ return (dsfield & INET_ECN_MASK) == INET_ECN_CE;
}
static inline int INET_ECN_is_not_ce(__u8 dsfield)
{
- return (dsfield&3) == 2;
+ return (dsfield & INET_ECN_MASK) == INET_ECN_ECT_0;
}
static inline int INET_ECN_is_capable(__u8 dsfield)
{
- return (dsfield&2);
+ return (dsfield & INET_ECN_ECT_0);
}
static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
{
- outer &= ~3;
+ outer &= ~INET_ECN_MASK;
if (INET_ECN_is_capable(inner))
- outer |= (inner & 3);
+ outer |= (inner & INET_ECN_MASK);
return outer;
}
-#define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= 2; } while (0)
-#define INET_ECN_dontxmit(sk) do { inet_sk(sk)->tos &= ~3; } while (0)
+#define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0)
+#define INET_ECN_dontxmit(sk) \
+ do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0)
-#define IP6_ECN_flow_init(label) do { \
- (label) &= ~htonl(3<<20); \
+#define IP6_ECN_flow_init(label) do { \
+ (label) &= ~htonl(INET_ECN_MASK << 20); \
} while (0)
-#define IP6_ECN_flow_xmit(sk, label) do { \
- if (INET_ECN_is_capable(inet_sk(sk)->tos)) \
- (label) |= __constant_htons(2 << 4); \
+#define IP6_ECN_flow_xmit(sk, label) do { \
+ if (INET_ECN_is_capable(inet_sk(sk)->tos)) \
+ (label) |= __constant_htons(INET_ECN_ECT_0 << 4); \
} while (0)
static inline void IP_ECN_set_ce(struct iphdr *iph)
u32 check = iph->check;
check += __constant_htons(0xFFFE);
iph->check = check + (check>=0xFFFF);
- iph->tos |= 1;
+ iph->tos |= INET_ECN_CE;
}
static inline void IP_ECN_clear(struct iphdr *iph)
{
- iph->tos &= ~3;
+ iph->tos &= ~INET_ECN_MASK;
}
struct ipv6hdr;
static inline void IP6_ECN_set_ce(struct ipv6hdr *iph)
{
- *(u32*)iph |= htonl(1<<20);
+ *(u32*)iph |= htonl(INET_ECN_CE << 20);
}
static inline void IP6_ECN_clear(struct ipv6hdr *iph)
{
- *(u32*)iph &= ~htonl(3<<20);
+ *(u32*)iph &= ~htonl(INET_ECN_MASK << 20);
}
#define ip6_get_dsfield(iph) ((ntohs(*(u16*)(iph)) >> 4) & 0xFF)
extern int ip_push_pending_frames(struct sock *sk);
extern void ip_flush_pending_frames(struct sock *sk);
+/* datagram.c */
+extern int ip4_datagram_connect(struct sock *sk,
+ struct sockaddr *uaddr, int addr_len);
/*
* Map a multicast IP onto multicast MAC for type Token Ring.
}
struct ip_reply_arg {
- struct iovec iov[1];
- u32 csum;
- int csumoffset; /* u16 offset of csum in iov[0].iov_base */
- /* -1 if not needed */
+ struct kvec iov[1];
+ u32 csum;
+ int csumoffset; /* u16 offset of csum in iov[0].iov_base */
+ /* -1 if not needed */
};
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
*/
int ipv4_doint_and_flush(ctl_table *ctl, int write,
struct file* filp, void __user *buffer,
- size_t *lenp);
+ size_t *lenp, loff_t *ppos);
int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen,
/*
* Store a destination cache entry in a socket
- * For UDP/RAW sockets this is done on udp_connect.
*/
-
static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
struct in6_addr *daddr)
{
extern void ipv6_packet_cleanup(void);
+extern int ip6_datagram_connect(struct sock *sk,
+ struct sockaddr *addr, int addr_len);
+
extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
extern void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, u16 port,
u32 info, u8 *payload);
return(irlap_is_primary(self->lsap->lap->irlap));
}
-extern struct irttp_cb *irttp;
-
#endif /* IRTTP_H */
int write,
struct file * filp,
void __user *buffer,
- size_t *lenp);
+ size_t *lenp,
+ loff_t *ppos);
#endif
extern void inet6_ifinfo_notify(int event,
* nr_node & nr_neigh lists, refcounting and locking
*********************************************************************/
-extern struct hlist_head nr_node_list;
-extern struct hlist_head nr_neigh_list;
-
#define nr_node_hold(__nr_node) \
atomic_inc(&((__nr_node)->refcount))
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H
-#define PSCHED_GETTIMEOFDAY 1
-#define PSCHED_JIFFIES 2
-#define PSCHED_CPU 3
-
-#define PSCHED_CLOCK_SOURCE PSCHED_JIFFIES
-
#include <linux/config.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
-#ifdef CONFIG_X86_TSC
-#include <asm/msr.h>
-#endif
-
-
struct rtattr;
struct Qdisc;
#define TCQ_F_BUILTIN 1
#define TCQ_F_THROTTLED 2
#define TCQ_F_INGRES 4
+ int padded;
struct Qdisc_ops *ops;
- struct Qdisc *next;
u32 handle;
atomic_t refcnt;
struct sk_buff_head q;
struct net_device *dev;
+ struct list_head list;
struct tc_stats stats;
spinlock_t *stats_lock;
* and it will live until better solution will be invented.
*/
struct Qdisc *__parent;
-
- char data[0];
};
+#define QDISC_ALIGN 32
+#define QDISC_ALIGN_CONST (QDISC_ALIGN - 1)
+
+static inline void *qdisc_priv(struct Qdisc *q)
+{
+ return (char *)q + ((sizeof(struct Qdisc) + QDISC_ALIGN_CONST)
+ & ~QDISC_ALIGN_CONST);
+}
+
struct qdisc_rate_table
{
struct tc_ratespec rate;
int refcnt;
};
-static inline void sch_tree_lock(struct Qdisc *q)
-{
- write_lock(&qdisc_tree_lock);
- spin_lock_bh(&q->dev->queue_lock);
-}
-
-static inline void sch_tree_unlock(struct Qdisc *q)
-{
- spin_unlock_bh(&q->dev->queue_lock);
- write_unlock(&qdisc_tree_lock);
-}
-
-static inline void tcf_tree_lock(struct tcf_proto *tp)
-{
- write_lock(&qdisc_tree_lock);
- spin_lock_bh(&tp->q->dev->queue_lock);
-}
-
-static inline void tcf_tree_unlock(struct tcf_proto *tp)
-{
- spin_unlock_bh(&tp->q->dev->queue_lock);
- write_unlock(&qdisc_tree_lock);
-}
-
-
-static inline unsigned long
-cls_set_class(struct tcf_proto *tp, unsigned long *clp, unsigned long cl)
-{
- unsigned long old_cl;
+extern void qdisc_lock_tree(struct net_device *dev);
+extern void qdisc_unlock_tree(struct net_device *dev);
- tcf_tree_lock(tp);
- old_cl = *clp;
- *clp = cl;
- tcf_tree_unlock(tp);
- return old_cl;
-}
+#define sch_tree_lock(q) qdisc_lock_tree((q)->dev)
+#define sch_tree_unlock(q) qdisc_unlock_tree((q)->dev)
+#define tcf_tree_lock(tp) qdisc_lock_tree((tp)->q->dev)
+#define tcf_tree_unlock(tp) qdisc_unlock_tree((tp)->q->dev)
+#define cls_set_class(tp, clp, cl) tcf_set_class(tp, clp, cl)
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
The reason is that, when it is not the same thing as
gettimeofday, it returns invalid timestamp, which is
not updated, when net_bh is active.
-
- So, use PSCHED_CLOCK_SOURCE = PSCHED_CPU on alpha and pentiums
- with rtdsc. And PSCHED_JIFFIES on all other architectures, including [34]86
- and pentiums without rtdsc.
- You can use PSCHED_GETTIMEOFDAY on another architectures,
- which have fast and precise clock source, but it is too expensive.
*/
/* General note about internal clock.
Any clock source returns time intervals, measured in units
- close to 1usec. With source PSCHED_GETTIMEOFDAY it is precisely
+ close to 1usec. With source CONFIG_NET_SCH_CLK_GETTIMEOFDAY it is precisely
microseconds, otherwise something close but different chosen to minimize
arithmetic cost. Ratio usec/internal untis in form nominator/denominator
may be read from /proc/net/psched.
*/
-#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
+#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
typedef struct timeval psched_time_t;
typedef long psched_tdiff_t;
#define PSCHED_US2JIFFIE(usecs) (((usecs)+(1000000/HZ-1))/(1000000/HZ))
#define PSCHED_JIFFIE2US(delay) ((delay)*(1000000/HZ))
-#else /* PSCHED_CLOCK_SOURCE != PSCHED_GETTIMEOFDAY */
+#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
typedef u64 psched_time_t;
typedef long psched_tdiff_t;
-extern psched_time_t psched_time_base;
-
-#if PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES
+#ifdef CONFIG_NET_SCH_CLK_JIFFIES
#if HZ < 96
#define PSCHED_JSCALE 14
#define PSCHED_US2JIFFIE(delay) (((delay)+(1<<PSCHED_JSCALE)-1)>>PSCHED_JSCALE)
#define PSCHED_JIFFIE2US(delay) ((delay)<<PSCHED_JSCALE)
-#elif PSCHED_CLOCK_SOURCE == PSCHED_CPU
+#endif /* CONFIG_NET_SCH_CLK_JIFFIES */
+#ifdef CONFIG_NET_SCH_CLK_CPU
+#include <asm/timex.h>
extern psched_tdiff_t psched_clock_per_hz;
extern int psched_clock_scale;
-
+extern psched_time_t psched_time_base;
+extern cycles_t psched_time_mark;
+
+#define PSCHED_GET_TIME(stamp) \
+do { \
+ cycles_t cur = get_cycles(); \
+ if (sizeof(cycles_t) == sizeof(u32)) { \
+ if (cur <= psched_time_mark) \
+ psched_time_base += 0x100000000ULL; \
+ psched_time_mark = cur; \
+ (stamp) = (psched_time_base + cur)>>psched_clock_scale; \
+ } else { \
+ (stamp) = cur>>psched_clock_scale; \
+ } \
+} while (0)
#define PSCHED_US2JIFFIE(delay) (((delay)+psched_clock_per_hz-1)/psched_clock_per_hz)
#define PSCHED_JIFFIE2US(delay) ((delay)*psched_clock_per_hz)
-#ifdef CONFIG_X86_TSC
-
-#define PSCHED_GET_TIME(stamp) \
-({ u64 __cur; \
- rdtscll(__cur); \
- (stamp) = __cur>>psched_clock_scale; \
-})
-
-#elif defined (__alpha__)
-
-#define PSCHED_WATCHER u32
+#endif /* CONFIG_NET_SCH_CLK_CPU */
-extern PSCHED_WATCHER psched_time_mark;
+#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
-#define PSCHED_GET_TIME(stamp) \
-({ u32 __res; \
- __asm__ __volatile__ ("rpcc %0" : "r="(__res)); \
- if (__res <= psched_time_mark) psched_time_base += 0x100000000UL; \
- psched_time_mark = __res; \
- (stamp) = (psched_time_base + __res)>>psched_clock_scale; \
-})
-
-#else
-
-#error PSCHED_CLOCK_SOURCE=PSCHED_CPU is not supported on this arch.
-
-#endif /* ARCH */
-
-#endif /* PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES */
-
-#endif /* PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY */
-
-#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
+#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
#define PSCHED_TDIFF(tv1, tv2) \
({ \
int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
#define PSCHED_AUDIT_TDIFF(t) ({ if ((t) > 2000000) (t) = 2000000; })
-#else
+#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2))
#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
#define PSCHED_IS_PASTPERFECT(t) ((t) == 0)
#define PSCHED_AUDIT_TDIFF(t)
-#endif
+#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
struct tcf_police
{
extern int tcf_act_police(struct sk_buff **skb, struct tc_action *a);
#endif
+extern unsigned long tcf_set_class(struct tcf_proto *tp, unsigned long *clp,
+ unsigned long cl);
extern int tcf_police(struct sk_buff *skb, struct tcf_police *p);
extern int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *st, spinlock_t *lock);
extern void tcf_police_destroy(struct tcf_police *p);
extern int qdisc_restart(struct net_device *dev);
-static inline void qdisc_run(struct net_device *dev)
-{
- while (!netif_queue_stopped(dev) &&
- qdisc_restart(dev)<0)
- /* NOTHING */;
-}
-
/* Calculate maximal size of packet seen by hard_start_xmit
routine of this device.
*/
SCTP_CMD_REPORT_FWDTSN, /* Report new cumulative TSN Ack. */
SCTP_CMD_PROCESS_FWDTSN, /* Skips were reported, so process further. */
SCTP_CMD_CLEAR_INIT_TAG, /* Clears association peer's inittag. */
+ SCTP_CMD_DEL_NON_PRIMARY, /* Removes non-primary peer transports. */
+ SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */
+ SCTP_CMD_FORCE_PRIM_RETRAN, /* Forces retrans. over primary path. */
SCTP_CMD_LAST
} sctp_verb_t;
SCTP_IERROR_BAD_TAG,
SCTP_IERROR_BIG_GAP,
SCTP_IERROR_DUP_TSN,
+ SCTP_IERROR_HIGH_TSN,
+ SCTP_IERROR_IGNORE_TSN,
+ SCTP_IERROR_NO_DATA,
+ SCTP_IERROR_BAD_STREAM,
} sctp_ierror_t;
const struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_chunk *err_chunk);
+int sctp_eat_data(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ sctp_cmd_seq_t *commands);
/* 3rd level prototypes */
__u32 sctp_generate_tag(const struct sctp_endpoint *);
#define _SNMP_H
#include <linux/cache.h>
-
-/*
- * We use all unsigned longs. Linux will soon be so reliable that even these
- * will rapidly get too small 8-). Seriously consider the IpInReceives count
- * on the 20Gb/s + networks people expect in a few years time!
- */
-
-/*
- * The rule for padding:
- * Best is power of two because then the right structure can be found by a simple
- * shift. The structure should be always cache line aligned.
- * gcc needs n=alignto(cachelinesize, popcnt(sizeof(bla_mib))) shift/add instructions
- * to emulate multiply in case it is not power-of-two. Currently n is always <=3 for
- * all sizes so simple cache line alignment is enough.
- *
- * The best solution would be a global CPU local area , especially on 64 and 128byte
- * cacheline machine it makes a *lot* of sense -AK
- */
-
-struct snmp_item {
- char *name;
- int offset;
-};
-
-#define SNMP_ITEM(mib,entry,procname) { \
- .name = procname, \
- .offset = offsetof(mib, entry), \
-}
-
-#define SNMP_ITEM_SENTINEL { \
- .name = NULL, \
- .offset = 0, \
-}
+#include <linux/snmp.h>
/*
- * RFC 1213: MIB-II
- * RFC 2011 (updates 1213): SNMPv2-MIB-IP
- * RFC 2863: Interfaces Group MIB
- * RFC 2465: IPv6 MIB: General Group
- * draft-ietf-ipv6-rfc2011-update-10.txt: MIB for IP: IP Statistics Tables
+ * Mibs are stored in array of unsigned long.
*/
-struct ipstats_mib
-{
- unsigned long InReceives;
- unsigned long InHdrErrors;
- unsigned long InTooBigErrors;
- unsigned long InNoRoutes;
- unsigned long InAddrErrors;
- unsigned long InUnknownProtos;
- unsigned long InTruncatedPkts;
- unsigned long InDiscards;
- unsigned long InDelivers;
- unsigned long OutForwDatagrams;
- unsigned long OutRequests;
- unsigned long OutDiscards;
- unsigned long OutNoRoutes;
- unsigned long ReasmTimeout;
- unsigned long ReasmReqds;
- unsigned long ReasmOKs;
- unsigned long ReasmFails;
- unsigned long FragOKs;
- unsigned long FragFails;
- unsigned long FragCreates;
- unsigned long InMcastPkts;
- unsigned long OutMcastPkts;
- unsigned long __pad[0];
-};
-
/*
- * RFC 1213: MIB-II ICMP Group
- * RFC 2011 (updates 1213): SNMPv2 MIB for IP: ICMP group
+ * struct snmp_mib{}
+ * - list of entries for particular API (such as /proc/net/snmp)
+ * - name of entries.
*/
-struct icmp_mib
-{
- unsigned long IcmpInMsgs;
- unsigned long IcmpInErrors;
- unsigned long IcmpInDestUnreachs;
- unsigned long IcmpInTimeExcds;
- unsigned long IcmpInParmProbs;
- unsigned long IcmpInSrcQuenchs;
- unsigned long IcmpInRedirects;
- unsigned long IcmpInEchos;
- unsigned long IcmpInEchoReps;
- unsigned long IcmpInTimestamps;
- unsigned long IcmpInTimestampReps;
- unsigned long IcmpInAddrMasks;
- unsigned long IcmpInAddrMaskReps;
- unsigned long IcmpOutMsgs;
- unsigned long IcmpOutErrors;
- unsigned long IcmpOutDestUnreachs;
- unsigned long IcmpOutTimeExcds;
- unsigned long IcmpOutParmProbs;
- unsigned long IcmpOutSrcQuenchs;
- unsigned long IcmpOutRedirects;
- unsigned long IcmpOutEchos;
- unsigned long IcmpOutEchoReps;
- unsigned long IcmpOutTimestamps;
- unsigned long IcmpOutTimestampReps;
- unsigned long IcmpOutAddrMasks;
- unsigned long IcmpOutAddrMaskReps;
- unsigned long dummy;
- unsigned long __pad[0];
+struct snmp_mib {
+ char *name;
+ int entry;
};
-/*
- * RFC 2466: ICMPv6-MIB
- */
-struct icmpv6_mib
-{
- unsigned long Icmp6InMsgs;
- unsigned long Icmp6InErrors;
-
- unsigned long Icmp6InDestUnreachs;
- unsigned long Icmp6InPktTooBigs;
- unsigned long Icmp6InTimeExcds;
- unsigned long Icmp6InParmProblems;
-
- unsigned long Icmp6InEchos;
- unsigned long Icmp6InEchoReplies;
- unsigned long Icmp6InGroupMembQueries;
- unsigned long Icmp6InGroupMembResponses;
- unsigned long Icmp6InGroupMembReductions;
- unsigned long Icmp6InRouterSolicits;
- unsigned long Icmp6InRouterAdvertisements;
- unsigned long Icmp6InNeighborSolicits;
- unsigned long Icmp6InNeighborAdvertisements;
- unsigned long Icmp6InRedirects;
-
- unsigned long Icmp6OutMsgs;
+#define SNMP_MIB_ITEM(_name,_entry) { \
+ .name = _name, \
+ .entry = _entry, \
+}
- unsigned long Icmp6OutDestUnreachs;
- unsigned long Icmp6OutPktTooBigs;
- unsigned long Icmp6OutTimeExcds;
- unsigned long Icmp6OutParmProblems;
+#define SNMP_MIB_SENTINEL { \
+ .name = NULL, \
+ .entry = 0, \
+}
- unsigned long Icmp6OutEchoReplies;
- unsigned long Icmp6OutRouterSolicits;
- unsigned long Icmp6OutNeighborSolicits;
- unsigned long Icmp6OutNeighborAdvertisements;
- unsigned long Icmp6OutRedirects;
- unsigned long Icmp6OutGroupMembResponses;
- unsigned long Icmp6OutGroupMembReductions;
- unsigned long __pad[0];
-};
-
-/*
- * RFC 1213: MIB-II TCP group
- * RFC 2012 (updates 1213): SNMPv2-MIB-TCP
- */
-struct tcp_mib
-{
- unsigned long TcpRtoAlgorithm;
- unsigned long TcpRtoMin;
- unsigned long TcpRtoMax;
- unsigned long TcpMaxConn;
- unsigned long TcpActiveOpens;
- unsigned long TcpPassiveOpens;
- unsigned long TcpAttemptFails;
- unsigned long TcpEstabResets;
- unsigned long TcpCurrEstab;
- unsigned long TcpInSegs;
- unsigned long TcpOutSegs;
- unsigned long TcpRetransSegs;
- unsigned long TcpInErrs;
- unsigned long TcpOutRsts;
- unsigned long __pad[0];
-};
-
/*
- * RFC 1213: MIB-II UDP group
- * RFC 2013 (updates 1213): SNMPv2-MIB-UDP
+ * We use all unsigned longs. Linux will soon be so reliable that even
+ * these will rapidly get too small 8-). Seriously consider the IpInReceives
+ * count on the 20Gb/s + networks people expect in a few years time!
*/
-struct udp_mib
-{
- unsigned long UdpInDatagrams;
- unsigned long UdpNoPorts;
- unsigned long UdpInErrors;
- unsigned long UdpOutDatagrams;
- unsigned long __pad[0];
-};
-/* draft-ietf-sigtran-sctp-mib-07.txt */
-struct sctp_mib
-{
- unsigned long SctpCurrEstab;
- unsigned long SctpActiveEstabs;
- unsigned long SctpPassiveEstabs;
- unsigned long SctpAborteds;
- unsigned long SctpShutdowns;
- unsigned long SctpOutOfBlues;
- unsigned long SctpChecksumErrors;
- unsigned long SctpOutCtrlChunks;
- unsigned long SctpOutOrderChunks;
- unsigned long SctpOutUnorderChunks;
- unsigned long SctpInCtrlChunks;
- unsigned long SctpInOrderChunks;
- unsigned long SctpInUnorderChunks;
- unsigned long SctpFragUsrMsgs;
- unsigned long SctpReasmUsrMsgs;
- unsigned long SctpOutSCTPPacks;
- unsigned long SctpInSCTPPacks;
- unsigned long SctpRtoAlgorithm;
- unsigned long SctpRtoMin;
- unsigned long SctpRtoMax;
- unsigned long SctpRtoInitial;
- unsigned long SctpValCookieLife;
- unsigned long SctpMaxInitRetr;
- unsigned long __pad[0];
-};
+/*
+ * The rule for padding:
+ * Best is power of two because then the right structure can be found by a
+ * simple shift. The structure should be always cache line aligned.
+ * gcc needs n=alignto(cachelinesize, popcnt(sizeof(bla_mib))) shift/add
+ * instructions to emulate multiply in case it is not power-of-two.
+ * Currently n is always <=3 for all sizes so simple cache line alignment
+ * is enough.
+ *
+ * The best solution would be a global CPU local area; especially on 64-
+ * and 128-byte cacheline machines it makes a *lot* of sense -AK
+ */
-struct linux_mib
-{
- unsigned long SyncookiesSent;
- unsigned long SyncookiesRecv;
- unsigned long SyncookiesFailed;
- unsigned long EmbryonicRsts;
- unsigned long PruneCalled;
- unsigned long RcvPruned;
- unsigned long OfoPruned;
- unsigned long OutOfWindowIcmps;
- unsigned long LockDroppedIcmps;
- unsigned long ArpFilter;
- unsigned long TimeWaited;
- unsigned long TimeWaitRecycled;
- unsigned long TimeWaitKilled;
- unsigned long PAWSPassiveRejected;
- unsigned long PAWSActiveRejected;
- unsigned long PAWSEstabRejected;
- unsigned long DelayedACKs;
- unsigned long DelayedACKLocked;
- unsigned long DelayedACKLost;
- unsigned long ListenOverflows;
- unsigned long ListenDrops;
- unsigned long TCPPrequeued;
- unsigned long TCPDirectCopyFromBacklog;
- unsigned long TCPDirectCopyFromPrequeue;
- unsigned long TCPPrequeueDropped;
- unsigned long TCPHPHits;
- unsigned long TCPHPHitsToUser;
- unsigned long TCPPureAcks;
- unsigned long TCPHPAcks;
- unsigned long TCPRenoRecovery;
- unsigned long TCPSackRecovery;
- unsigned long TCPSACKReneging;
- unsigned long TCPFACKReorder;
- unsigned long TCPSACKReorder;
- unsigned long TCPRenoReorder;
- unsigned long TCPTSReorder;
- unsigned long TCPFullUndo;
- unsigned long TCPPartialUndo;
- unsigned long TCPDSACKUndo;
- unsigned long TCPLossUndo;
- unsigned long TCPLoss;
- unsigned long TCPLostRetransmit;
- unsigned long TCPRenoFailures;
- unsigned long TCPSackFailures;
- unsigned long TCPLossFailures;
- unsigned long TCPFastRetrans;
- unsigned long TCPForwardRetrans;
- unsigned long TCPSlowStartRetrans;
- unsigned long TCPTimeouts;
- unsigned long TCPRenoRecoveryFail;
- unsigned long TCPSackRecoveryFail;
- unsigned long TCPSchedulerFailed;
- unsigned long TCPRcvCollapsed;
- unsigned long TCPDSACKOldSent;
- unsigned long TCPDSACKOfoSent;
- unsigned long TCPDSACKRecv;
- unsigned long TCPDSACKOfoRecv;
- unsigned long TCPAbortOnSyn;
- unsigned long TCPAbortOnData;
- unsigned long TCPAbortOnClose;
- unsigned long TCPAbortOnMemory;
- unsigned long TCPAbortOnTimeout;
- unsigned long TCPAbortOnLinger;
- unsigned long TCPAbortFailed;
- unsigned long TCPMemoryPressures;
- unsigned long __pad[0];
+#define __SNMP_MIB_ALIGN__ ____cacheline_aligned
+
+/* IPstats */
+#define IPSTATS_MIB_MAX __IPSTATS_MIB_MAX
+struct ipstats_mib {
+ unsigned long mibs[IPSTATS_MIB_MAX];
+} __SNMP_MIB_ALIGN__;
+
+/* ICMP */
+#define ICMP_MIB_DUMMY __ICMP_MIB_MAX
+#define ICMP_MIB_MAX (__ICMP_MIB_MAX + 1)
+
+struct icmp_mib {
+ unsigned long mibs[ICMP_MIB_MAX];
+} __SNMP_MIB_ALIGN__;
+
+/* ICMP6 (IPv6-ICMP) */
+#define ICMP6_MIB_MAX __ICMP6_MIB_MAX
+struct icmpv6_mib {
+ unsigned long mibs[ICMP6_MIB_MAX];
+} __SNMP_MIB_ALIGN__;
+
+/* TCP */
+#define TCP_MIB_MAX __TCP_MIB_MAX
+struct tcp_mib {
+ unsigned long mibs[TCP_MIB_MAX];
+} __SNMP_MIB_ALIGN__;
+
+/* UDP */
+#define UDP_MIB_MAX __UDP_MIB_MAX
+struct udp_mib {
+ unsigned long mibs[UDP_MIB_MAX];
+} __SNMP_MIB_ALIGN__;
+
+/* SCTP */
+#define SCTP_MIB_MAX __SCTP_MIB_MAX
+struct sctp_mib {
+ unsigned long mibs[SCTP_MIB_MAX];
+} __SNMP_MIB_ALIGN__;
+
+/* Linux */
+#define LINUX_MIB_MAX __LINUX_MIB_MAX
+struct linux_mib {
+ unsigned long mibs[LINUX_MIB_MAX];
};
/*
- * FIXME: On x86 and some other CPUs the split into user and softirq parts is not needed because
- * addl $1,memory is atomic against interrupts (but atomic_inc would be overkill because of the lock
- * cycles). Wants new nonlocked_atomic_inc() primitives -AK
+ * FIXME: On x86 and some other CPUs the split into user and softirq parts
+ * is not needed because addl $1,memory is atomic against interrupts (but
+ * atomic_inc would be overkill because of the lock cycles). Wants new
+ * nonlocked_atomic_inc() primitives -AK
*/
#define DEFINE_SNMP_STAT(type, name) \
__typeof__(type) *name[2]
#define SNMP_STAT_USRPTR(name) (name[1])
#define SNMP_INC_STATS_BH(mib, field) \
- (per_cpu_ptr(mib[0], smp_processor_id())->field++)
+ (per_cpu_ptr(mib[0], smp_processor_id())->mibs[field]++)
#define SNMP_INC_STATS_OFFSET_BH(mib, field, offset) \
- ((*((&per_cpu_ptr(mib[0], smp_processor_id())->field) + (offset)))++)
+ (per_cpu_ptr(mib[0], smp_processor_id())->mibs[field + (offset)]++)
#define SNMP_INC_STATS_USER(mib, field) \
- (per_cpu_ptr(mib[1], smp_processor_id())->field++)
+ (per_cpu_ptr(mib[1], smp_processor_id())->mibs[field]++)
#define SNMP_INC_STATS(mib, field) \
- (per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->field++)
+ (per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->mibs[field]++)
#define SNMP_DEC_STATS(mib, field) \
- (per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->field--)
+ (per_cpu_ptr(mib[!in_softirq()], smp_processor_id())->mibs[field]--)
#define SNMP_ADD_STATS_BH(mib, field, addend) \
- (per_cpu_ptr(mib[0], smp_processor_id())->field += addend)
+ (per_cpu_ptr(mib[0], smp_processor_id())->mibs[field] += addend)
#define SNMP_ADD_STATS_USER(mib, field, addend) \
- (per_cpu_ptr(mib[1], smp_processor_id())->field += addend)
-
+ (per_cpu_ptr(mib[1], smp_processor_id())->mibs[field] += addend)
+
#endif
*/
/* Define this to get the sk->sk_debug debugging facility. */
-#define SOCK_DEBUGGING
+//#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && ((sk)->sk_debug)) \
printk(KERN_DEBUG msg); } while (0)
* @sk_timer - sock cleanup timer
* @sk_stamp - time stamp of last packet received
* @sk_socket - Identd and reporting IO signals
- * @sk_user_data - RPC layer private data
+ * @sk_user_data - RPC and Tux layer private data
* @sk_owner - module that owns this socket
* @sk_sndmsg_page - cached page for sendmsg
* @sk_sndmsg_off - cached offset for sendmsg
* @sk_data_ready - callback to indicate there is data to be processed
* @sk_write_space - callback to indicate there is bf sending space available
* @sk_error_report - callback to indicate errors (e.g. %MSG_ERRQUEUE)
+ * @sk_create_child - callback to get new socket events
* @sk_backlog_rcv - callback to process the backlog
* @sk_destruct - called at sock freeing time, i.e. when all refcnt == 0
*/
void (*sk_error_report)(struct sock *sk);
int (*sk_backlog_rcv)(struct sock *sk,
struct sk_buff *skb);
+ void (*sk_create_child)(struct sock *sk, struct sock *newsk);
void (*sk_destruct)(struct sock *sk);
};
extern void tcp_push_one(struct sock *, unsigned mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);
+extern void cleanup_rbuf(struct sock *sk, int copied);
/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
/* Return 0, if packet can be sent now without violation Nagle's rules:
1. It is full sized.
2. Or it contains FIN.
- 3. Or TCP_NODELAY was set.
- 4. Or TCP_CORK is not set, and all sent packets are ACKed.
+ 3. Or higher layers meant to force a packet boundary, hence the PSH bit.
+ 4. Or TCP_NODELAY was set.
+ 5. Or TCP_CORK is not set, and all sent packets are ACKed.
With Minshall's modification: all sent small packets are ACKed.
*/
while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
sk->sk_backlog_rcv(sk, skb1);
- NET_INC_STATS_BH(TCPPrequeueDropped);
+ NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
}
tp->ucopy.memory = 0;
switch (state) {
case TCP_ESTABLISHED:
if (oldstate != TCP_ESTABLISHED)
- TCP_INC_STATS(TcpCurrEstab);
+ TCP_INC_STATS(TCP_MIB_CURRESTAB);
break;
case TCP_CLOSE:
if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
- TCP_INC_STATS(TcpEstabResets);
+ TCP_INC_STATS(TCP_MIB_ESTABRESETS);
sk->sk_prot->unhash(sk);
if (tcp_sk(sk)->bind_hash &&
/* fall through */
default:
if (oldstate==TCP_ESTABLISHED)
- TCP_DEC_STATS(TcpCurrEstab);
+ TCP_DEC_STATS(TCP_MIB_CURRESTAB);
}
/* Change state AFTER socket is unhashed to avoid closed
static inline void tcp_mib_init(void)
{
/* See RFC 2012 */
- TCP_ADD_STATS_USER(TcpRtoAlgorithm, 1);
- TCP_ADD_STATS_USER(TcpRtoMin, TCP_RTO_MIN*1000/HZ);
- TCP_ADD_STATS_USER(TcpRtoMax, TCP_RTO_MAX*1000/HZ);
- TCP_ADD_STATS_USER(TcpMaxConn, -1);
+ TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
+ TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
+ TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
+ TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
}
/* /proc */
#define req_err(req) do { (req)->error = 1; Dprintk("request %p error at %s:%d.\n", req, __FILE__, __LINE__); } while (0)
-#define enough_wspace(sk) (tcp_wspace(sk) >= tcp_min_write_space(sk))
+#define enough_wspace(sk) (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
#define clear_keepalive(req) do { (req)->keep_alive = 0; Dprintk("keepalive cleared for req %p.\n", req); } while (0)
extern int print_all_requests (threadinfo_t *ti);
extern void udp_err(struct sk_buff *, u32);
-extern int udp_connect(struct sock *sk,
- struct sockaddr *usin, int addr_len);
extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len);
extern int xfrm4_output(struct sk_buff **pskb);
extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler);
extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler);
-extern int xfrm4_tunnel_check_size(struct sk_buff *skb);
extern int xfrm6_rcv(struct sk_buff **pskb, unsigned int *nhoffp);
extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler);
extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler);
-extern int xfrm6_tunnel_check_size(struct sk_buff *skb);
extern u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr);
extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr);
extern u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr);
+extern int xfrm6_output(struct sk_buff **pskb);
#ifdef CONFIG_XFRM
extern int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type);
*/
struct ccs_modesel_head {
- u8 _r1; /* reserved */
- u8 medium; /* device-specific medium type */
- u8 _r2; /* reserved */
- u8 block_desc_length; /* block descriptor length */
- u8 density; /* device-specific density code */
- u8 number_blocks_hi; /* number of blocks in this block desc */
- u8 number_blocks_med;
- u8 number_blocks_lo;
- u8 _r3;
- u8 block_length_hi; /* block length for blocks in this desc */
- u8 block_length_med;
- u8 block_length_lo;
+ __u8 _r1; /* reserved */
+ __u8 medium; /* device-specific medium type */
+ __u8 _r2; /* reserved */
+ __u8 block_desc_length; /* block descriptor length */
+ __u8 density; /* device-specific density code */
+ __u8 number_blocks_hi; /* number of blocks in this block desc */
+ __u8 number_blocks_med;
+ __u8 number_blocks_lo;
+ __u8 _r3;
+ __u8 block_length_hi; /* block length for blocks in this desc */
+ __u8 block_length_med;
+ __u8 block_length_lo;
};
/*
* ScsiLun: 8 byte LUN.
*/
struct scsi_lun {
- u8 scsi_lun[8];
+ __u8 scsi_lun[8];
};
/*
container_of(d, struct Scsi_Host, shost_classdev)
extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
-extern int scsi_add_host(struct Scsi_Host *, struct device *);
+extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
#ifndef _SCSI_GENERIC_H
#define _SCSI_GENERIC_H
+#include <linux/compiler.h>
+
/*
History:
Started: Aug 9 by Lawrence Foard (entropy@world.std.com), to allow user
http://www.torque.net/sg/p/scsi-generic_long.txt
A version of this document (potentially out of date) may also be found in
the kernel source tree, probably at:
- /usr/src/linux/Documentation/scsi/scsi-generic.txt .
+ Documentation/scsi/scsi-generic.txt .
Utility and test programs are available at the sg web site. They are
bundled as sg_utils (for the lk 2.2 series) and sg3_utils (for the
#include <linux/time.h>
#include <asm/byteorder.h>
-#if __LITTLE_ENDIAN == 1234
+#ifdef __LITTLE_ENDIAN
#define SNDRV_LITTLE_ENDIAN
-#elif __BIG_ENDIAN == 4321
+#else
+#ifdef __BIG_ENDIAN
#define SNDRV_BIG_ENDIAN
#else
#error "Unsupported endian..."
#endif
+#endif
#else /* !__KERNEL__ */
int (*release) (snd_info_entry_t * entry,
unsigned short mode, void *file_private_data);
long (*read) (snd_info_entry_t *entry, void *file_private_data,
- struct file * file, char __user *buf, long count);
+ struct file * file, char __user *buf,
+ unsigned long count, unsigned long pos);
long (*write) (snd_info_entry_t *entry, void *file_private_data,
- struct file * file, const char __user *buf, long count);
+ struct file * file, const char __user *buf,
+ unsigned long count, unsigned long pos);
long long (*llseek) (snd_info_entry_t *entry, void *file_private_data,
struct file * file, long long offset, int orig);
unsigned int (*poll) (snd_info_entry_t *entry, void *file_private_data,
/*
* FIXME
* Ugh, we don't have PCI space, so map readb() and friends to use Zorro space
- * for MMIO accesses. This should make clgenfb work again on Amiga
+ * for MMIO accesses. This should make cirrusfb work again on Amiga
*/
+#undef inb_p
+#undef inw_p
+#undef outb_p
+#undef outw
+#undef readb
+#undef writeb
+#undef writew
#define inb_p(port) 0
#define inw_p(port) 0
#define outb_p(port, val) do { } while (0)
If unsure, say Y
-config STANDALONE
- bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL
- default y
- help
- Select this option if you don't have magic firmware for drivers that
- need it.
-
- If unsure, say Y.
-
config BROKEN
bool
depends on !CLEAN_COMPILE
shm_unlock(shp);
if (!is_file_hugepages(shp->shm_file))
shmem_lock(shp->shm_file, 0, shp->mlock_user);
+ else
+ user_shm_unlock(shp->shm_file->f_dentry->d_inode->i_size,
+ shp->mlock_user);
fput (shp->shm_file);
security_shm_free(shp);
ipc_rcu_free(shp, sizeof(struct shmid_kernel));
shp->shm_perm.key = key;
shp->shm_flags = (shmflg & S_IRWXUGO);
+ shp->mlock_user = NULL;
shp->shm_perm.security = NULL;
error = security_shm_alloc(shp);
return error;
}
- if (shmflg & SHM_HUGETLB)
+ if (shmflg & SHM_HUGETLB) {
+ /* hugetlb_zero_setup takes care of mlock user accounting */
file = hugetlb_zero_setup(size);
- else {
+ shp->mlock_user = current->user;
+ } else {
sprintf (name, "SYSV%08x", key);
file = shmem_file_setup(name, size, VM_ACCOUNT);
}
shp->shm_nattch = 0;
shp->id = shm_buildid(id,shp->shm_perm.seq);
shp->shm_file = file;
- shp->mlock_user = NULL;
file->f_dentry->d_inode->i_ino = shp->id;
if (shmflg & SHM_HUGETLB)
set_file_hugepages(file);
case SHM_UNLOCK:
{
/* Allow superuser to lock segment in memory */
- if (!can_do_mlock()) {
+ if (!can_do_mlock() && cmd == SHM_LOCK) {
err = -EPERM;
goto out;
}
goto out_unlock;
if(cmd==SHM_LOCK) {
+ struct user_struct * user = current->user;
if (!is_file_hugepages(shp->shm_file)) {
- err = shmem_lock(shp->shm_file, 1, current->user);
- if (!err)
+ err = shmem_lock(shp->shm_file, 1, user);
+ if (!err) {
shp->shm_flags |= SHM_LOCKED;
+ shp->mlock_user = user;
+ }
}
- } else {
- if (!is_file_hugepages(shp->shm_file))
- shmem_lock(shp->shm_file, 0, shp->mlock_user);
+ } else if (!is_file_hugepages(shp->shm_file)) {
+ shmem_lock(shp->shm_file, 0, shp->mlock_user);
shp->shm_flags &= ~SHM_LOCKED;
+ shp->mlock_user = NULL;
}
shm_unlock(shp);
goto out;
granted_mode >>= 3;
/* is there some bit set in requested_mode but not in granted_mode? */
if ((requested_mode & ~granted_mode & 0007) &&
- !capable(CAP_IPC_OWNER)) {
- if (!can_do_mlock()) {
- return -1;
- }
- }
+ !capable(CAP_IPC_OWNER))
+ return -1;
return security_ipc_permission(ipcp, flag);
}
*/
memset((caddr_t)&ac, 0, sizeof(acct_t));
- ac.ac_version = ACCT_VERSION;
+ ac.ac_version = ACCT_VERSION | ACCT_BYTEORDER;
strlcpy(ac.ac_comm, current->comm, sizeof(ac.ac_comm));
elapsed = jiffies_64_to_AHZ(get_jiffies_64() - current->start_time);
old_encode_dev(tty_devnum(current->signal->tty)) : 0;
read_unlock(&tasklist_lock);
- /* ABYTESEX is always set to allow byte order detection */
- ac.ac_flag = ABYTESEX;
+ ac.ac_flag = 0;
if (current->flags & PF_FORKNOEXEC)
ac.ac_flag |= AFORK;
if (current->flags & PF_SUPERPRIV)
err = sys_clock_getres(which_clock,
(struct timespec __user *) &ts);
set_fs(oldfs);
- if (!err && put_compat_timespec(&ts, tp))
+ if (!err && tp && put_compat_timespec(&ts, tp))
return -EFAULT;
return err;
}
}
acct_process(code);
+ if (current->tux_info) {
+#ifdef CONFIG_TUX_DEBUG
+ printk("Possibly unexpected TUX-thread exit(%ld) at %p?\n",
+ code, __builtin_return_address(0));
+#endif
+ current->tux_exit();
+ }
__exit_mm(tsk);
exit_sem(tsk);
if (p->real_parent != p->parent) {
__ptrace_unlink(p);
p->state = TASK_ZOMBIE;
- /* If this is a detached thread, this is where it goes away. */
- if (p->exit_signal == -1) {
- /* release_task takes the lock itself. */
- write_unlock_irq(&tasklist_lock);
- release_task (p);
- }
- else {
+ /*
+ * If this is not a detached task, notify the parent. If it's
+ * still not detached after that, don't release it now.
+ */
+ if (p->exit_signal != -1) {
do_notify_parent(p, p->exit_signal);
- write_unlock_irq(&tasklist_lock);
+ if (p->exit_signal != -1)
+ p = NULL;
}
- p = NULL;
}
- else
- write_unlock_irq(&tasklist_lock);
+ write_unlock_irq(&tasklist_lock);
}
if (p != NULL)
release_task(p);
#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/security.h>
+#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
mm->locked_vm = 0;
mm->mmap = NULL;
mm->mmap_cache = NULL;
- mm->free_area_cache = TASK_UNMAPPED_BASE;
+ mm->free_area_cache = oldmm->mmap_base;
mm->map_count = 0;
mm->rss = 0;
cpus_clear(mm->cpu_vm_mask);
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
-#ifdef __HAVE_ARCH_MMAP_TOP
- mm->mmap_top = mmap_top();
-#endif
return mm;
}
free_mm(mm);
spin_unlock(&mmlist_lock);
exit_aio(mm);
exit_mmap(mm);
+ put_swap_token(mm);
mmdrop(mm);
}
}
p = dup_task_struct(current);
if (!p)
goto fork_out;
+ p->tux_info = NULL;
retval = -EAGAIN;
if (atomic_read(&p->user->processes) >=
}
#endif
- retval = -ENOMEM;
if ((retval = security_task_alloc(p)))
goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
-#define symbol_is(literal, string) \
- (strcmp(MODULE_SYMBOL_PREFIX literal, (string)) == 0)
-
/* Protects module list */
static spinlock_t modlist_lock = SPIN_LOCK_UNLOCKED;
return "machine";
if(pmdisk_info.cpus != num_online_cpus())
return "number of cpus";
- return 0;
+ return NULL;
}
* locking requirements, the list it's pulling from has to belong to a cpu
* which is dead and hence not processing interrupts.
*/
-static void rcu_move_batch(struct list_head *list)
+static void rcu_move_batch(struct rcu_head *list)
{
- struct list_head *entry;
- int cpu = smp_processor_id();
+ int cpu;
local_irq_disable();
- while (!list_empty(list)) {
- entry = list->next;
- list_del(entry);
- list_add_tail(entry, &RCU_nxtlist(cpu));
+
+ cpu = smp_processor_id();
+
+ while (list != NULL) {
+ *RCU_nxttail(cpu) = list;
+ RCU_nxttail(cpu) = &list->next;
+ list = list->next;
}
local_irq_enable();
}
spin_lock_bh(&rcu_state.mutex);
if (rcu_ctrlblk.cur != rcu_ctrlblk.completed)
cpu_quiet(cpu);
-unlock:
spin_unlock_bh(&rcu_state.mutex);
- rcu_move_batch(&RCU_curlist(cpu));
- rcu_move_batch(&RCU_nxtlist(cpu));
+ rcu_move_batch(RCU_curlist(cpu));
+ rcu_move_batch(RCU_nxtlist(cpu));
tasklet_kill_immediate(&RCU_tasklet(cpu), cpu);
}
{
unsigned long i, sum = 0;
- for_each_online_cpu(i)
+ for_each_cpu(i)
sum += cpu_rq(i)->nr_uninterruptible;
return sum;
{
unsigned long long i, sum = 0;
- for_each_online_cpu(i)
+ for_each_cpu(i)
sum += cpu_rq(i)->nr_switches;
return sum;
{
unsigned long i, sum = 0;
- for_each_online_cpu(i)
+ for_each_cpu(i)
sum += atomic_read(&cpu_rq(i)->nr_iowait);
return sum;
sched_domain_init.groups = &sched_group_init;
sched_domain_init.last_balance = jiffies;
sched_domain_init.balance_interval = INT_MAX; /* Don't balance */
+ sched_domain_init.busy_factor = 1;
memset(&sched_group_init, 0, sizeof(struct sched_group));
sched_group_init.cpumask = CPU_MASK_ALL;
}
}
+EXPORT_SYMBOL_GPL(flush_signal_handlers);
/* Notify the system that a driver wants to block all signals for this
* process, and wants to be notified if any signals at all were to be
#if defined(CONFIG_PPC32) && defined(CONFIG_6xx)
extern unsigned long powersave_nap;
int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp);
+ void __user *buffer, size_t *lenp);
#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t,
ctl_table *, void **);
static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp);
+ void __user *buffer, size_t *lenp, loff_t *ppos);
static ctl_table root_table[];
static struct ctl_table_header root_table_header =
extern ctl_table pty_table[];
#endif
+int sysctl_legacy_va_layout;
+
/* /proc declarations: */
#ifdef CONFIG_PROC_FS
.strategy = &sysctl_intvec,
.extra1 = &zero,
},
+ {
+ .ctl_name = VM_LEGACY_VA_LAYOUT,
+ .procname = "legacy_va_layout",
+ .data = &sysctl_legacy_va_layout,
+ .maxlen = sizeof(sysctl_legacy_va_layout),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ },
{ .ctl_name = 0 }
};
res = count;
- /*
- * FIXME: we need to pass on ppos to the handler.
- */
-
- error = (*table->proc_handler) (table, write, file, buf, &res);
+ error = (*table->proc_handler) (table, write, file, buf, &res, ppos);
if (error)
return error;
return res;
* Returns 0 on success.
*/
int proc_dostring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
size_t len;
char __user *p;
char c;
if (!table->data || !table->maxlen || !*lenp ||
- (filp->f_pos && !write)) {
+ (*ppos && !write)) {
*lenp = 0;
return 0;
}
if(copy_from_user(table->data, buffer, len))
return -EFAULT;
((char *) table->data)[len] = 0;
- filp->f_pos += *lenp;
+ *ppos += *lenp;
} else {
len = strlen(table->data);
if (len > table->maxlen)
len++;
}
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
}
return 0;
}
*/
static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int r;
if (!write) {
down_read(&uts_sem);
- r=proc_dostring(table,0,filp,buffer,lenp);
+ r=proc_dostring(table,0,filp,buffer,lenp, ppos);
up_read(&uts_sem);
} else {
down_write(&uts_sem);
- r=proc_dostring(table,1,filp,buffer,lenp);
+ r=proc_dostring(table,1,filp,buffer,lenp, ppos);
up_write(&uts_sem);
}
return r;
}
static int do_proc_dointvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp,
+ void __user *buffer, size_t *lenp, loff_t *ppos,
int (*conv)(int *negp, unsigned long *lvalp, int *valp,
int write, void *data),
void *data)
{
-#define TMPBUFLEN 20
+#define TMPBUFLEN 21
int *i, vleft, first=1, neg, val;
unsigned long lval;
size_t left, len;
char __user *s = buffer;
if (!table->data || !table->maxlen || !*lenp ||
- (filp->f_pos && !write)) {
+ (*ppos && !write)) {
*lenp = 0;
return 0;
}
if (write && first)
return -EINVAL;
*lenp -= left;
- filp->f_pos += *lenp;
+ *ppos += *lenp;
return 0;
#undef TMPBUFLEN
}
* Returns 0 on success.
*/
int proc_dointvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return do_proc_dointvec(table,write,filp,buffer,lenp,
+ return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
NULL,NULL);
}
*/
int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int op;
}
op = (current->pid == 1) ? OP_SET : OP_AND;
- return do_proc_dointvec(table,write,filp,buffer,lenp,
+ return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
do_proc_dointvec_bset_conv,&op);
}
* Returns 0 on success.
*/
int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct do_proc_dointvec_minmax_conv_param param = {
.min = (int *) table->extra1,
.max = (int *) table->extra2,
};
- return do_proc_dointvec(table, write, filp, buffer, lenp,
+ return do_proc_dointvec(table, write, filp, buffer, lenp, ppos,
do_proc_dointvec_minmax_conv, ¶m);
}
static int do_proc_doulongvec_minmax(ctl_table *table, int write,
struct file *filp,
- void __user *buffer, size_t *lenp,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos,
unsigned long convmul,
unsigned long convdiv)
{
-#define TMPBUFLEN 20
+#define TMPBUFLEN 21
unsigned long *i, *min, *max, val;
int vleft, first=1, neg;
size_t len, left;
char __user *s = buffer;
if (!table->data || !table->maxlen || !*lenp ||
- (filp->f_pos && !write)) {
+ (*ppos && !write)) {
*lenp = 0;
return 0;
}
if (write && first)
return -EINVAL;
*lenp -= left;
- filp->f_pos += *lenp;
+ *ppos += *lenp;
return 0;
#undef TMPBUFLEN
}
* Returns 0 on success.
*/
int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return do_proc_doulongvec_minmax(table, write, filp, buffer, lenp, 1l, 1l);
+ return do_proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos, 1l, 1l);
}
/**
*/
int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
{
return do_proc_doulongvec_minmax(table, write, filp, buffer,
- lenp, HZ, 1000l);
+ lenp, ppos, HZ, 1000l);
}
* Returns 0 on success.
*/
int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return do_proc_dointvec(table,write,filp,buffer,lenp,
+ return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
do_proc_dointvec_jiffies_conv,NULL);
}
* Returns 0 on success.
*/
int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return do_proc_dointvec(table,write,filp,buffer,lenp,
+ return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
do_proc_dointvec_userhz_jiffies_conv,NULL);
}
#else /* CONFIG_PROC_FS */
int proc_dostring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_dointvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
}
int proc_dostring(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_dointvec(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
psecs = (p->utime += user);
psecs += (p->stime += system);
- if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
+ if (psecs / HZ >= p->rlim[RLIMIT_CPU].rlim_cur) {
/* Send SIGXCPU every second.. */
if (!(psecs % HZ))
send_sig(SIGXCPU, p, 1);
/* and SIGKILL when we go over max.. */
- if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_max)
+ if (psecs / HZ >= p->rlim[RLIMIT_CPU].rlim_max)
send_sig(SIGKILL, p, 1);
}
}
/* nothing */;
return tmp;
}
+EXPORT_SYMBOL(strcpy);
#endif
#ifndef __HAVE_ARCH_STRNCPY
}
return dest;
}
+EXPORT_SYMBOL(strncpy);
#endif
#ifndef __HAVE_ARCH_STRLCPY
return tmp;
}
+EXPORT_SYMBOL(strcat);
#endif
#ifndef __HAVE_ARCH_STRNCAT
return tmp;
}
+EXPORT_SYMBOL(strncat);
#endif
#ifndef __HAVE_ARCH_STRLCAT
return __res;
}
+EXPORT_SYMBOL(strcmp);
#endif
#ifndef __HAVE_ARCH_STRNCMP
return __res;
}
+EXPORT_SYMBOL(strncmp);
#endif
#ifndef __HAVE_ARCH_STRCHR
return NULL;
return (char *) s;
}
+EXPORT_SYMBOL(strchr);
#endif
#ifndef __HAVE_ARCH_STRRCHR
} while (--p >= s);
return NULL;
}
+EXPORT_SYMBOL(strrchr);
#endif
#ifndef __HAVE_ARCH_STRNCHR
return (char *) s;
return NULL;
}
+EXPORT_SYMBOL(strnchr);
#endif
#ifndef __HAVE_ARCH_STRLEN
/* nothing */;
return sc - s;
}
+EXPORT_SYMBOL(strlen);
#endif
#ifndef __HAVE_ARCH_STRNLEN
/* nothing */;
return sc - s;
}
+EXPORT_SYMBOL(strnlen);
#endif
#ifndef __HAVE_ARCH_STRSPN
return count;
}
+EXPORT_SYMBOL(strcspn);
#ifndef __HAVE_ARCH_STRPBRK
/**
}
return NULL;
}
+EXPORT_SYMBOL(strpbrk);
#endif
#ifndef __HAVE_ARCH_STRSEP
return s;
}
+EXPORT_SYMBOL(memset);
#endif
#ifndef __HAVE_ARCH_BCOPY
while (count--)
*dest++ = *src++;
}
+EXPORT_SYMBOL(bcopy);
#endif
#ifndef __HAVE_ARCH_MEMCPY
return dest;
}
+EXPORT_SYMBOL(memcpy);
#endif
#ifndef __HAVE_ARCH_MEMMOVE
return dest;
}
+EXPORT_SYMBOL(memmove);
#endif
#ifndef __HAVE_ARCH_MEMCMP
break;
return res;
}
+EXPORT_SYMBOL(memcmp);
#endif
#ifndef __HAVE_ARCH_MEMSCAN
}
return (void *) p;
}
+EXPORT_SYMBOL(memscan);
#endif
#ifndef __HAVE_ARCH_STRSTR
}
return NULL;
}
+EXPORT_SYMBOL(strstr);
#endif
#ifndef __HAVE_ARCH_MEMCHR
}
return NULL;
}
-
+EXPORT_SYMBOL(memchr);
#endif
qualifier = -1;
if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
*fmt == 'Z' || *fmt == 'z') {
- qualifier = *fmt;
- fmt++;
+ qualifier = *fmt++;
+ if (unlikely(qualifier == *fmt)) {
+ if (qualifier == 'h') {
+ qualifier = 'H';
+ fmt++;
+ } else if (qualifier == 'l') {
+ qualifier = 'L';
+ fmt++;
+ }
+ }
}
base = 10;
is_sign = 0;
break;
switch(qualifier) {
+ case 'H': /* that's 'hh' in format */
+ if (is_sign) {
+ signed char *s = (signed char *) va_arg(args,signed char *);
+ *s = (signed char) simple_strtol(str,&next,base);
+ } else {
+ unsigned char *s = (unsigned char *) va_arg(args, unsigned char *);
+ *s = (unsigned char) simple_strtoul(str, &next, base);
+ }
+ break;
case 'h':
if (is_sign) {
short *s = (short *) va_arg(args,short *);
readahead.o slab.o swap.o truncate.o vmscan.o \
$(mmu-y)
-obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
+obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
obj-$(CONFIG_X86_4G) += usercopy.o
obj-$(CONFIG_HUGETLBFS) += hugetlb.o
obj-$(CONFIG_NUMA) += mempolicy.o
{
struct page *page;
- /*
- * We scan the hash list read-only. Addition to and removal from
- * the hash-list needs a held write-lock.
- */
spin_lock_irq(&mapping->tree_lock);
page = radix_tree_lookup(&mapping->page_tree, offset);
if (page)
struct file * filp,
loff_t *ppos,
read_descriptor_t * desc,
- read_actor_t actor)
+ read_actor_t actor,
+ int nonblock)
{
struct inode *inode = mapping->host;
unsigned long index, end_index, offset;
find_page:
page = find_get_page(mapping, index);
if (unlikely(page == NULL)) {
+ if (nonblock) {
+ desc->error = -EWOULDBLOCKIO;
+ break;
+ }
handle_ra_miss(mapping, &ra, index);
goto no_cached_page;
}
- if (!PageUptodate(page))
+ if (!PageUptodate(page)) {
+ if (nonblock) {
+ page_cache_release(page);
+ desc->error = -EWOULDBLOCKIO;
+ break;
+ }
goto page_not_up_to_date;
+ }
page_ok:
/* nr is the maximum number of bytes to copy from this page */
nr = PAGE_CACHE_SIZE;
if (desc.count == 0)
continue;
desc.error = 0;
- do_generic_file_read(filp,ppos,&desc,file_read_actor);
+ do_generic_file_read(filp,ppos,&desc,file_read_actor,0);
retval += desc.written;
if (!retval) {
retval = desc.error;
desc.arg.data = target;
desc.error = 0;
- do_generic_file_read(in_file, ppos, &desc, actor);
+ do_generic_file_read(in_file, ppos, &desc, actor, 0);
if (desc.written)
return desc.written;
return desc.error;
* effect.
*/
error = page_cache_read(file, pgoff);
+ grab_swap_token();
/*
* The page we want has now been added to the page cache.
return err;
}
} else {
- /*
- * If a nonlinear mapping then store the file page offset
- * in the pte.
- */
- if (pgoff != linear_page_index(vma, addr)) {
- err = install_file_pte(mm, vma, addr, pgoff, prot);
- if (err)
- return err;
- }
+ err = install_file_pte(mm, vma, addr, pgoff, prot);
+ if (err)
+ return err;
}
len -= PAGE_SIZE;
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, struct page *page, pgprot_t prot)
{
+ struct inode *inode;
+ pgoff_t size;
int err = -ENOMEM;
pte_t *pte;
pgd_t *pgd;
if (!pte)
goto err_unlock;
+ /*
+ * This page may have been truncated. Tell the
+ * caller about it.
+ */
+ err = -EINVAL;
+ inode = vma->vm_file->f_mapping->host;
+ size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ if (!page->mapping || page->index >= size)
+ goto err_unlock;
+
zap_pte(mm, vma, addr, pte);
mm->rss++;
{
struct bio *bio_orig = bio->bi_private;
struct bio_vec *bvec, *org_vec;
- int i;
+ int i, err = 0;
if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
- goto out_eio;
-
- set_bit(BIO_UPTODATE, &bio_orig->bi_flags);
+ err = -EIO;
/*
* free up bounce indirect pages used
mempool_free(bvec->bv_page, pool);
}
-out_eio:
- bio_endio(bio_orig, bio_orig->bi_size, 0);
+ bio_endio(bio_orig, bio_orig->bi_size, err);
bio_put(bio);
}
#ifdef CONFIG_SYSCTL
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer,
- size_t *length)
+ size_t *length, loff_t *ppos)
{
- proc_doulongvec_minmax(table, write, file, buffer, length);
+ proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
max_huge_pages = set_max_huge_pages(max_huge_pages);
return 0;
}
pte_t *pte;
if (write) /* user gate pages are read-only */
return i ? : -EFAULT;
- pgd = pgd_offset(mm, pg);
+ pgd = pgd_offset_gate(mm, pg);
if (!pgd)
return i ? : -EFAULT;
pmd = pmd_offset(pgd, pg);
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
inc_page_state(pgmajfault);
+ grab_swap_token();
}
mark_page_accessed(page);
up_write(¤t->mm->mmap_sem);
return ret;
}
+
+/*
+ * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
+ * shm segments) get accounted against the user_struct instead.
+ */
+static spinlock_t shmlock_user_lock = SPIN_LOCK_UNLOCKED;
+
+int user_shm_lock(size_t size, struct user_struct *user)
+{
+ unsigned long lock_limit, locked;
+ int allowed = 0;
+
+ spin_lock(&shmlock_user_lock);
+ locked = size >> PAGE_SHIFT;
+ lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
+ lock_limit >>= PAGE_SHIFT;
+ if (locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
+ goto out;
+ get_uid(user);
+ user->locked_shm += locked;
+ allowed = 1;
+out:
+ spin_unlock(&shmlock_user_lock);
+ return allowed;
+}
+
+void user_shm_unlock(size_t size, struct user_struct *user)
+{
+ spin_lock(&shmlock_user_lock);
+ user->locked_shm -= (size >> PAGE_SHIFT);
+ spin_unlock(&shmlock_user_lock);
+ free_uid(user);
+}
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
+#include <linux/random.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
int accountable = 1;
unsigned long charged = 0;
+ /*
+ * Does the application expect PROT_READ to imply PROT_EXEC:
+ */
+ if (unlikely((prot & PROT_READ) &&
+ (current->personality & READ_IMPLIES_EXEC)))
+ prot |= PROT_EXEC;
+
if (file) {
if (is_file_hugepages(file))
accountable = 0;
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
- addr = get_unmapped_area(file, addr, len, pgoff, flags, prot & PROT_EXEC);
+ addr = get_unmapped_area_prot(file, addr, len, pgoff, flags, prot & PROT_EXEC);
if (addr & ~PAGE_MASK)
return addr;
* This function "knows" that -ENOMEM has the bits set.
*/
#ifndef HAVE_ARCH_UNMAPPED_AREA
-static inline unsigned long
+unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
+ unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
addr = vma->vm_end;
}
}
-#else
-extern unsigned long
-arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
- unsigned long, unsigned long, unsigned long);
#endif
+void arch_unmap_area(struct vm_area_struct *area)
+{
+ /*
+ * Is this a new hole at the lowest possible address?
+ */
+ if (area->vm_start >= TASK_UNMAPPED_BASE &&
+ area->vm_start < area->vm_mm->free_area_cache)
+ area->vm_mm->free_area_cache = area->vm_start;
+}
+
+/*
+ * This mmap-allocator allocates new areas top-down from below the
+ * stack's low limit (the base):
+ */
unsigned long
-get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags, unsigned long exec)
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags)
+{
+ struct vm_area_struct *vma, *prev_vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long base = mm->mmap_base, addr = addr0;
+ int first_time = 1;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ /* don't allow allocations above current base */
+ if (mm->free_area_cache > base)
+ mm->free_area_cache = base;
+
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+try_again:
+ /* make sure it can fit in the remaining address space */
+ if (mm->free_area_cache < len)
+ goto fail;
+
+ /* either no address requested or can't fit in requested address hole */
+ addr = (mm->free_area_cache - len) & PAGE_MASK;
+ do {
+ /*
+ * Lookup failure means no vma is above this address,
+ * i.e. return with success:
+ */
+ if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+ return addr;
+
+ /*
+ * new region fits between prev_vma->vm_end and
+ * vma->vm_start, use it:
+ */
+ if (addr+len <= vma->vm_start &&
+ (!prev_vma || (addr >= prev_vma->vm_end)))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ else
+ /* pull free_area_cache down to the first hole */
+ if (mm->free_area_cache == vma->vm_end)
+ mm->free_area_cache = vma->vm_start;
+
+ /* try just below the current vma->vm_start */
+ addr = vma->vm_start-len;
+ } while (len <= vma->vm_start);
+
+fail:
+ /*
+ * if hint left us with no space for the requested
+ * mapping then try again:
+ */
+ if (first_time) {
+ mm->free_area_cache = base;
+ first_time = 0;
+ goto try_again;
+ }
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+ mm->free_area_cache = base;
+
+ return addr;
+}
+
+void arch_unmap_area_topdown(struct vm_area_struct *area)
+{
+ /*
+ * Is this a new hole at the highest possible address?
+ */
+ if (area->vm_end > area->vm_mm->free_area_cache)
+ area->vm_mm->free_area_cache = area->vm_end;
+}
+
+
+unsigned long
+get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags, int exec)
{
if (flags & MAP_FIXED) {
unsigned long ret;
return file->f_op->get_unmapped_area(file, addr, len,
pgoff, flags);
- return arch_get_unmapped_area(file, addr, len, pgoff, flags, exec);
+ if (exec && current->mm->get_unmapped_exec_area)
+ return current->mm->get_unmapped_exec_area(file, addr, len, pgoff, flags);
+ else
+ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}
-EXPORT_SYMBOL(get_unmapped_area);
+EXPORT_SYMBOL(get_unmapped_area_prot);
+
+
+#define SHLIB_BASE 0x00111000
+
+unsigned long arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
+ unsigned long len0, unsigned long pgoff, unsigned long flags)
+{
+ unsigned long addr = addr0, len = len0;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long tmp;
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if (!addr && !(flags & MAP_FIXED))
+ addr = randomize_range(SHLIB_BASE, 0x01000000, len);
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start)) {
+ return addr;
+ }
+ }
+
+ addr = SHLIB_BASE;
+
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr) {
+ return -ENOMEM;
+ }
+ if (!vma || addr + len <= vma->vm_start) {
+ /*
+ * Must not let a PROT_EXEC mapping get into the
+ * brk area:
+ */
+ if (addr + len > mm->brk)
+ goto failed;
+
+ /*
+ * Up until the brk area we randomize addresses
+ * as much as possible:
+ */
+ if (addr >= 0x01000000) {
+ tmp = randomize_range(0x01000000, mm->brk, len);
+ vma = find_vma(mm, tmp);
+ if (TASK_SIZE - len >= tmp &&
+ (!vma || tmp + len <= vma->vm_start))
+ return tmp;
+ }
+ /*
+ * Ok, randomization didn't work out - return
+ * the result of the linear search:
+ */
+ return addr;
+ }
+ addr = vma->vm_end;
+ }
+
+failed:
+ return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
+}
+
+
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
{
size_t len = area->vm_end - area->vm_start;
- unsigned long old_end = area->vm_end;
area->vm_mm->total_vm -= len >> PAGE_SHIFT;
if (area->vm_flags & VM_LOCKED)
area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
- /*
- * Is this a new hole at the lowest possible address?
- */
- if (area->vm_start >= TASK_UNMAPPED_BASE &&
- area->vm_start < area->vm_mm->free_area_cache)
- area->vm_mm->free_area_cache = area->vm_start;
- /*
- * Is this a new hole at the highest possible address?
- */
- if (area->vm_start > area->vm_mm->non_executable_cache)
- area->vm_mm->non_executable_cache = area->vm_start;
+ area->vm_mm->unmap_area(area);
remove_vm_struct(area);
- if (unlikely(area->vm_flags & VM_EXEC))
- arch_remove_exec_range(mm, old_end);
}
/*
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
+#include <linux/personality.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
return -EINVAL;
if (end == start)
return 0;
+ /*
+ * Does the application expect PROT_READ to imply PROT_EXEC:
+ */
+ if (unlikely((prot & PROT_READ) &&
+ (current->personality & READ_IMPLIES_EXEC)))
+ prot |= PROT_EXEC;
vm_flags = calc_vm_prot_bits(prot);
if (vma->vm_flags & VM_MAYSHARE)
map_flags |= MAP_SHARED;
- new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
- vma->vm_pgoff, map_flags,
- vma->vm_flags & VM_EXEC);
+ new_addr = get_unmapped_area_prot(vma->vm_file, 0, new_len,
+ vma->vm_pgoff, map_flags, vma->vm_flags & VM_EXEC);
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out;
/**
* out_of_memory - is the system out of memory?
*/
-void out_of_memory(void)
+void out_of_memory(int gfp_mask)
{
/*
* oom_lock protects out_of_memory()'s static variables.
*/
lastkill = now;
+ printk("oom-killer: gfp_mask=0x%x\n", gfp_mask);
+ show_free_areas();
+
/* oom_kill() sleeps */
spin_unlock(&oom_lock);
oom_kill();
* sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
*/
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length)
+ struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec(table, write, file, buffer, length);
+ proc_dointvec(table, write, file, buffer, length, ppos);
if (dirty_writeback_centisecs) {
mod_timer(&wb_timer,
jiffies + (dirty_writeback_centisecs * HZ) / 100);
EXPORT_SYMBOL(nr_free_pages);
-unsigned int nr_used_zone_pages(void)
-{
- unsigned int pages = 0;
- struct zone *zone;
-
- for_each_zone(zone)
- pages += zone->nr_active + zone->nr_inactive;
-
- return pages;
-}
-
#ifdef CONFIG_NUMA
unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
{
for (i = 0; i < MAX_NR_ZONES; i++)
realtotalpages -= zholes_size[i];
pgdat->node_present_pages = realtotalpages;
- printk("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
+ printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}
INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
/* The shift won't overflow because ZONE_NORMAL is below 4G. */
- if (!is_highmem(zone))
+ if (!is_highmem_idx(zone))
set_page_address(page, __va(start_pfn << PAGE_SHIFT));
#endif
start_pfn++;
pcp->batch = 1 * batch;
INIT_LIST_HEAD(&pcp->list);
}
- printk(" %s zone: %lu pages, LIFO batch:%lu\n",
+ printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
zone_names[j], realsize, batch);
INIT_LIST_HEAD(&zone->active_list);
INIT_LIST_HEAD(&zone->inactive_list);
* changes.
*/
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length)
+ struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec(table, write, file, buffer, length);
+ proc_dointvec(table, write, file, buffer, length, ppos);
setup_per_zone_pages_min();
setup_per_zone_protection();
return 0;
* whenever sysctl_lower_zone_protection changes.
*/
int lower_zone_protection_sysctl_handler(ctl_table *table, int write,
- struct file *file, void __user *buffer, size_t *length)
+ struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec_minmax(table, write, file, buffer, length);
+ proc_dointvec_minmax(table, write, file, buffer, length, ppos);
setup_per_zone_protection();
return 0;
}
struct file *filp, unsigned long offset)
{
unsigned max;
- unsigned min;
unsigned orig_next_size;
unsigned actual;
int first_access=0;
if (max == 0)
goto out; /* No readahead */
- min = get_min_readahead(ra);
orig_next_size = ra->next_size;
if (ra->next_size == 0) {
* pages shall be accessed in the next
* current window.
*/
- ra->next_size = min(ra->average , (unsigned long)max);
+ average = ra->average;
+ if (ra->serial_cnt > average)
+ average = (ra->serial_cnt + ra->average + 1) / 2;
+
+ ra->next_size = min(average , (unsigned long)max);
}
ra->start = offset;
ra->size = ra->next_size;
ra->size = max;
ra->ahead_start = 0;
ra->ahead_size = 0;
+ ra->average = max / 2;
}
}
ra->prev_page = offset;
if (ptep_clear_flush_young(vma, address, pte))
referenced++;
+ if (mm != current->mm && has_swap_token(mm))
+ referenced++;
+
(*mapcount)--;
out_unmap:
* an exclusive swap page, do_wp_page will replace it by a copy
* page, and the user never get to see the data GUP was holding
* the original page for.
+ *
+ * This test is also useful for when swapoff (unuse_process) has
+ * to drop page lock: its reference to the page stops existing
+ * ptes from being unmapped, so swapoff can make progress.
*/
if (PageSwapCache(page) &&
page_count(page) != page->mapcount + 2) {
return err;
}
} else if (nonblock) {
- /*
- * If a nonlinear mapping then store the file page
- * offset in the pte.
- */
- if (pgoff != linear_page_index(vma, addr)) {
- err = install_file_pte(mm, vma, addr, pgoff, prot);
- if (err)
- return err;
- }
+ err = install_file_pte(mm, vma, addr, pgoff, prot);
+ if (err)
+ return err;
}
len -= PAGE_SIZE;
}
#endif
-/* Protects current->user->locked_shm from concurrent access */
-static spinlock_t shmem_lock_user = SPIN_LOCK_UNLOCKED;
-
-int shmem_lock(struct file *file, int lock, struct user_struct * user)
+int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
struct inode *inode = file->f_dentry->d_inode;
struct shmem_inode_info *info = SHMEM_I(inode);
- unsigned long lock_limit, locked;
int retval = -ENOMEM;
spin_lock(&info->lock);
- spin_lock(&shmem_lock_user);
if (lock && !(info->flags & VM_LOCKED)) {
- locked = inode->i_size >> PAGE_SHIFT;
- locked += user->locked_shm;
- lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
- lock_limit >>= PAGE_SHIFT;
- if ((locked > lock_limit) && !capable(CAP_IPC_LOCK))
+ if (!user_shm_lock(inode->i_size, user))
goto out_nomem;
- /* for this branch user == current->user so it won't go away under us */
- atomic_inc(&user->__count);
- user->locked_shm = locked;
+ info->flags |= VM_LOCKED;
}
if (!lock && (info->flags & VM_LOCKED) && user) {
- locked = inode->i_size >> PAGE_SHIFT;
- user->locked_shm -= locked;
- free_uid(user);
- }
- if (lock)
- info->flags |= VM_LOCKED;
- else
+ user_shm_unlock(inode->i_size, user);
info->flags &= ~VM_LOCKED;
+ }
retval = 0;
out_nomem:
- spin_unlock(&shmem_lock_user);
spin_unlock(&info->lock);
return retval;
}
/*
* This file contains the default values for the opereation of the
* Linux VM subsystem. Fine-tuning documentation can be found in
- * linux/Documentation/sysctl/vm.txt.
+ * Documentation/sysctl/vm.txt.
* Started 18.12.91
* Swap aging added 23.2.95, Stephen Tweedie.
* Buffermem limits added 12.3.98, Rik van Riel.
check_next_cluster:
if (offset+SWAPFILE_CLUSTER-1 <= si->highest_bit)
{
- int nr;
+ unsigned long nr;
for (nr = offset; nr < offset+SWAPFILE_CLUSTER; nr++)
if (si->swap_map[nr])
{
/*
* Go through process' page directory.
*/
- down_read(&mm->mmap_sem);
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ /*
+ * Our reference to the page stops try_to_unmap_one from
+ * unmapping its ptes, so swapoff can make progress.
+ */
+ unlock_page(page);
+ down_read(&mm->mmap_sem);
+ lock_page(page);
+ }
spin_lock(&mm->page_table_lock);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!is_vm_hugetlb_page(vma)) {
* slab to avoid swapping.
*
* We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
+ *
+ * `lru_pages' represents the number of on-LRU pages in all the zones which
+ * are eligible for the caller's allocation attempt. It is used for balancing
+ * slab reclaim versus page reclaim.
*/
-static int shrink_slab(unsigned long scanned, unsigned int gfp_mask)
+static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
+ unsigned long lru_pages)
{
struct shrinker *shrinker;
- long pages;
if (down_trylock(&shrinker_sem))
return 0;
- pages = nr_used_zone_pages();
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
delta = (4 * scanned) / shrinker->seeks;
delta *= (*shrinker->shrinker)(0, gfp_mask);
- do_div(delta, pages + 1);
+ do_div(delta, lru_pages + 1);
shrinker->nr += delta;
if (shrinker->nr < 0)
shrinker->nr = LONG_MAX; /* It wrapped! */
int total_scanned = 0, total_reclaimed = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
struct scan_control sc;
+ unsigned long lru_pages = 0;
int i;
sc.gfp_mask = gfp_mask;
inc_page_state(allocstall);
- for (i = 0; zones[i] != 0; i++)
- zones[i]->temp_priority = DEF_PRIORITY;
+ for (i = 0; zones[i] != NULL; i++) {
+ struct zone *zone = zones[i];
+
+ zone->temp_priority = DEF_PRIORITY;
+ lru_pages += zone->nr_active + zone->nr_inactive;
+ }
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
sc.nr_mapped = read_page_state(nr_mapped);
sc.nr_reclaimed = 0;
sc.priority = priority;
shrink_caches(zones, &sc);
- shrink_slab(sc.nr_scanned, gfp_mask);
+ shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
if (reclaim_state) {
sc.nr_reclaimed += reclaim_state->reclaimed_slab;
reclaim_state->reclaimed_slab = 0;
blk_congestion_wait(WRITE, HZ/10);
}
if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY))
- out_of_memory();
+ out_of_memory(gfp_mask);
out:
for (i = 0; zones[i] != 0; i++)
zones[i]->prev_priority = zones[i]->temp_priority;
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
int all_zones_ok = 1;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
-
+ unsigned long lru_pages = 0;
if (nr_pages == 0) {
/*
end_zone = pgdat->nr_zones - 1;
}
scan:
+ for (i = 0; i <= end_zone; i++) {
+ struct zone *zone = pgdat->node_zones + i;
+
+ lru_pages += zone->nr_active + zone->nr_inactive;
+ }
+
/*
* Now scan the zone in the dma->highmem direction, stopping
* at the last zone which needs scanning.
sc.priority = priority;
shrink_zone(zone, &sc);
reclaim_state->reclaimed_slab = 0;
- shrink_slab(sc.nr_scanned, GFP_KERNEL);
+ shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages);
sc.nr_reclaimed += reclaim_state->reclaimed_slab;
total_reclaimed += sc.nr_reclaimed;
if (zone->all_unreclaimable)
config IPV6
tristate "The IPv6 protocol (EXPERIMENTAL)"
depends on INET && EXPERIMENTAL
+ select CRYPTO if IPV6_PRIVACY
+ select CRYPTO_MD5 if IPV6_PRIVACY
---help---
This is experimental support for the IP version 6 (formerly called
IPng "IP next generation"). You will still be able to do
source "net/bluetooth/Kconfig"
+source "net/tux/Kconfig"
+
source "drivers/net/Kconfig"
endmenu
ifneq ($(CONFIG_IPV6),)
obj-y += ipv6/
endif
+obj-$(CONFIG_TUX) += tux/
obj-$(CONFIG_PACKET) += packet/
obj-$(CONFIG_NET_KEY) += key/
obj-$(CONFIG_NET_SCHED) += sched/
case SIOCADDRT: {
struct net_device *dev = NULL;
- /*
- * FIXME: the name of the device is still in user
- * space, isn't it?
- */
if (rt.rt_dev) {
- dev = __dev_get_by_name(rt.rt_dev);
+ char name[IFNAMSIZ];
+ if (copy_from_user(name, rt.rt_dev, IFNAMSIZ-1))
+ return -EFAULT;
+ name[IFNAMSIZ-1] = '\0';
+ dev = __dev_get_by_name(name);
if (!dev)
return -ENODEV;
}
BRPRIV(skb->dev)->stats.rx_packets--;
br2684_push(atmvcc, skb);
}
- (void) try_module_get(THIS_MODULE);
+ __module_get(THIS_MODULE);
return 0;
error:
write_unlock_irq(&devs_lock);
/*
* lec.c: Lan Emulation driver
- * Marko Kiiskila carnil@cs.tut.fi
+ * Marko Kiiskila mkiiskila@yahoo.com
*
*/
static int lec_close(struct net_device *dev);
static struct net_device_stats *lec_get_stats(struct net_device *dev);
static void lec_init(struct net_device *dev);
-static inline struct lec_arp_table* lec_arp_find(struct lec_priv *priv,
+static struct lec_arp_table* lec_arp_find(struct lec_priv *priv,
unsigned char *mac_addr);
-static inline int lec_arp_remove(struct lec_priv *priv,
+static int lec_arp_remove(struct lec_priv *priv,
struct lec_arp_table *to_remove);
/* LANE2 functions */
static void lane2_associate_ind (struct net_device *dev, u8 *mac_address,
/*
* Remove entry from lec_arp_table
*/
-static inline int
+static int
lec_arp_remove(struct lec_priv *priv,
struct lec_arp_table *to_remove)
{
/*
* Find entry by mac_address
*/
-static inline struct lec_arp_table*
+static struct lec_arp_table*
lec_arp_find(struct lec_priv *priv,
unsigned char *mac_addr)
{
*
* Lan Emulation client header file
*
- * Marko Kiiskila carnil@cs.tut.fi
+ * Marko Kiiskila mkiiskila@yahoo.com
*
*/
/*
* Lec arp cache
- * Marko Kiiskila carnil@cs.tut.fi
+ * Marko Kiiskila mkiiskila@yahoo.com
*
*/
#ifndef _LEC_ARP_H
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bitops.h>
+#include <linux/seq_file.h>
/* We are an ethernet device */
#include <linux/if_ether.h>
return 0;
}
-void atm_mpoa_disp_qos(char *page, ssize_t *len)
+/* this is buggered - we need locking for qos_head */
+void atm_mpoa_disp_qos(struct seq_file *m)
{
-
unsigned char *ip;
char ipaddr[16];
struct atm_mpoa_qos *qos;
qos = qos_head;
- *len += sprintf(page + *len, "QoS entries for shortcuts:\n");
- *len += sprintf(page + *len, "IP address\n TX:max_pcr pcr min_pcr max_cdv max_sdu\n RX:max_pcr pcr min_pcr max_cdv max_sdu\n");
+ seq_printf(m, "QoS entries for shortcuts:\n");
+ seq_printf(m, "IP address\n TX:max_pcr pcr min_pcr max_cdv max_sdu\n RX:max_pcr pcr min_pcr max_cdv max_sdu\n");
ipaddr[sizeof(ipaddr)-1] = '\0';
while (qos != NULL) {
ip = (unsigned char *)&qos->ipaddr;
sprintf(ipaddr, "%u.%u.%u.%u", NIPQUAD(ip));
- *len += sprintf(page + *len, "%u.%u.%u.%u\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n",
+ seq_printf(m, "%u.%u.%u.%u\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n",
NIPQUAD(ipaddr),
qos->qos.txtp.max_pcr, qos->qos.txtp.pcr, qos->qos.txtp.min_pcr, qos->qos.txtp.max_cdv, qos->qos.txtp.max_sdu,
qos->qos.rxtp.max_pcr, qos->qos.rxtp.pcr, qos->qos.rxtp.min_pcr, qos->qos.rxtp.max_cdv, qos->qos.rxtp.max_sdu);
qos = qos->next;
}
-
- return;
}
static struct net_device *find_lec_by_itfnum(int itf)
int atm_mpoa_delete_qos(struct atm_mpoa_qos *qos);
/* Display QoS entries. This is for the procfs */
-void atm_mpoa_disp_qos(char *page, ssize_t *len);
+struct seq_file;
+void atm_mpoa_disp_qos(struct seq_file *m);
#endif /* _MPC_H_ */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
+#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/atmmpc.h>
#include <linux/atm.h>
extern struct mpoa_client *mpcs;
extern struct proc_dir_entry *atm_proc_root; /* from proc.c. */
-static ssize_t proc_mpc_read(struct file *file, char __user *buff,
- size_t count, loff_t *pos);
-
+static int proc_mpc_open(struct inode *inode, struct file *file);
static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
size_t nbytes, loff_t *ppos);
-static int parse_qos(const char *buff, int len);
+static int parse_qos(const char *buff);
/*
* Define allowed FILE OPERATIONS
*/
static struct file_operations mpc_file_operations = {
.owner = THIS_MODULE,
- .read = proc_mpc_read,
+ .open = proc_mpc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
.write = proc_mpc_write,
+ .release = seq_release,
};
-static int print_header(char *buff,struct mpoa_client *mpc){
- if(mpc != NULL){
- return sprintf(buff,"\nInterface %d:\n\n",mpc->dev_num);
-
- }
- return 0;
-}
-
/*
* Returns the state of an ingress cache entry as a string
*/
}
}
+/*
+ * FIXME: mpcs (and per-mpc lists) have no locking whatsoever.
+ */
+
+static void *mpc_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t l = *pos;
+ struct mpoa_client *mpc;
+
+ if (!l--)
+ return SEQ_START_TOKEN;
+ for (mpc = mpcs; mpc; mpc = mpc->next)
+ if (!l--)
+ return mpc;
+ return NULL;
+}
+
+static void *mpc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct mpoa_client *p = v;
+ (*pos)++;
+ return v == SEQ_START_TOKEN ? mpcs : p->next;
+}
+
+static void mpc_stop(struct seq_file *m, void *v)
+{
+}
+
/*
* READING function - called when the /proc/atm/mpoa file is read from.
*/
-static ssize_t proc_mpc_read(struct file *file, char __user *buff,
- size_t count, loff_t *pos){
- unsigned long page = 0;
+static ssize_t mpc_show(struct seq_file *m, void *v)
+{
+ struct mpoa_client *mpc = v;
unsigned char *temp;
- ssize_t length = 0;
- int i = 0;
- struct mpoa_client *mpc = mpcs;
+ int i;
in_cache_entry *in_entry;
eg_cache_entry *eg_entry;
struct timeval now;
unsigned char ip_string[16];
- if(count == 0)
- return 0;
- page = get_zeroed_page(GFP_KERNEL);
- if(!page)
- return -ENOMEM;
- atm_mpoa_disp_qos((char *)page, &length);
- while(mpc != NULL){
- length += print_header((char *)page + length, mpc);
- length += sprintf((char *)page + length,"Ingress Entries:\nIP address State Holding time Packets fwded VPI VCI\n");
- in_entry = mpc->in_cache;
- do_gettimeofday(&now);
- while(in_entry != NULL){
- temp = (unsigned char *)&in_entry->ctrl_info.in_dst_ip; sprintf(ip_string,"%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
- length += sprintf((char *)page + length,"%-16s%s%-14lu%-12u", ip_string, ingress_state_string(in_entry->entry_state), (in_entry->ctrl_info.holding_time-(now.tv_sec-in_entry->tv.tv_sec)), in_entry->packets_fwded);
- if(in_entry->shortcut)
- length += sprintf((char *)page + length," %-3d %-3d",in_entry->shortcut->vpi,in_entry->shortcut->vci);
- length += sprintf((char *)page + length,"\n");
- in_entry = in_entry->next;
- }
- length += sprintf((char *)page + length,"\n");
- eg_entry = mpc->eg_cache;
- length += sprintf((char *)page + length,"Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n");
- while(eg_entry != NULL){
- for(i=0;i<ATM_ESA_LEN;i++){
- length += sprintf((char *)page + length,"%02x",eg_entry->ctrl_info.in_MPC_data_ATM_addr[i]);}
- length += sprintf((char *)page + length,"\n%-16lu%s%-14lu%-15u",(unsigned long) ntohl(eg_entry->ctrl_info.cache_id), egress_state_string(eg_entry->entry_state), (eg_entry->ctrl_info.holding_time-(now.tv_sec-eg_entry->tv.tv_sec)), eg_entry->packets_rcvd);
-
- /* latest IP address */
- temp = (unsigned char *)&eg_entry->latest_ip_addr;
- sprintf(ip_string, "%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
- length += sprintf((char *)page + length, "%-16s", ip_string);
-
- if(eg_entry->shortcut)
- length += sprintf((char *)page + length," %-3d %-3d",eg_entry->shortcut->vpi,eg_entry->shortcut->vci);
- length += sprintf((char *)page + length,"\n");
- eg_entry = eg_entry->next;
- }
- length += sprintf((char *)page + length,"\n");
- mpc = mpc->next;
+
+ if (v == SEQ_START_TOKEN) {
+ atm_mpoa_disp_qos(m);
+ return 0;
}
- if (*pos >= length) length = 0;
- else {
- if ((count + *pos) > length) count = length - *pos;
- if (copy_to_user(buff, (char *)page , count)) {
- free_page(page);
- return -EFAULT;
- }
- *pos += count;
+ seq_printf(m, "\nInterface %d:\n\n", mpc->dev_num);
+ seq_printf(m, "Ingress Entries:\nIP address State Holding time Packets fwded VPI VCI\n");
+ do_gettimeofday(&now);
+
+ for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) {
+ temp = (unsigned char *)&in_entry->ctrl_info.in_dst_ip;
+ sprintf(ip_string,"%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
+ seq_printf(m, "%-16s%s%-14lu%-12u",
+ ip_string,
+ ingress_state_string(in_entry->entry_state),
+ in_entry->ctrl_info.holding_time-(now.tv_sec-in_entry->tv.tv_sec),
+ in_entry->packets_fwded);
+ if (in_entry->shortcut)
+ seq_printf(m, " %-3d %-3d",in_entry->shortcut->vpi,in_entry->shortcut->vci);
+ seq_printf(m, "\n");
}
- free_page(page);
- return length;
+ seq_printf(m, "\n");
+ seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n");
+ for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) {
+ unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr;
+ for(i = 0; i < ATM_ESA_LEN; i++)
+ seq_printf(m, "%02x", p[i]);
+ seq_printf(m, "\n%-16lu%s%-14lu%-15u",
+ (unsigned long)ntohl(eg_entry->ctrl_info.cache_id),
+ egress_state_string(eg_entry->entry_state),
+ (eg_entry->ctrl_info.holding_time-(now.tv_sec-eg_entry->tv.tv_sec)),
+ eg_entry->packets_rcvd);
+
+ /* latest IP address */
+ temp = (unsigned char *)&eg_entry->latest_ip_addr;
+ sprintf(ip_string, "%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
+ seq_printf(m, "%-16s", ip_string);
+
+ if (eg_entry->shortcut)
+ seq_printf(m, " %-3d %-3d",eg_entry->shortcut->vpi,eg_entry->shortcut->vci);
+ seq_printf(m, "\n");
+ }
+ seq_printf(m, "\n");
+ return 0;
+}
+
+static struct seq_operations mpc_op = {
+ .start = mpc_start,
+ .next = mpc_next,
+ .stop = mpc_stop,
+ .show = mpc_show
+};
+
+static int proc_mpc_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &mpc_op);
}
static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
size_t nbytes, loff_t *ppos)
{
- int incoming, error, retval;
- char *page, c;
- const char __user *tmp;
+ char *page, *p;
+ unsigned len;
- if (nbytes == 0) return 0;
- if (nbytes >= PAGE_SIZE) nbytes = PAGE_SIZE-1;
+ if (nbytes == 0)
+ return 0;
- error = verify_area(VERIFY_READ, buff, nbytes);
- if (error) return error;
+ if (nbytes >= PAGE_SIZE)
+ nbytes = PAGE_SIZE-1;
page = (char *)__get_free_page(GFP_KERNEL);
- if (page == NULL) return -ENOMEM;
-
- incoming = 0;
- tmp = buff;
- while(incoming < nbytes){
- if (get_user(c, tmp++)) return -EFAULT;
- incoming++;
- if (c == '\0' || c == '\n')
- break;
- }
+ if (!page)
+ return -ENOMEM;
- retval = copy_from_user(page, buff, incoming);
- if (retval != 0) {
- printk("mpoa: proc_mpc_write: copy_from_user() failed\n");
- return -EFAULT;
+ for (p = page, len = 0; len < nbytes; p++, len++) {
+ if (get_user(*p, buff++)) {
+ free_page((unsigned long)page);
+ return -EFAULT;
+ }
+ if (*p == '\0' || *p == '\n')
+ break;
}
- *ppos += incoming;
+ *p = '\0';
- page[incoming] = '\0';
- retval = parse_qos(page, incoming);
- if (retval == 0)
+ if (!parse_qos(page))
printk("mpoa: proc_mpc_write: could not parse '%s'\n", page);
free_page((unsigned long)page);
- return nbytes;
+ return len;
}
-static int parse_qos(const char *buff, int len)
+static int parse_qos(const char *buff)
{
/* possible lines look like this
* add 130.230.54.142 tx=max_pcr,max_sdu rx=max_pcr,max_sdu
*/
-
- int pos, i;
- uint32_t ipaddr;
unsigned char ip[4];
- char cmd[4], temp[256];
- const char *tmp, *prev;
+ int tx_pcr, tx_sdu, rx_pcr, rx_sdu;
+ uint32_t ipaddr;
struct atm_qos qos;
- int value[5];
memset(&qos, 0, sizeof(struct atm_qos));
- strlcpy(cmd, buff, sizeof(cmd));
- if( strncmp(cmd,"add", 3) && strncmp(cmd,"del", 3))
- return 0; /* not add or del */
-
- pos = 4;
- /* next parse ip */
- prev = buff + pos;
- for (i = 0; i < 3; i++) {
- tmp = strchr(prev, '.');
- if (tmp == NULL) return 0;
- memset(temp, '\0', 256);
- memcpy(temp, prev, tmp-prev);
- ip[i] = (char)simple_strtoul(temp, NULL, 0);
- tmp ++;
- prev = tmp;
- }
- tmp = strchr(prev, ' ');
- if (tmp == NULL) return 0;
- memset(temp, '\0', 256);
- memcpy(temp, prev, tmp-prev);
- ip[i] = (char)simple_strtoul(temp, NULL, 0);
- ipaddr = *(uint32_t *)ip;
-
- if(!strncmp(cmd, "del", 3))
- return atm_mpoa_delete_qos(atm_mpoa_search_qos(ipaddr));
-
- /* next transmit values */
- tmp = strstr(buff, "tx=");
- if(tmp == NULL) return 0;
- tmp += 3;
- prev = tmp;
- for( i = 0; i < 1; i++){
- tmp = strchr(prev, ',');
- if (tmp == NULL) return 0;
- memset(temp, '\0', 256);
- memcpy(temp, prev, tmp-prev);
- value[i] = (int)simple_strtoul(temp, NULL, 0);
- tmp ++;
- prev = tmp;
+
+ if (sscanf(buff, "del %hhu.%hhu.%hhu.%hhu",
+ ip, ip+1, ip+2, ip+3) == 4) {
+ ipaddr = *(uint32_t *)ip;
+ return atm_mpoa_delete_qos(atm_mpoa_search_qos(ipaddr));
}
- tmp = strchr(prev, ' ');
- if (tmp == NULL) return 0;
- memset(temp, '\0', 256);
- memcpy(temp, prev, tmp-prev);
- value[i] = (int)simple_strtoul(temp, NULL, 0);
+
+ if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=tx",
+ ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu) == 6) {
+ rx_pcr = tx_pcr;
+ rx_sdu = tx_sdu;
+ } else if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=%d,%d",
+ ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu, &rx_pcr, &rx_sdu) != 8)
+ return 0;
+
+ ipaddr = *(uint32_t *)ip;
qos.txtp.traffic_class = ATM_CBR;
- qos.txtp.max_pcr = value[0];
- qos.txtp.max_sdu = value[1];
-
- /* next receive values */
- tmp = strstr(buff, "rx=");
- if(tmp == NULL) return 0;
- if (strstr(buff, "rx=tx")) { /* rx == tx */
- qos.rxtp.traffic_class = qos.txtp.traffic_class;
- qos.rxtp.max_pcr = qos.txtp.max_pcr;
- qos.rxtp.max_cdv = qos.txtp.max_cdv;
- qos.rxtp.max_sdu = qos.txtp.max_sdu;
- } else {
- tmp += 3;
- prev = tmp;
- for( i = 0; i < 1; i++){
- tmp = strchr(prev, ',');
- if (tmp == NULL) return 0;
- memset(temp, '\0', 256);
- memcpy(temp, prev, tmp-prev);
- value[i] = (int)simple_strtoul(temp, NULL, 0);
- tmp ++;
- prev = tmp;
- }
- tmp = strchr(prev, '\0');
- if (tmp == NULL) return 0;
- memset(temp, '\0', 256);
- memcpy(temp, prev, tmp-prev);
- value[i] = (int)simple_strtoul(temp, NULL, 0);
- qos.rxtp.traffic_class = ATM_CBR;
- qos.rxtp.max_pcr = value[0];
- qos.rxtp.max_sdu = value[1];
- }
+ qos.txtp.max_pcr = tx_pcr;
+ qos.txtp.max_sdu = tx_sdu;
+ qos.rxtp.traffic_class = ATM_CBR;
+ qos.rxtp.max_pcr = rx_pcr;
+ qos.rxtp.max_sdu = rx_sdu;
qos.aal = ATM_AAL5;
dprintk("mpoa: mpoa_proc.c: parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n",
qos.txtp.max_pcr,
atmvcc->user_back = pvcc;
atmvcc->push = pppoatm_push;
atmvcc->pop = pppoatm_pop;
- (void) try_module_get(THIS_MODULE);
+ __module_get(THIS_MODULE);
return 0;
}
? -EFAULT : 0;
goto done;
case ATM_SETLOOP:
- if (__ATM_LM_XTRMT((int) (long) buf) &&
- __ATM_LM_XTLOC((int) (long) buf) >
- __ATM_LM_XTRMT((int) (long) buf)) {
+ if (__ATM_LM_XTRMT((int) (unsigned long) buf) &&
+ __ATM_LM_XTLOC((int) (unsigned long) buf) >
+ __ATM_LM_XTRMT((int) (unsigned long) buf)) {
error = -EINVAL;
goto done;
}
RFCOMM Module (RFCOMM Protocol)
BNEP Module (Bluetooth Network Encapsulation Protocol)
CMTP Module (CAPI Message Transport Protocol)
+ HIDP Module (Human Interface Device Protocol)
Say Y here to compile Bluetooth support into the kernel or say M to
compile it as module (bluetooth).
struct bnep_connlist_req {
__u32 cnum;
- struct bnep_conninfo *ci;
+ struct bnep_conninfo __user *ci;
};
struct bnep_proto_filter {
#define BT_DBG(D...)
#endif
-#define VERSION "1.0"
+#define VERSION "1.2"
static LIST_HEAD(bnep_session_list);
static DECLARE_RWSEM(bnep_session_sem);
static int bnep_send(struct bnep_session *s, void *data, size_t len)
{
struct socket *sock = s->sock;
- struct iovec iv = { data, len };
+ struct kvec iv = { data, len };
- s->msg.msg_iov = &iv;
- s->msg.msg_iovlen = 1;
- return sock_sendmsg(sock, &s->msg, len);
+ return kernel_sendmsg(sock, &s->msg, &iv, 1, len);
}
static int bnep_send_rsp(struct bnep_session *s, u8 ctrl, u16 resp)
return bnep_send(s, &rsp, sizeof(rsp));
}
+#ifdef CONFIG_BT_BNEP_PROTO_FILTER
+static inline void bnep_set_default_proto_filter(struct bnep_session *s)
+{
+ /* (IPv4, ARP) */
+ s->proto_filter[0].start = htons(0x0800);
+ s->proto_filter[0].end = htons(0x0806);
+ /* (RARP, AppleTalk) */
+ s->proto_filter[1].start = htons(0x8035);
+ s->proto_filter[1].end = htons(0x80F3);
+ /* (IPX, IPv6) */
+ s->proto_filter[2].start = htons(0x8137);
+ s->proto_filter[2].end = htons(0x86DD);
+}
+#endif
+
static int bnep_ctrl_set_netfilter(struct bnep_session *s, u16 *data, int len)
{
int n;
BT_DBG("proto filter start %d end %d",
f[i].start, f[i].end);
}
+
if (i < BNEP_MAX_PROTO_FILTERS)
memset(f + i, 0, sizeof(*f));
+ if (n == 0)
+ bnep_set_default_proto_filter(s);
+
bnep_send_rsp(s, BNEP_FILTER_NET_TYPE_RSP, BNEP_SUCCESS);
} else {
bnep_send_rsp(s, BNEP_FILTER_NET_TYPE_RSP, BNEP_FILTER_LIMIT_REACHED);
{
struct ethhdr *eh = (void *) skb->data;
struct socket *sock = s->sock;
- struct iovec iv[3];
+ struct kvec iv[3];
int len = 0, il = 0;
u8 type = 0;
goto send;
}
- iv[il++] = (struct iovec) { &type, 1 };
+ iv[il++] = (struct kvec) { &type, 1 };
len++;
if (!memcmp(eh->h_dest, s->eh.h_source, ETH_ALEN))
type = __bnep_tx_types[type];
switch (type) {
case BNEP_COMPRESSED_SRC_ONLY:
- iv[il++] = (struct iovec) { eh->h_source, ETH_ALEN };
+ iv[il++] = (struct kvec) { eh->h_source, ETH_ALEN };
len += ETH_ALEN;
break;
case BNEP_COMPRESSED_DST_ONLY:
- iv[il++] = (struct iovec) { eh->h_dest, ETH_ALEN };
+ iv[il++] = (struct kvec) { eh->h_dest, ETH_ALEN };
len += ETH_ALEN;
break;
}
send:
- iv[il++] = (struct iovec) { skb->data, skb->len };
+ iv[il++] = (struct kvec) { skb->data, skb->len };
len += skb->len;
/* FIXME: linearize skb */
{
- s->msg.msg_iov = iv;
- s->msg.msg_iovlen = il;
- len = sock_sendmsg(sock, &s->msg, len);
+ len = kernel_sendmsg(sock, &s->msg, iv, il, len);
}
kfree_skb(skb);
set_user_nice(current, -15);
current->flags |= PF_NOFREEZE;
- set_fs(KERNEL_DS);
-
init_waitqueue_entry(&wait, current);
add_wait_queue(sk->sk_sleep, &wait);
while (!atomic_read(&s->killed)) {
/* Set default mc filter */
set_bit(bnep_mc_hash(dev->broadcast), (ulong *) &s->mc_filter);
#endif
-
+
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
/* Set default protocol filter */
-
- /* (IPv4, ARP) */
- s->proto_filter[0].start = htons(0x0800);
- s->proto_filter[0].end = htons(0x0806);
- /* (RARP, AppleTalk) */
- s->proto_filter[1].start = htons(0x8035);
- s->proto_filter[1].end = htons(0x80F3);
- /* (IPX, IPv6) */
- s->proto_filter[2].start = htons(0x8137);
- s->proto_filter[2].end = htons(0x86DD);
+ bnep_set_default_proto_filter(s);
#endif
-
+
err = register_netdev(dev);
if (err) {
goto failed;
static int cmtp_send_frame(struct cmtp_session *session, unsigned char *data, int len)
{
struct socket *sock = session->sock;
- struct iovec iv = { data, len };
+ struct kvec iv = { data, len };
struct msghdr msg;
BT_DBG("session %p data %p len %d", session, data, len);
return 0;
memset(&msg, 0, sizeof(msg));
- msg.msg_iovlen = 1;
- msg.msg_iov = &iv;
- return sock_sendmsg(sock, &msg, len);
+ return kernel_sendmsg(sock, &msg, &iv, 1, len);
}
static int cmtp_process_transmit(struct cmtp_session *session)
set_user_nice(current, -15);
current->flags |= PF_NOFREEZE;
- set_fs(KERNEL_DS);
-
init_waitqueue_entry(&wait, current);
add_wait_queue(sk->sk_sleep, &wait);
while (!atomic_read(&session->terminate)) {
static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
{
- struct iovec iv = { data, len };
+ struct kvec iv = { data, len };
struct msghdr msg;
BT_DBG("sock %p data %p len %d", sock, data, len);
return 0;
memset(&msg, 0, sizeof(msg));
- msg.msg_iovlen = 1;
- msg.msg_iov = &iv;
- return sock_sendmsg(sock, &msg, len);
+ return kernel_sendmsg(sock, &msg, &iv, 1, len);
}
static int hidp_process_transmit(struct hidp_session *session)
set_user_nice(current, -15);
current->flags |= PF_NOFREEZE;
- set_fs(KERNEL_DS);
-
init_waitqueue_entry(&ctrl_wait, current);
init_waitqueue_entry(&intr_wait, current);
add_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait);
int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel)
{
- mm_segment_t fs;
int r;
rfcomm_lock();
- fs = get_fs(); set_fs(KERNEL_DS);
r = __rfcomm_dlc_open(d, src, dst, channel);
- set_fs(fs);
rfcomm_unlock();
return r;
int rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
{
- mm_segment_t fs;
int r;
rfcomm_lock();
- fs = get_fs(); set_fs(KERNEL_DS);
r = __rfcomm_dlc_close(d, err);
- set_fs(fs);
rfcomm_unlock();
return r;
{
struct rfcomm_session *s = NULL;
struct sockaddr_l2 addr;
- struct l2cap_options opts;
struct socket *sock;
- int size;
+ struct sock *sk;
BT_DBG("%s %s", batostr(src), batostr(dst));
goto failed;
/* Set L2CAP options */
- size = sizeof(opts);
- sock->ops->getsockopt(sock, SOL_L2CAP, L2CAP_OPTIONS, (void *)&opts, &size);
-
- opts.imtu = RFCOMM_MAX_L2CAP_MTU;
- sock->ops->setsockopt(sock, SOL_L2CAP, L2CAP_OPTIONS, (void *)&opts, size);
+ sk = sock->sk;
+ lock_sock(sk);
+ l2cap_pi(sk)->imtu = RFCOMM_MAX_L2CAP_MTU;
+ release_sock(sk);
s = rfcomm_session_add(sock, BT_BOUND);
if (!s) {
static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len)
{
struct socket *sock = s->sock;
- struct iovec iv = { data, len };
+ struct kvec iv = { data, len };
struct msghdr msg;
BT_DBG("session %p len %d", s, len);
memset(&msg, 0, sizeof(msg));
- msg.msg_iovlen = 1;
- msg.msg_iov = &iv;
- return sock_sendmsg(sock, &msg, len);
+ return kernel_sendmsg(sock, &msg, &iv, 1, len);
}
static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int len)
{
struct socket *sock = s->sock;
- struct iovec iv[3];
+ struct kvec iv[3];
struct msghdr msg;
unsigned char hdr[5], crc[1];
iv[2].iov_len = 1;
memset(&msg, 0, sizeof(msg));
- msg.msg_iovlen = 3;
- msg.msg_iov = iv;
- return sock_sendmsg(sock, &msg, 6 + len);
+ return kernel_sendmsg(sock, &msg, iv, 3, 6 + len);
}
static int rfcomm_send_credits(struct rfcomm_session *s, u8 addr, u8 credits)
static int rfcomm_add_listener(bdaddr_t *ba)
{
struct sockaddr_l2 addr;
- struct l2cap_options opts;
struct socket *sock;
+ struct sock *sk;
struct rfcomm_session *s;
- int size, err = 0;
+ int err = 0;
/* Create socket */
err = rfcomm_l2sock_create(&sock);
}
/* Set L2CAP options */
- size = sizeof(opts);
- sock->ops->getsockopt(sock, SOL_L2CAP, L2CAP_OPTIONS, (void *)&opts, &size);
-
- opts.imtu = RFCOMM_MAX_L2CAP_MTU;
- sock->ops->setsockopt(sock, SOL_L2CAP, L2CAP_OPTIONS, (void *)&opts, size);
+ sk = sock->sk;
+ lock_sock(sk);
+ l2cap_pi(sk)->imtu = RFCOMM_MAX_L2CAP_MTU;
+ release_sock(sk);
/* Start listening on the socket */
err = sock->ops->listen(sock, 10);
set_user_nice(current, -10);
current->flags |= PF_NOFREEZE;
- set_fs(KERNEL_DS);
-
BT_DBG("");
rfcomm_add_listener(BDADDR_ANY);
return &br->statistics;
}
-static int __br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct net_bridge *br;
- unsigned char *dest;
+ struct net_bridge *br = netdev_priv(dev);
+ const unsigned char *dest = skb->data;
struct net_bridge_fdb_entry *dst;
- br = dev->priv;
br->statistics.tx_packets++;
br->statistics.tx_bytes += skb->len;
- dest = skb->mac.raw = skb->data;
+ skb->mac.raw = skb->data;
skb_pull(skb, ETH_HLEN);
- if (dest[0] & 1) {
+ rcu_read_lock();
+ if (dest[0] & 1)
br_flood_deliver(br, skb, 0);
- return 0;
- }
-
- if ((dst = br_fdb_get(br, dest)) != NULL) {
+ else if ((dst = __br_fdb_get(br, dest)) != NULL)
br_deliver(dst->dst, skb);
- br_fdb_put(dst);
- return 0;
- }
-
- br_flood_deliver(br, skb, 0);
- return 0;
-}
-
-int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- int ret;
+ else
+ br_flood_deliver(br, skb, 0);
- rcu_read_lock();
- ret = __br_dev_xmit(skb, dev);
rcu_read_unlock();
-
- return ret;
+ return 0;
}
static int br_dev_open(struct net_device *dev)
static __inline__ void fdb_delete(struct net_bridge_fdb_entry *f)
{
- hlist_del(&f->hlist);
+ hlist_del_rcu(&f->hlist);
if (!f->is_static)
- list_del(&f->age_list);
+ list_del(&f->u.age_list);
br_fdb_put(f);
}
struct net_bridge *br = p->br;
int i;
- write_lock_bh(&br->hash_lock);
+ spin_lock_bh(&br->hash_lock);
/* Search all chains since old address/hash is unknown */
for (i = 0; i < BR_HASH_SIZE; i++) {
fdb_insert(br, p, newaddr, 1);
- write_unlock_bh(&br->hash_lock);
+ spin_unlock_bh(&br->hash_lock);
}
void br_fdb_cleanup(unsigned long _data)
struct list_head *l, *n;
unsigned long delay;
- write_lock_bh(&br->hash_lock);
+ spin_lock_bh(&br->hash_lock);
delay = hold_time(br);
list_for_each_safe(l, n, &br->age_list) {
- struct net_bridge_fdb_entry *f
- = list_entry(l, struct net_bridge_fdb_entry, age_list);
- unsigned long expires = f->ageing_timer + delay;
+ struct net_bridge_fdb_entry *f;
+ unsigned long expires;
+
+ f = list_entry(l, struct net_bridge_fdb_entry, u.age_list);
+ expires = f->ageing_timer + delay;
if (time_before_eq(expires, jiffies)) {
WARN_ON(f->is_static);
break;
}
}
- write_unlock_bh(&br->hash_lock);
+ spin_unlock_bh(&br->hash_lock);
}
void br_fdb_delete_by_port(struct net_bridge *br, struct net_bridge_port *p)
{
int i;
- write_lock_bh(&br->hash_lock);
+ spin_lock_bh(&br->hash_lock);
for (i = 0; i < BR_HASH_SIZE; i++) {
struct hlist_node *h, *g;
skip_delete: ;
}
}
- write_unlock_bh(&br->hash_lock);
+ spin_unlock_bh(&br->hash_lock);
}
-struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br, unsigned char *addr)
+/* No locking or refcounting; caller must hold rcu_read_lock (preemption disabled) */
+struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
+ const unsigned char *addr)
{
struct hlist_node *h;
+ struct net_bridge_fdb_entry *fdb;
- read_lock_bh(&br->hash_lock);
-
- hlist_for_each(h, &br->hash[br_mac_hash(addr)]) {
- struct net_bridge_fdb_entry *fdb
- = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
-
+ hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) {
if (!memcmp(fdb->addr.addr, addr, ETH_ALEN)) {
- if (has_expired(br, fdb))
- goto ret_null;
-
- atomic_inc(&fdb->use_count);
- read_unlock_bh(&br->hash_lock);
+ if (unlikely(has_expired(br, fdb)))
+ break;
return fdb;
}
}
- ret_null:
- read_unlock_bh(&br->hash_lock);
+
return NULL;
}
+/* Interface used by ATM hook that keeps a ref count */
+struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
+ unsigned char *addr)
+{
+ struct net_bridge_fdb_entry *fdb;
+
+ rcu_read_lock();
+ fdb = __br_fdb_get(br, addr);
+ if (fdb)
+ atomic_inc(&fdb->use_count);
+ rcu_read_unlock();
+ return fdb;
+}
+
+static void fdb_rcu_free(struct rcu_head *head)
+{
+ struct net_bridge_fdb_entry *ent
+ = container_of(head, struct net_bridge_fdb_entry, u.rcu);
+ kmem_cache_free(br_fdb_cache, ent);
+}
+
+/* Set entry up for deletion with RCU */
void br_fdb_put(struct net_bridge_fdb_entry *ent)
{
if (atomic_dec_and_test(&ent->use_count))
- kmem_cache_free(br_fdb_cache, ent);
+ call_rcu(&ent->u.rcu, fdb_rcu_free);
}
/*
memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
- read_lock_bh(&br->hash_lock);
+ rcu_read_lock();
for (i = 0; i < BR_HASH_SIZE; i++) {
- hlist_for_each_entry(f, h, &br->hash[i], hlist) {
+ hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
if (num >= maxnum)
goto out;
}
out:
- read_unlock_bh(&br->hash_lock);
+ rcu_read_unlock();
return num;
}
return 0;
/* move to end of age list */
- list_del(&fdb->age_list);
+ list_del(&fdb->u.age_list);
goto update;
}
}
memcpy(fdb->addr.addr, addr, ETH_ALEN);
atomic_set(&fdb->use_count, 1);
- hlist_add_head(&fdb->hlist, &br->hash[hash]);
+ hlist_add_head_rcu(&fdb->hlist, &br->hash[hash]);
if (!timer_pending(&br->gc_timer)) {
br->gc_timer.expires = jiffies + hold_time(br);
fdb->is_static = is_local;
fdb->ageing_timer = jiffies;
if (!is_local)
- list_add_tail(&fdb->age_list, &br->age_list);
+ list_add_tail(&fdb->u.age_list, &br->age_list);
return 0;
}
{
int ret;
- write_lock_bh(&br->hash_lock);
+ spin_lock_bh(&br->hash_lock);
ret = fdb_insert(br, source, addr, is_local);
- write_unlock_bh(&br->hash_lock);
+ spin_unlock_bh(&br->hash_lock);
return ret;
}
br->lock = SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&br->port_list);
- br->hash_lock = RW_LOCK_UNLOCKED;
+ br->hash_lock = SPIN_LOCK_UNLOCKED;
br->bridge_id.prio[0] = 0x80;
br->bridge_id.prio[1] = 0x00;
return ret;
}
+/* MTU of the bridge pseudo-device: 1500, or the minimum MTU of the ports */
int br_min_mtu(const struct net_bridge *br)
{
const struct net_bridge_port *p;
spin_lock_bh(&br->lock);
br_stp_recalculate_bridge_id(br);
- if ((br->dev->flags & IFF_UP) && (dev->flags & IFF_UP))
+ if ((br->dev->flags & IFF_UP)
+ && (dev->flags & IFF_UP) && netif_carrier_ok(dev))
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
- br->dev->mtu = br_min_mtu(br);
+ dev_set_mtu(br->dev, br_min_mtu(br));
}
return err;
goto out;
}
- dst = br_fdb_get(br, dest);
+ dst = __br_fdb_get(br, dest);
if (dst != NULL && dst->is_local) {
if (!passedup)
br_pass_frame_up(br, skb);
else
kfree_skb(skb);
- br_fdb_put(dst);
goto out;
}
if (dst != NULL) {
br_forward(dst->dst, skb);
- br_fdb_put(dst);
goto out;
}
#ifdef CONFIG_SYSCTL
static
int brnf_sysctl_call_tables(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp);
+ ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
if (write && *(int *)(ctl->data))
*(int *)(ctl->data) = 1;
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr);
-struct notifier_block br_device_notifier =
-{
+struct notifier_block br_device_notifier = {
.notifier_call = br_device_event
};
+/*
+ * Handle changes in state of network devices enslaved to a bridge.
+ *
+ * Note: don't care about up/down if bridge itself is down, because
+ * port state is checked when bridge is brought up.
+ */
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
- struct net_device *dev;
- struct net_bridge_port *p;
+ struct net_device *dev = ptr;
+ struct net_bridge_port *p = dev->br_port;
struct net_bridge *br;
- dev = ptr;
- p = dev->br_port;
-
+ /* not a port of a bridge */
if (p == NULL)
return NOTIFY_DONE;
br = p->br;
+ spin_lock_bh(&br->lock);
switch (event) {
+ case NETDEV_CHANGEMTU:
+ dev_set_mtu(br->dev, br_min_mtu(br));
+ break;
+
case NETDEV_CHANGEADDR:
- spin_lock_bh(&br->lock);
br_fdb_changeaddr(p, dev->dev_addr);
- if (br->dev->flags & IFF_UP)
- br_stp_recalculate_bridge_id(br);
- spin_unlock_bh(&br->lock);
+ br_stp_recalculate_bridge_id(br);
break;
- case NETDEV_CHANGEMTU:
- br->dev->mtu = br_min_mtu(br);
+ case NETDEV_CHANGE: /* device is up but carrier changed */
+ if (!(br->dev->flags & IFF_UP))
+ break;
+
+ if (netif_carrier_ok(dev)) {
+ if (p->state == BR_STATE_DISABLED)
+ br_stp_enable_port(p);
+ } else {
+ if (p->state != BR_STATE_DISABLED)
+ br_stp_disable_port(p);
+ }
break;
case NETDEV_DOWN:
- if (br->dev->flags & IFF_UP) {
- spin_lock_bh(&br->lock);
+ if (br->dev->flags & IFF_UP)
br_stp_disable_port(p);
- spin_unlock_bh(&br->lock);
- }
break;
case NETDEV_UP:
- if (br->dev->flags & IFF_UP) {
- spin_lock_bh(&br->lock);
+ if (netif_carrier_ok(dev) && (br->dev->flags & IFF_UP))
br_stp_enable_port(p);
- spin_unlock_bh(&br->lock);
- }
break;
case NETDEV_UNREGISTER:
br_del_if(br, dev);
break;
- }
+ }
+ spin_unlock_bh(&br->lock);
return NOTIFY_DONE;
}
{
struct hlist_node hlist;
struct net_bridge_port *dst;
- struct list_head age_list;
+ union {
+ struct list_head age_list;
+ struct rcu_head rcu;
+ } u;
atomic_t use_count;
unsigned long ageing_timer;
mac_addr addr;
struct list_head port_list;
struct net_device *dev;
struct net_device_stats statistics;
- rwlock_t hash_lock;
+ spinlock_t hash_lock;
struct hlist_head hash[BR_HASH_SIZE];
struct list_head age_list;
extern void br_fdb_cleanup(unsigned long arg);
extern void br_fdb_delete_by_port(struct net_bridge *br,
struct net_bridge_port *p);
+extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
+ const unsigned char *addr);
extern struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
- unsigned char *addr);
+ unsigned char *addr);
extern void br_fdb_put(struct net_bridge_fdb_entry *ent);
extern int br_fdb_fillbuf(struct net_bridge *br, void *buf,
unsigned long count, unsigned long off);
int isroot = br_is_root_bridge(br);
pr_info("%s: topology change detected, %s\n", br->dev->name,
- isroot ? "propgating" : "sending tcn bpdu");
+ isroot ? "propagating" : "sending tcn bpdu");
if (isroot) {
br->topology_change = 1;
br_config_bpdu_generation(br);
list_for_each_entry(p, &br->port_list, list) {
- if (p->dev->flags & IFF_UP)
+ if ((p->dev->flags & IFF_UP) && netif_carrier_ok(p->dev))
br_stp_enable_port(p);
}
*/
DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, };
-#ifdef CONFIG_NET_FASTROUTE
-int netdev_fastroute;
-int netdev_fastroute_obstacles;
-#endif
-
#ifdef CONFIG_SYSFS
extern int netdev_sysfs_init(void);
extern int netdev_register_sysfs(struct net_device *);
int hash;
spin_lock_bh(&ptype_lock);
-#ifdef CONFIG_NET_FASTROUTE
- if (pt->af_packet_priv) {
- netdev_fastroute_obstacles++;
- dev_clear_fastroute(pt->dev);
- }
-#endif
if (pt->type == htons(ETH_P_ALL)) {
netdev_nit++;
list_add_rcu(&pt->list, &ptype_all);
list_for_each_entry(pt1, head, list) {
if (pt == pt1) {
-#ifdef CONFIG_NET_FASTROUTE
- if (pt->af_packet_priv)
- netdev_fastroute_obstacles--;
-#endif
list_del_rcu(&pt->list);
goto out;
}
return ret;
}
-#ifdef CONFIG_NET_FASTROUTE
-
-static void dev_do_clear_fastroute(struct net_device *dev)
-{
- if (dev->accept_fastpath) {
- int i;
-
- for (i = 0; i <= NETDEV_FASTROUTE_HMASK; i++) {
- struct dst_entry *dst;
-
- write_lock_irq(&dev->fastpath_lock);
- dst = dev->fastpath[i];
- dev->fastpath[i] = NULL;
- write_unlock_irq(&dev->fastpath_lock);
-
- dst_release(dst);
- }
- }
-}
-
-void dev_clear_fastroute(struct net_device *dev)
-{
- if (dev) {
- dev_do_clear_fastroute(dev);
- } else {
- read_lock(&dev_base_lock);
- for (dev = dev_base; dev; dev = dev->next)
- dev_do_clear_fastroute(dev);
- read_unlock(&dev_base_lock);
- }
-}
-#endif
-
/**
* dev_close - shutdown an interface.
* @dev: device to shutdown
*/
dev->flags &= ~IFF_UP;
-#ifdef CONFIG_NET_FASTROUTE
- dev_clear_fastroute(dev);
-#endif
/*
* Tell people we are down
} \
}
+static inline void qdisc_run(struct net_device *dev)
+{
+ while (!netif_queue_stopped(dev) &&
+ qdisc_restart(dev)<0)
+ /* NOTHING */;
+}
+
/**
* dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
__get_cpu_var(netdev_rx_stat).total++;
-#ifdef CONFIG_NET_FASTROUTE
- if (skb->pkt_type == PACKET_FASTROUTE) {
- __get_cpu_var(netdev_rx_stat).fastroute_deferred_out++;
- return dev_queue_xmit(skb);
- }
-#endif
-
skb->h.raw = skb->nh.raw = skb->data;
skb->mac_len = skb->nh.raw - skb->mac.raw;
if ((dev->promiscuity += inc) == 0)
dev->flags &= ~IFF_PROMISC;
if (dev->flags ^ old_flags) {
-#ifdef CONFIG_NET_FASTROUTE
- if (dev->flags & IFF_PROMISC) {
- netdev_fastroute_obstacles++;
- dev_clear_fastroute(dev);
- } else
- netdev_fastroute_obstacles--;
-#endif
dev_mc_upload(dev);
printk(KERN_INFO "device %s %s promiscuous mode\n",
dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
spin_lock_init(&dev->ingress_lock);
#endif
-#ifdef CONFIG_NET_FASTROUTE
- dev->fastpath_lock = RW_LOCK_UNLOCKED;
-#endif
-
ret = alloc_divert_blk(dev);
if (ret)
goto out;
while (atomic_read(&dev->refcnt) != 0) {
if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
rtnl_shlock();
- rtnl_exlock();
/* Rebroadcast unregister notification */
notifier_call_chain(&netdev_chain,
linkwatch_run_queue();
}
- rtnl_exunlock();
rtnl_shunlock();
rebroadcast_time = jiffies;
synchronize_net();
-#ifdef CONFIG_NET_FASTROUTE
- dev_clear_fastroute(dev);
-#endif
-
/* Shutdown queueing discipline. */
dev_shutdown(dev);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
+EXPORT_SYMBOL(dev_change_flags);
+EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
#endif
-/* for 801q VLAN support */
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-EXPORT_SYMBOL(dev_change_flags);
-#endif
+
#ifdef CONFIG_KMOD
EXPORT_SYMBOL(dev_load);
#endif
EXPORT_SYMBOL(netdev_register_fc);
EXPORT_SYMBOL(netdev_unregister_fc);
#endif
-#ifdef CONFIG_NET_FASTROUTE
-EXPORT_SYMBOL(netdev_fastroute);
-EXPORT_SYMBOL(netdev_fastroute_obstacles);
-#endif
#ifdef CONFIG_NET_CLS_ACT
EXPORT_SYMBOL(ing_filter);
if (copy_to_user(useraddr, ®s, sizeof(regs)))
goto out;
useraddr += offsetof(struct ethtool_regs, data);
- if (copy_to_user(useraddr, regbuf, reglen))
+ if (copy_to_user(useraddr, regbuf, regs.len))
goto out;
ret = 0;
clear_bit(LW_RUNNING, &linkwatch_flags);
rtnl_shlock();
- rtnl_exlock();
linkwatch_run_queue();
- rtnl_exunlock();
rtnl_shunlock();
}
*/
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se");
MODULE_DESCRIPTION("Packet Generator tool");
MODULE_LICENSE("GPL");
-MODULE_PARM(count_d, "i");
-MODULE_PARM(ipg_d, "i");
-MODULE_PARM(cpu_speed, "i");
-MODULE_PARM(clone_skb_d, "i");
+module_param(count_d, int, 0);
+module_param(ipg_d, int, 0);
+module_param(cpu_speed, int, 0);
+module_param(clone_skb_d, int, 0);
void rtnl_lock(void)
{
rtnl_shlock();
- rtnl_exlock();
}
void rtnl_unlock(void)
{
- rtnl_exunlock();
rtnl_shunlock();
netdev_run_todo();
struct rtnetlink_link *link_tab;
struct rtattr *rta[RTATTR_MAX];
- int exclusive = 0;
int sz_idx, kind;
int min_len;
int family;
return -1;
}
- if (kind != 2) {
- if (rtnl_exlock_nowait()) {
- *errp = 0;
- return -1;
- }
- exclusive = 1;
- }
-
memset(&rta, 0, sizeof(rta));
min_len = rtm_min[sz_idx];
goto err_inval;
err = link->doit(skb, nlh, (void *)&rta);
- if (exclusive)
- rtnl_exunlock();
*errp = err;
return err;
err_inval:
- if (exclusive)
- rtnl_exunlock();
*errp = -EINVAL;
return -1;
}
return -EFAULT;
}
+/* Keep iterating until skb_iter_next returns false. */
+void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i)
+{
+ i->len = skb_headlen(skb);
+ i->data = (unsigned char *)skb->data;
+ i->nextfrag = 0;
+ i->fraglist = NULL;
+}
+
+int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i)
+{
+ /* Unmap previous, if not head fragment. */
+ if (i->nextfrag)
+ kunmap_skb_frag(i->data);
+
+ if (i->fraglist) {
+ fraglist:
+ /* We're iterating through fraglist. */
+ if (i->nextfrag < skb_shinfo(i->fraglist)->nr_frags) {
+ i->data = kmap_skb_frag(&skb_shinfo(i->fraglist)
+ ->frags[i->nextfrag]);
+ i->len = skb_shinfo(i->fraglist)->frags[i->nextfrag]
+ .size;
+ i->nextfrag++;
+ return 1;
+ }
+ /* Fragments with fragments? Too hard! */
+ BUG_ON(skb_shinfo(i->fraglist)->frag_list);
+ i->fraglist = i->fraglist->next;
+ if (!i->fraglist)
+ goto end;
+
+ i->len = skb_headlen(i->fraglist);
+ i->data = i->fraglist->data;
+ i->nextfrag = 0;
+ return 1;
+ }
+
+ if (i->nextfrag < skb_shinfo(skb)->nr_frags) {
+ i->data = kmap_skb_frag(&skb_shinfo(skb)->frags[i->nextfrag]);
+ i->len = skb_shinfo(skb)->frags[i->nextfrag].size;
+ i->nextfrag++;
+ return 1;
+ }
+
+ i->fraglist = skb_shinfo(skb)->frag_list;
+ if (i->fraglist)
+ goto fraglist;
+
+end:
+ /* Bug trap for callers */
+ i->data = NULL;
+ return 0;
+}
+
+void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i)
+{
+ /* Unmap previous, if not head fragment. */
+ if (i->data && i->nextfrag)
+ kunmap_skb_frag(i->data);
+ /* Bug trap for callers */
+ i->data = NULL;
+}
+
/* Checksum skb data. */
unsigned int skb_checksum(const struct sk_buff *skb, int offset,
EXPORT_SYMBOL(skb_unlink);
EXPORT_SYMBOL(skb_append);
EXPORT_SYMBOL(skb_split);
+EXPORT_SYMBOL(skb_iter_first);
+EXPORT_SYMBOL(skb_iter_next);
+EXPORT_SYMBOL(skb_iter_abort);
printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
__FUNCTION__, atomic_read(&sk->sk_omem_alloc));
- /*
- * If sendmsg cached page exists, toss it.
- */
- if (sk->sk_sndmsg_page) {
- __free_page(sk->sk_sndmsg_page);
- sk->sk_sndmsg_page = NULL;
- }
-
security_sk_free(sk);
kmem_cache_free(sk->sk_slab, sk);
module_put(owner);
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
ssize_t res;
- struct msghdr msg;
- struct iovec iov;
- mm_segment_t old_fs;
- char *kaddr;
-
- kaddr = kmap(page);
-
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = flags;
-
- /* This cast is ok because of the "set_fs(KERNEL_DS)" */
- iov.iov_base = (void __user *) (kaddr + offset);
+ struct msghdr msg = {.msg_flags = flags};
+ struct kvec iov;
+ char *kaddr = kmap(page);
+ iov.iov_base = kaddr + offset;
iov.iov_len = size;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- res = sock_sendmsg(sock, &msg, size);
- set_fs(old_fs);
-
+ res = kernel_sendmsg(sock, &msg, &iov, 1, size);
kunmap(page);
return res;
}
.mode = 0644,
.proc_handler = &proc_dointvec
},
-#ifdef CONFIG_NET_FASTROUTE
- {
- .ctl_name = NET_CORE_FASTROUTE,
- .procname = "netdev_fastroute",
- .data = &netdev_fastroute,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
-#endif
{
.ctl_name = NET_CORE_MSG_COST,
.procname = "message_cost",
static int max_priority[] = { 127 }; /* From DECnet spec */
static int dn_forwarding_proc(ctl_table *, int, struct file *,
- void __user *, size_t *);
+ void __user *, size_t *, loff_t *);
static int dn_forwarding_sysctl(ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen,
static int dn_forwarding_proc(ctl_table *table, int write,
struct file *filep,
- void __user *buffer, size_t *lenp)
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
{
#ifdef CONFIG_DECNET_ROUTER
struct net_device *dev = table->extra1;
dn_db = dev->dn_ptr;
old = dn_db->parms.forwarding;
- err = proc_dointvec(table, write, filep, buffer, lenp);
+ err = proc_dointvec(table, write, filep, buffer, lenp, ppos);
if ((err >= 0) && write) {
if (dn_db->parms.forwarding < 0)
static int dn_node_address_handler(ctl_table *table, int write,
struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
{
char addr[DN_ASCBUF_LEN];
size_t len;
dn_address dnaddr;
- if (!*lenp || (filp->f_pos && !write)) {
+ if (!*lenp || (*ppos && !write)) {
*lenp = 0;
return 0;
}
dn_dev_devices_on();
- filp->f_pos += len;
+ *ppos += len;
return 0;
}
return -EFAULT;
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
return 0;
}
static int dn_def_dev_handler(ctl_table *table, int write,
struct file * filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
{
size_t len;
struct net_device *dev;
char devname[17];
- if (!*lenp || (filp->f_pos && !write)) {
+ if (!*lenp || (*ppos && !write)) {
*lenp = 0;
return 0;
}
dev_put(dev);
return -ENODEV;
}
- filp->f_pos += *lenp;
+ *ppos += *lenp;
return 0;
}
return -EFAULT;
*lenp = len;
- filp->f_pos += len;
+ *ppos += len;
return 0;
}
static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb)
{
- struct sockaddr_in sin;
- struct iovec iov;
- struct aunhdr ah;
+ struct sockaddr_in sin = {
+ .sin_family = AF_INET,
+ .sin_port = htons(AUN_PORT),
+ .sin_addr = {.s_addr = addr}
+ };
+ struct aunhdr ah = {.code = code, .cb = cb, .handle = seq};
+ struct kvec iov = {.iov_base = (void *)&ah, .iov_len = sizeof(ah)};
struct msghdr udpmsg;
- int err;
- mm_segment_t oldfs;
- memset(&sin, 0, sizeof(sin));
- sin.sin_family = AF_INET;
- sin.sin_port = htons(AUN_PORT);
- sin.sin_addr.s_addr = addr;
-
- ah.code = code;
- ah.pad = 0;
- ah.port = 0;
- ah.cb = cb;
- ah.handle = seq;
-
- iov.iov_base = (void *)&ah;
- iov.iov_len = sizeof(ah);
-
udpmsg.msg_name = (void *)&sin;
udpmsg.msg_namelen = sizeof(sin);
- udpmsg.msg_iov = &iov;
- udpmsg.msg_iovlen = 1;
udpmsg.msg_control = NULL;
udpmsg.msg_controllen = 0;
udpmsg.msg_flags=0;
- oldfs = get_fs(); set_fs(KERNEL_DS);
- err = sock_sendmsg(udpsock, &udpmsg, sizeof(ah));
- set_fs(oldfs);
+ kernel_sendmsg(udpsock, &udpmsg, &iov, 1, sizeof(ah));
}
ip_input.o ip_fragment.o ip_forward.o ip_options.o \
ip_output.o ip_sockglue.o \
tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o tcp_minisocks.o \
- tcp_diag.o raw.o udp.o arp.o icmp.o devinet.o af_inet.o igmp.o \
+ tcp_diag.o datagram.o raw.o udp.o arp.o icmp.o devinet.o af_inet.o igmp.o \
sysctl_net_ipv4.o fib_frontend.o fib_semantics.o fib_hash.o
obj-$(CONFIG_PROC_FS) += proc.o
#include <linux/config.h>
#include <linux/module.h>
-#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ah.h>
iph->tos = top_iph->tos;
iph->ttl = top_iph->ttl;
iph->frag_off = top_iph->frag_off;
- iph->daddr = top_iph->daddr;
if (top_iph->ihl != 5) {
+ iph->daddr = top_iph->daddr;
memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
if (err)
top_iph->tos = iph->tos;
top_iph->ttl = iph->ttl;
top_iph->frag_off = iph->frag_off;
- top_iph->daddr = iph->daddr;
- if (top_iph->ihl != 5)
+ if (top_iph->ihl != 5) {
+ top_iph->daddr = iph->daddr;
memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
+ }
ip_send_check(top_iph);
if (ip_route_output_key(&rt, &fl) < 0)
return 1;
if (rt->u.dst.dev != dev) {
- NET_INC_STATS_BH(ArpFilter);
+ NET_INC_STATS_BH(LINUX_MIB_ARPFILTER);
flag = 1;
}
ip_rt_put(rt);
static int devinet_sysctl_forward(ctl_table *ctl, int write,
struct file* filp, void __user *buffer,
- size_t *lenp)
+ size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
int val = *valp;
- int ret = proc_dointvec(ctl, write, filp, buffer, lenp);
+ int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
if (write && *valp != val) {
if (valp == &ipv4_devconf.forwarding)
int ipv4_doint_and_flush(ctl_table *ctl, int write,
struct file* filp, void __user *buffer,
- size_t *lenp)
+ size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
int val = *valp;
- int ret = proc_dointvec(ctl, write, filp, buffer, lenp);
+ int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
if (write && *valp != val)
rt_cache_flush(0);
#include <linux/config.h>
#include <linux/module.h>
-#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
*/
struct icmp_control {
- int output_off; /* Field offset for increment on output */
- int input_off; /* Field offset for increment on input */
+ int output_entry; /* Field for increment on output */
+ int input_entry; /* Field for increment on input */
void (*handler)(struct sk_buff *skb);
short error; /* This ICMP is classed as an error message */
};
static void icmp_out_count(int type)
{
if (type <= NR_ICMP_TYPES) {
- ICMP_INC_STATS_FIELD(icmp_pointers[type].output_off);
- ICMP_INC_STATS(IcmpOutMsgs);
+ ICMP_INC_STATS(icmp_pointers[type].output_entry);
+ ICMP_INC_STATS(ICMP_MIB_OUTMSGS);
}
}
out:
return;
out_err:
- ICMP_INC_STATS_BH(IcmpInErrors);
+ ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
goto out;
}
out:
return;
out_err:
- ICMP_INC_STATS_BH(IcmpInErrors);
+ ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
goto out;
}
out:
return;
out_err:
- ICMP_INC_STATS_BH(IcmpInErrors);
+ ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
goto out;
}
struct icmphdr *icmph;
struct rtable *rt = (struct rtable *)skb->dst;
- ICMP_INC_STATS_BH(IcmpInMsgs);
+ ICMP_INC_STATS_BH(ICMP_MIB_INMSGS);
switch (skb->ip_summed) {
case CHECKSUM_HW:
}
}
- ICMP_INC_STATS_BH_FIELD(icmp_pointers[icmph->type].input_off);
+ ICMP_INC_STATS_BH(icmp_pointers[icmph->type].input_entry);
icmp_pointers[icmph->type].handler(skb);
drop:
kfree_skb(skb);
return 0;
error:
- ICMP_INC_STATS_BH(IcmpInErrors);
+ ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
goto drop;
}
*/
static struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
[ICMP_ECHOREPLY] = {
- .output_off = offsetof(struct icmp_mib, IcmpOutEchoReps),
- .input_off = offsetof(struct icmp_mib, IcmpInEchoReps),
+ .output_entry = ICMP_MIB_OUTECHOREPS,
+ .input_entry = ICMP_MIB_INECHOREPS,
.handler = icmp_discard,
},
[1] = {
- .output_off = offsetof(struct icmp_mib, dummy),
- .input_off = offsetof(struct icmp_mib,IcmpInErrors),
+ .output_entry = ICMP_MIB_DUMMY,
+ .input_entry = ICMP_MIB_INERRORS,
.handler = icmp_discard,
.error = 1,
},
[2] = {
- .output_off = offsetof(struct icmp_mib, dummy),
- .input_off = offsetof(struct icmp_mib,IcmpInErrors),
+ .output_entry = ICMP_MIB_DUMMY,
+ .input_entry = ICMP_MIB_INERRORS,
.handler = icmp_discard,
.error = 1,
},
[ICMP_DEST_UNREACH] = {
- .output_off = offsetof(struct icmp_mib, IcmpOutDestUnreachs),
- .input_off = offsetof(struct icmp_mib, IcmpInDestUnreachs),
+ .output_entry = ICMP_MIB_OUTDESTUNREACHS,
+ .input_entry = ICMP_MIB_INDESTUNREACHS,
.handler = icmp_unreach,
.error = 1,
},
[ICMP_SOURCE_QUENCH] = {
- .output_off = offsetof(struct icmp_mib, IcmpOutSrcQuenchs),
- .input_off = offsetof(struct icmp_mib, IcmpInSrcQuenchs),
+ .output_entry = ICMP_MIB_OUTSRCQUENCHS,
+ .input_entry = ICMP_MIB_INSRCQUENCHS,
.handler = icmp_unreach,
.error = 1,
},
[ICMP_REDIRECT] = {
- .output_off = offsetof(struct icmp_mib, IcmpOutRedirects),
- .input_off = offsetof(struct icmp_mib, IcmpInRedirects),
+ .output_entry = ICMP_MIB_OUTREDIRECTS,
+ .input_entry = ICMP_MIB_INREDIRECTS,
.handler = icmp_redirect,
.error = 1,
},
[6] = {
- .output_off = offsetof(struct icmp_mib, dummy),
- .input_off = offsetof(struct icmp_mib, IcmpInErrors),
+ .output_entry = ICMP_MIB_DUMMY,
+ .input_entry = ICMP_MIB_INERRORS,
.handler = icmp_discard,
.error = 1,
},
[7] = {
- .output_off = offsetof(struct icmp_mib, dummy),
- .input_off = offsetof(struct icmp_mib, IcmpInErrors),
+ .output_entry = ICMP_MIB_DUMMY,
+ .input_entry = ICMP_MIB_INERRORS,
.handler = icmp_discard,
.error = 1,
},
[ICMP_ECHO] = {
- .output_off = offsetof(struct icmp_mib, IcmpOutEchos),
- .input_off = offsetof(struct icmp_mib, IcmpInEchos),
+ .output_entry = ICMP_MIB_OUTECHOS,
+ .input_entry = ICMP_MIB_INECHOS,
.handler = icmp_echo,
},
[9] = {
- .output_off = offsetof(struct icmp_mib, dummy),
- .input_off = offsetof(struct icmp_mib, IcmpInErrors),
+ .output_entry = ICMP_MIB_DUMMY,
+ .input_entry = ICMP_MIB_INERRORS,
.handler = icmp_discard,
.error = 1,
},
[10] = {
- .output_off = offsetof(struct icmp_mib, dummy),
- .input_off = offsetof(struct icmp_mib, IcmpInErrors),
+ .output_entry = ICMP_MIB_DUMMY,
+ .input_entry = ICMP_MIB_INERRORS,
.handler = icmp_discard,
.error = 1,
},
[ICMP_TIME_EXCEEDED] = {
- .output_off = offsetof(struct icmp_mib, IcmpOutTimeExcds),
- .input_off = offsetof(struct icmp_mib,IcmpInTimeExcds),
+ .output_entry = ICMP_MIB_OUTTIMEEXCDS,
+ .input_entry = ICMP_MIB_INTIMEEXCDS,
.handler = icmp_unreach,
.error = 1,
},
[ICMP_PARAMETERPROB] = {
- .output_off = offsetof(struct icmp_mib, IcmpOutParmProbs),
- .input_off = offsetof(struct icmp_mib, IcmpInParmProbs),
+ .output_entry = ICMP_MIB_OUTPARMPROBS,
+ .input_entry = ICMP_MIB_INPARMPROBS,
.handler = icmp_unreach,
.error = 1,
},
[ICMP_TIMESTAMP] = {
- .output_off = offsetof(struct icmp_mib, IcmpOutTimestamps),
- .input_off = offsetof(struct icmp_mib, IcmpInTimestamps),
+ .output_entry = ICMP_MIB_OUTTIMESTAMPS,
+ .input_entry = ICMP_MIB_INTIMESTAMPS,
.handler = icmp_timestamp,
},
[ICMP_TIMESTAMPREPLY] = {
- .output_off = offsetof(struct icmp_mib, IcmpOutTimestampReps),
- .input_off = offsetof(struct icmp_mib, IcmpInTimestampReps),
+ .output_entry = ICMP_MIB_OUTTIMESTAMPREPS,
+ .input_entry = ICMP_MIB_INTIMESTAMPREPS,
.handler = icmp_discard,
},
[ICMP_INFO_REQUEST] = {
- .output_off = offsetof(struct icmp_mib, dummy),
- .input_off = offsetof(struct icmp_mib, dummy),
+ .output_entry = ICMP_MIB_DUMMY,
+ .input_entry = ICMP_MIB_DUMMY,
.handler = icmp_discard,
},
[ICMP_INFO_REPLY] = {
- .output_off = offsetof(struct icmp_mib, dummy),
- .input_off = offsetof(struct icmp_mib, dummy),
+ .output_entry = ICMP_MIB_DUMMY,
+ .input_entry = ICMP_MIB_DUMMY,
.handler = icmp_discard,
},
[ICMP_ADDRESS] = {
- .output_off = offsetof(struct icmp_mib, IcmpOutAddrMasks),
- .input_off = offsetof(struct icmp_mib, IcmpInAddrMasks),
+ .output_entry = ICMP_MIB_OUTADDRMASKS,
+ .input_entry = ICMP_MIB_INADDRMASKS,
.handler = icmp_address,
},
[ICMP_ADDRESSREPLY] = {
- .output_off = offsetof(struct icmp_mib, IcmpOutAddrMaskReps),
- .input_off = offsetof(struct icmp_mib, IcmpInAddrMaskReps),
+ .output_entry = ICMP_MIB_OUTADDRMASKREPS,
+ .input_entry = ICMP_MIB_INADDRMASKREPS,
.handler = icmp_address_reply,
},
};
static int igmp_mc_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
- seq_printf(seq,
- "Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
+ seq_puts(seq,
+ "Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
else {
struct ip_mc_list *im = (struct ip_mc_list *)v;
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
{
struct ip_options * opt = &(IPCB(skb)->opt);
- IP_INC_STATS_BH(OutForwDatagrams);
+ IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
if (unlikely(opt->optlen))
ip_forward_options(skb);
spin_unlock(&qp->lock);
ipq_put(qp);
- IP_INC_STATS_BH(ReasmFails);
+ IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
}
}
ipq_kill(qp);
- IP_INC_STATS_BH(ReasmTimeout);
- IP_INC_STATS_BH(ReasmFails);
+ IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
+ IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
if ((qp->last_in&FIRST_IN) && qp->fragments != NULL) {
struct sk_buff *head = qp->fragments;
iph = head->nh.iph;
iph->frag_off = 0;
iph->tot_len = htons(len);
- IP_INC_STATS_BH(ReasmOKs);
+ IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
qp->fragments = NULL;
return head;
"Oversized IP packet from %d.%d.%d.%d.\n",
NIPQUAD(qp->saddr));
out_fail:
- IP_INC_STATS_BH(ReasmFails);
+ IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
return NULL;
}
struct ipq *qp;
struct net_device *dev;
- IP_INC_STATS_BH(ReasmReqds);
+ IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
/* Start by cleaning up the memory. */
if (atomic_read(&ip_frag_mem) > sysctl_ipfrag_high_thresh)
return ret;
}
- IP_INC_STATS_BH(ReasmFails);
+ IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
kfree_skb(skb);
return NULL;
}
protocol = -ret;
goto resubmit;
}
- IP_INC_STATS_BH(InDelivers);
+ IP_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
} else {
if (!raw_sk) {
if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
- IP_INC_STATS_BH(InUnknownProtos);
+ IP_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_PROT_UNREACH, 0);
}
} else
- IP_INC_STATS_BH(InDelivers);
+ IP_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
kfree_skb(skb);
}
}
*/
if (skb_cow(skb, skb_headroom(skb))) {
- IP_INC_STATS_BH(InDiscards);
+ IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
goto drop;
}
iph = skb->nh.iph;
return dst_input(skb);
inhdr_error:
- IP_INC_STATS_BH(InHdrErrors);
+ IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
drop:
kfree_skb(skb);
return NET_RX_DROP;
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop;
- IP_INC_STATS_BH(InReceives);
+ IP_INC_STATS_BH(IPSTATS_MIB_INRECEIVES);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
- IP_INC_STATS_BH(InDiscards);
+ IP_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
goto out;
}
ip_rcv_finish);
inhdr_error:
- IP_INC_STATS_BH(InHdrErrors);
+ IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
drop:
kfree_skb(skb);
out:
/*
* If the indicated interface is up and running, send the packet.
*/
- IP_INC_STATS(OutRequests);
+ IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
{
struct sk_buff *skb = *pskb;
- IP_INC_STATS(OutRequests);
+ IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
if ((skb->len > dst_pmtu(skb->dst) || skb_shinfo(skb)->frag_list) &&
!skb_shinfo(skb)->tso_size)
dst_output);
no_route:
- IP_INC_STATS(OutNoRoutes);
+ IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
return -EHOSTUNREACH;
}
}
if (err == 0) {
- IP_INC_STATS(FragOKs);
+ IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
return 0;
}
kfree_skb(frag);
frag = skb;
}
- IP_INC_STATS(FragFails);
+ IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
return err;
}
* Put this fragment into the sending queue.
*/
- IP_INC_STATS(FragCreates);
+ IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
iph->tot_len = htons(len + hlen);
goto fail;
}
kfree_skb(skb);
- IP_INC_STATS(FragOKs);
+ IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
return err;
fail:
kfree_skb(skb);
- IP_INC_STATS(FragFails);
+ IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
return err;
}
error:
inet->cork.length -= length;
- IP_INC_STATS(OutDiscards);
+ IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
return err;
}
error:
inet->cork.length -= size;
- IP_INC_STATS(OutDiscards);
+ IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
return err;
}
return err;
error:
- IP_INC_STATS(OutDiscards);
+ IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
goto out;
}
#include <asm/scatterlist.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
-#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/icmp.h>
{
struct ip_options * opt = &(IPCB(skb)->opt);
- IP_INC_STATS_BH(OutForwDatagrams);
+ IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
if (unlikely(opt->optlen))
ip_forward_options(skb);
to blackhole.
*/
- IP_INC_STATS_BH(FragFails);
+ IP_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
ip_rt_put(rt);
goto out_free;
}
static int
proc_do_defense_mode(ctl_table *table, int write, struct file * filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
int val = *valp;
int rc;
- rc = proc_dointvec(table, write, filp, buffer, lenp);
+ rc = proc_dointvec(table, write, filp, buffer, lenp, ppos);
if (write && (*valp != val)) {
if ((*valp < 0) || (*valp > 3)) {
/* Restore the correct value */
static int
proc_do_sync_threshold(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
int val[2];
/* backup the value first */
memcpy(val, valp, sizeof(val));
- rc = proc_dointvec(table, write, filp, buffer, lenp);
+ rc = proc_dointvec(table, write, filp, buffer, lenp, ppos);
if (write && (valp[0] < 0 || valp[1] < 0 || valp[0] >= valp[1])) {
/* Restore the correct value */
memcpy(valp, val, sizeof(val));
*/
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/in.h>
* First port is set to the default port.
*/
static int ports[IP_VS_APP_MAX_PORTS] = {21, 0};
+static int ports_c;
+module_param_array(ports, int, ports_c, 0);
/*
* Debug level
*/
#ifdef CONFIG_IP_VS_DEBUG
static int debug=0;
-MODULE_PARM(debug, "i");
+module_param(debug, int, 0);
#endif
-MODULE_PARM(ports, "1-" __MODULE_STRING(IP_VS_APP_MAX_PORTS) "i");
/* Dummy variable */
static int ip_vs_ftp_pasv;
while (data <= data_limit - 6) {
if (strnicmp(data, "PASV\r\n", 6) == 0) {
/* Passive mode on */
- IP_VS_DBG(1-debug, "got PASV at %d of %d\n",
+ IP_VS_DBG(1-debug, "got PASV at %zd of %zd\n",
data - data_start,
data_limit - data_start);
cp->app_data = &ip_vs_ftp_pasv;
(*pskb)->len - tcphoff,
cp->protocol,
(*pskb)->csum);
- IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%d)\n",
+ IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
pp->name, tcph->check,
(char*)&(tcph->check) - (char*)tcph);
}
(*pskb)->csum);
if (udph->check == 0)
udph->check = 0xFFFF;
- IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%d)\n",
+ IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
pp->name, udph->check,
(char*)&(udph->check) - (char*)udph);
}
static int
ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length)
{
- struct msghdr msg;
- mm_segment_t oldfs;
- struct iovec iov;
+ struct msghdr msg = {.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL};
+ struct kvec iov;
int len;
EnterFunction(7);
iov.iov_base = (void *)buffer;
iov.iov_len = length;
- msg.msg_name = 0;
- msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL;
-
- oldfs = get_fs(); set_fs(KERNEL_DS);
- len = sock_sendmsg(sock, &msg, (size_t)(length));
- set_fs(oldfs);
+
+ len = kernel_sendmsg(sock, &msg, &iov, 1, (size_t)(length));
LeaveFunction(7);
return len;
static int
ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
{
- struct msghdr msg;
- struct iovec iov;
+ struct msghdr msg = {NULL,};
+ struct kvec iov;
int len;
- mm_segment_t oldfs;
EnterFunction(7);
/* Receive a packet */
iov.iov_base = buffer;
iov.iov_len = (size_t)buflen;
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
-
- oldfs = get_fs(); set_fs(KERNEL_DS);
- len = sock_recvmsg(sock, &msg, buflen, 0);
- set_fs(oldfs);
+
+ len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, 0);
if (len < 0)
return -1;
#define MAX_PORTS 8
static int ports[MAX_PORTS];
static int ports_c;
-#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
-#endif
static int loose;
MODULE_PARM(loose, "i");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("IRC (DCC) connection tracking helper");
MODULE_LICENSE("GPL");
-#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
MODULE_PARM_DESC(ports, "port numbers of IRC servers");
MODULE_PARM(max_dcc_channels, "i");
MODULE_PARM_DESC(max_dcc_channels, "max number of expected DCC channels per IRC session");
MODULE_PARM(dcc_timeout, "i");
MODULE_PARM_DESC(dcc_timeout, "timeout on for unestablished DCC channels");
-#endif
static char *dccprotos[] = { "SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT " };
#define MINMATCHLEN 5
#define MAX_PORTS 8
static int ports[MAX_PORTS];
static int ports_c;
-#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
MODULE_PARM_DESC(ports, "port numbers of tftp servers");
-#endif
#if 0
#define DEBUGP(format, args...) printk("%s:%s:" format, \
static int ports[MAX_PORTS];
static int ports_c;
-#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
-#endif
DECLARE_LOCK_EXTERN(ip_ftp_lock);
MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
MODULE_DESCRIPTION("IRC (DCC) NAT helper");
MODULE_LICENSE("GPL");
-#ifdef MODULE_PARM
MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
MODULE_PARM_DESC(ports, "port numbers of IRC servers");
-#endif
/* protects irc part of conntracks */
DECLARE_LOCK_EXTERN(ip_irc_lock);
return 1;
}
+/*
+ * Fast checksum update for possibly oddly-aligned UDP byte, from the
+ * code example in the draft.
+ */
+static void fast_csum(unsigned char *csum,
+ const unsigned char *optr,
+ const unsigned char *nptr,
+ int odd)
+{
+ long x, old, new;
+
+ x = csum[0] * 256 + csum[1];
+
+ x =~ x & 0xFFFF;
+
+ if (odd) old = optr[0] * 256;
+ else old = optr[0];
+
+ x -= old & 0xFFFF;
+ if (x <= 0) {
+ x--;
+ x &= 0xFFFF;
+ }
+
+ if (odd) new = nptr[0] * 256;
+ else new = nptr[0];
+
+ x += new & 0xFFFF;
+ if (x & 0x10000) {
+ x++;
+ x &= 0xFFFF;
+ }
+
+ x =~ x & 0xFFFF;
+ csum[0] = x / 256;
+ csum[1] = x & 0xFF;
+}
+
+/*
+ * Mangle IP address.
+ * - begin points to the start of the snmp messgae
+ * - addr points to the start of the address
+ */
+static inline void mangle_address(unsigned char *begin,
+ unsigned char *addr,
+ const struct oct1_map *map,
+ u_int16_t *check)
+{
+ if (map->from == NOCT1(*addr)) {
+ u_int32_t old;
+
+ if (debug)
+ memcpy(&old, (unsigned char *)addr, sizeof(old));
+
+ *addr = map->to;
+
+ /* Update UDP checksum if being used */
+ if (*check) {
+ unsigned char odd = !((addr - begin) % 2);
+
+ fast_csum((unsigned char *)check,
+ &map->from, &map->to, odd);
+
+ }
+
+ if (debug)
+ printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to "
+ "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr));
+ }
+}
+
static unsigned char snmp_trap_decode(struct asn1_ctx *ctx,
struct snmp_v1_trap *trap,
const struct oct1_map *map,
printk("\n");
}
-/*
- * Fast checksum update for possibly oddly-aligned UDP byte, from the
- * code example in the draft.
- */
-static void fast_csum(unsigned char *csum,
- const unsigned char *optr,
- const unsigned char *nptr,
- int odd)
-{
- long x, old, new;
-
- x = csum[0] * 256 + csum[1];
-
- x =~ x & 0xFFFF;
-
- if (odd) old = optr[0] * 256;
- else old = optr[0];
-
- x -= old & 0xFFFF;
- if (x <= 0) {
- x--;
- x &= 0xFFFF;
- }
-
- if (odd) new = nptr[0] * 256;
- else new = nptr[0];
-
- x += new & 0xFFFF;
- if (x & 0x10000) {
- x++;
- x &= 0xFFFF;
- }
-
- x =~ x & 0xFFFF;
- csum[0] = x / 256;
- csum[1] = x & 0xFF;
-}
-
-/*
- * Mangle IP address.
- * - begin points to the start of the snmp messgae
- * - addr points to the start of the address
- */
-static inline void mangle_address(unsigned char *begin,
- unsigned char *addr,
- const struct oct1_map *map,
- u_int16_t *check)
-{
- if (map->from == NOCT1(*addr)) {
- u_int32_t old;
-
- if (debug)
- memcpy(&old, (unsigned char *)addr, sizeof(old));
-
- *addr = map->to;
-
- /* Update UDP checksum if being used */
- if (*check) {
- unsigned char odd = !((addr - begin) % 2);
-
- fast_csum((unsigned char *)check,
- &map->from, &map->to, odd);
-
- }
-
- if (debug)
- printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to "
- "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr));
- }
-}
-
/*
* Parse and mangle SNMP message according to mapping.
* (And this is the fucking 'basic' method).
static int ports[MAX_PORTS];
static int ports_c = 0;
-#ifdef MODULE_PARM
MODULE_PARM(ports,"1-" __MODULE_STRING(MAX_PORTS) "i");
MODULE_PARM_DESC(ports, "port numbers of tftp servers");
-#endif
#if 0
#define DEBUGP(format, args...) printk("%s:%s:" format, \
};
static unsigned long
-__fold_field(void *mib[], int offt)
+fold_field(void *mib[], int offt)
{
unsigned long res = 0;
int i;
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_possible(i))
continue;
- res +=
- *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
- offt));
- res +=
- *((unsigned long *) (((void *) per_cpu_ptr(mib[1], i)) +
- offt));
+ res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
+ res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
}
return res;
}
-#define fold_field(_mib, _nr) __fold_field(_mib, (sizeof(unsigned long) * (_nr)))
-
/* snmp items */
-static struct snmp_item snmp4_ipstats_list[] = {
-#define __SNMP_GEN(x,y) SNMP_ITEM(struct ipstats_mib, x, y)
-#define SNMP_GEN(x) __SNMP_GEN(x, #x)
- SNMP_GEN(InReceives),
- SNMP_GEN(InHdrErrors),
- SNMP_GEN(InAddrErrors),
- __SNMP_GEN(OutForwDatagrams,"ForwDatagrams"), /* for backward compatibility */
- SNMP_GEN(InUnknownProtos),
- SNMP_GEN(InDiscards),
- SNMP_GEN(InDelivers),
- SNMP_GEN(OutRequests),
- SNMP_GEN(OutDiscards),
- SNMP_GEN(OutNoRoutes),
- SNMP_GEN(ReasmTimeout),
- SNMP_GEN(ReasmReqds),
- SNMP_GEN(ReasmOKs),
- SNMP_GEN(ReasmFails),
- SNMP_GEN(FragOKs),
- SNMP_GEN(FragFails),
- SNMP_GEN(FragCreates),
- SNMP_ITEM_SENTINEL
-#undef SNMP_GEN
+static struct snmp_mib snmp4_ipstats_list[] = {
+ SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INRECEIVES),
+ SNMP_MIB_ITEM("InHdrErrors", IPSTATS_MIB_INHDRERRORS),
+ SNMP_MIB_ITEM("InAddrErrors", IPSTATS_MIB_INADDRERRORS),
+ SNMP_MIB_ITEM("ForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS),
+ SNMP_MIB_ITEM("InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS),
+ SNMP_MIB_ITEM("InDiscards", IPSTATS_MIB_INDISCARDS),
+ SNMP_MIB_ITEM("InDelivers", IPSTATS_MIB_INDELIVERS),
+ SNMP_MIB_ITEM("OutRequests", IPSTATS_MIB_OUTREQUESTS),
+ SNMP_MIB_ITEM("OutDiscards", IPSTATS_MIB_OUTDISCARDS),
+ SNMP_MIB_ITEM("OutNoRoutes", IPSTATS_MIB_OUTNOROUTES),
+ SNMP_MIB_ITEM("ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT),
+ SNMP_MIB_ITEM("ReasmReqds", IPSTATS_MIB_REASMREQDS),
+ SNMP_MIB_ITEM("ReasmOKs", IPSTATS_MIB_REASMOKS),
+ SNMP_MIB_ITEM("ReasmFails", IPSTATS_MIB_REASMFAILS),
+ SNMP_MIB_ITEM("FragOKs", IPSTATS_MIB_FRAGOKS),
+ SNMP_MIB_ITEM("FragFails", IPSTATS_MIB_FRAGFAILS),
+ SNMP_MIB_ITEM("FragCreates", IPSTATS_MIB_FRAGCREATES),
+ SNMP_MIB_SENTINEL
+};
+
+static struct snmp_mib snmp4_icmp_list[] = {
+ SNMP_MIB_ITEM("InMsgs", ICMP_MIB_INMSGS),
+ SNMP_MIB_ITEM("InErrors", ICMP_MIB_INERRORS),
+ SNMP_MIB_ITEM("InDestUnreachs", ICMP_MIB_INDESTUNREACHS),
+ SNMP_MIB_ITEM("InTimeExcds", ICMP_MIB_INTIMEEXCDS),
+ SNMP_MIB_ITEM("InParmProbs", ICMP_MIB_INPARMPROBS),
+ SNMP_MIB_ITEM("InSrcQuenchs", ICMP_MIB_INSRCQUENCHS),
+ SNMP_MIB_ITEM("InRedirects", ICMP_MIB_INREDIRECTS),
+ SNMP_MIB_ITEM("InEchos", ICMP_MIB_INECHOS),
+ SNMP_MIB_ITEM("InEchoReps", ICMP_MIB_INECHOREPS),
+ SNMP_MIB_ITEM("InTimestamps", ICMP_MIB_INTIMESTAMPS),
+ SNMP_MIB_ITEM("InTimestampReps", ICMP_MIB_INTIMESTAMPREPS),
+ SNMP_MIB_ITEM("InAddrMasks", ICMP_MIB_INADDRMASKS),
+ SNMP_MIB_ITEM("InAddrMaskReps", ICMP_MIB_INADDRMASKREPS),
+ SNMP_MIB_ITEM("OutMsgs", ICMP_MIB_OUTMSGS),
+ SNMP_MIB_ITEM("OutErrors", ICMP_MIB_OUTERRORS),
+ SNMP_MIB_ITEM("OutDestUnreachs", ICMP_MIB_OUTDESTUNREACHS),
+ SNMP_MIB_ITEM("OutTimeExcds", ICMP_MIB_OUTTIMEEXCDS),
+ SNMP_MIB_ITEM("OutParmProbs", ICMP_MIB_OUTPARMPROBS),
+ SNMP_MIB_ITEM("OutSrcQuenchs", ICMP_MIB_OUTSRCQUENCHS),
+ SNMP_MIB_ITEM("OutRedirects", ICMP_MIB_OUTREDIRECTS),
+ SNMP_MIB_ITEM("OutEchos", ICMP_MIB_OUTECHOS),
+ SNMP_MIB_ITEM("OutEchoReps", ICMP_MIB_OUTECHOREPS),
+ SNMP_MIB_ITEM("OutTimestamps", ICMP_MIB_OUTTIMESTAMPS),
+ SNMP_MIB_ITEM("OutTimestampReps", ICMP_MIB_OUTTIMESTAMPREPS),
+ SNMP_MIB_ITEM("OutAddrMasks", ICMP_MIB_OUTADDRMASKS),
+ SNMP_MIB_ITEM("OutAddrMaskReps", ICMP_MIB_OUTADDRMASKREPS),
+ SNMP_MIB_SENTINEL
+};
+
+static struct snmp_mib snmp4_tcp_list[] = {
+ SNMP_MIB_ITEM("RtoAlgorithm", TCP_MIB_RTOALGORITHM),
+ SNMP_MIB_ITEM("RtoMin", TCP_MIB_RTOMIN),
+ SNMP_MIB_ITEM("RtoMax", TCP_MIB_RTOMAX),
+ SNMP_MIB_ITEM("MaxConn", TCP_MIB_MAXCONN),
+ SNMP_MIB_ITEM("ActiveOpens", TCP_MIB_ACTIVEOPENS),
+ SNMP_MIB_ITEM("PassiveOpens", TCP_MIB_PASSIVEOPENS),
+ SNMP_MIB_ITEM("AttemptFails", TCP_MIB_ATTEMPTFAILS),
+ SNMP_MIB_ITEM("EstabResets", TCP_MIB_ESTABRESETS),
+ SNMP_MIB_ITEM("CurrEstab", TCP_MIB_CURRESTAB),
+ SNMP_MIB_ITEM("InSegs", TCP_MIB_INSEGS),
+ SNMP_MIB_ITEM("OutSegs", TCP_MIB_OUTSEGS),
+ SNMP_MIB_ITEM("RetransSegs", TCP_MIB_RETRANSSEGS),
+ SNMP_MIB_ITEM("InErrs", TCP_MIB_INERRS),
+ SNMP_MIB_ITEM("OutRsts", TCP_MIB_OUTRSTS),
+ SNMP_MIB_SENTINEL
+};
+
+static struct snmp_mib snmp4_udp_list[] = {
+ SNMP_MIB_ITEM("InDatagrams", UDP_MIB_INDATAGRAMS),
+ SNMP_MIB_ITEM("NoPorts", UDP_MIB_NOPORTS),
+ SNMP_MIB_ITEM("InErrors", UDP_MIB_INERRORS),
+ SNMP_MIB_ITEM("OutDatagrams", UDP_MIB_OUTDATAGRAMS),
+ SNMP_MIB_SENTINEL
+};
+
+static struct snmp_mib snmp4_net_list[] = {
+ SNMP_MIB_ITEM("SyncookiesSent", LINUX_MIB_SYNCOOKIESSENT),
+ SNMP_MIB_ITEM("SyncookiesRecv", LINUX_MIB_SYNCOOKIESRECV),
+ SNMP_MIB_ITEM("SyncookiesFailed", LINUX_MIB_SYNCOOKIESFAILED),
+ SNMP_MIB_ITEM("EmbryonicRsts", LINUX_MIB_EMBRYONICRSTS),
+ SNMP_MIB_ITEM("PruneCalled", LINUX_MIB_PRUNECALLED),
+ SNMP_MIB_ITEM("RcvPruned", LINUX_MIB_RCVPRUNED),
+ SNMP_MIB_ITEM("OfoPruned", LINUX_MIB_OFOPRUNED),
+ SNMP_MIB_ITEM("OutOfWindowIcmps", LINUX_MIB_OUTOFWINDOWICMPS),
+ SNMP_MIB_ITEM("LockDroppedIcmps", LINUX_MIB_LOCKDROPPEDICMPS),
+ SNMP_MIB_ITEM("ArpFilter", LINUX_MIB_ARPFILTER),
+ SNMP_MIB_ITEM("TW", LINUX_MIB_TIMEWAITED),
+ SNMP_MIB_ITEM("TWRecycled", LINUX_MIB_TIMEWAITRECYCLED),
+ SNMP_MIB_ITEM("TWKilled", LINUX_MIB_TIMEWAITKILLED),
+ SNMP_MIB_ITEM("PAWSPassive", LINUX_MIB_PAWSPASSIVEREJECTED),
+ SNMP_MIB_ITEM("PAWSActive", LINUX_MIB_PAWSACTIVEREJECTED),
+ SNMP_MIB_ITEM("PAWSEstab", LINUX_MIB_PAWSESTABREJECTED),
+ SNMP_MIB_ITEM("DelayedACKs", LINUX_MIB_DELAYEDACKS),
+ SNMP_MIB_ITEM("DelayedACKLocked", LINUX_MIB_DELAYEDACKLOCKED),
+ SNMP_MIB_ITEM("DelayedACKLost", LINUX_MIB_DELAYEDACKLOST),
+ SNMP_MIB_ITEM("ListenOverflows", LINUX_MIB_LISTENOVERFLOWS),
+ SNMP_MIB_ITEM("ListenDrops", LINUX_MIB_LISTENDROPS),
+ SNMP_MIB_ITEM("TCPPrequeued", LINUX_MIB_TCPPREQUEUED),
+ SNMP_MIB_ITEM("TCPDirectCopyFromBacklog", LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG),
+ SNMP_MIB_ITEM("TCPDirectCopyFromPrequeue", LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE),
+ SNMP_MIB_ITEM("TCPPrequeueDropped", LINUX_MIB_TCPPREQUEUEDROPPED),
+ SNMP_MIB_ITEM("TCPHPHits", LINUX_MIB_TCPHPHITS),
+ SNMP_MIB_ITEM("TCPHPHitsToUser", LINUX_MIB_TCPHPHITSTOUSER),
+ SNMP_MIB_ITEM("TCPPureAcks", LINUX_MIB_TCPPUREACKS),
+ SNMP_MIB_ITEM("TCPHPAcks", LINUX_MIB_TCPHPACKS),
+ SNMP_MIB_ITEM("TCPRenoRecovery", LINUX_MIB_TCPRENORECOVERY),
+ SNMP_MIB_ITEM("TCPSackRecovery", LINUX_MIB_TCPSACKRECOVERY),
+ SNMP_MIB_ITEM("TCPSACKReneging", LINUX_MIB_TCPSACKRENEGING),
+ SNMP_MIB_ITEM("TCPFACKReorder", LINUX_MIB_TCPFACKREORDER),
+ SNMP_MIB_ITEM("TCPSACKReorder", LINUX_MIB_TCPSACKREORDER),
+ SNMP_MIB_ITEM("TCPRenoReorder", LINUX_MIB_TCPRENOREORDER),
+ SNMP_MIB_ITEM("TCPTSReorder", LINUX_MIB_TCPTSREORDER),
+ SNMP_MIB_ITEM("TCPFullUndo", LINUX_MIB_TCPFULLUNDO),
+ SNMP_MIB_ITEM("TCPPartialUndo", LINUX_MIB_TCPPARTIALUNDO),
+ SNMP_MIB_ITEM("TCPDSACKUndo", LINUX_MIB_TCPDSACKUNDO),
+ SNMP_MIB_ITEM("TCPLossUndo", LINUX_MIB_TCPLOSSUNDO),
+ SNMP_MIB_ITEM("TCPLoss", LINUX_MIB_TCPLOSS),
+ SNMP_MIB_ITEM("TCPLostRetransmit", LINUX_MIB_TCPLOSTRETRANSMIT),
+ SNMP_MIB_ITEM("TCPRenoFailures", LINUX_MIB_TCPRENOFAILURES),
+ SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES),
+ SNMP_MIB_ITEM("TCPLossFailures", LINUX_MIB_TCPLOSSFAILURES),
+ SNMP_MIB_ITEM("TCPFastRetrans", LINUX_MIB_TCPFASTRETRANS),
+ SNMP_MIB_ITEM("TCPForwardRetrans", LINUX_MIB_TCPFORWARDRETRANS),
+ SNMP_MIB_ITEM("TCPSlowStartRetrans", LINUX_MIB_TCPSLOWSTARTRETRANS),
+ SNMP_MIB_ITEM("TCPTimeouts", LINUX_MIB_TCPTIMEOUTS),
+ SNMP_MIB_ITEM("TCPRenoRecoveryFail", LINUX_MIB_TCPRENORECOVERYFAIL),
+ SNMP_MIB_ITEM("TCPSackRecoveryFail", LINUX_MIB_TCPSACKRECOVERYFAIL),
+ SNMP_MIB_ITEM("TCPSchedulerFailed", LINUX_MIB_TCPSCHEDULERFAILED),
+ SNMP_MIB_ITEM("TCPRcvCollapsed", LINUX_MIB_TCPRCVCOLLAPSED),
+ SNMP_MIB_ITEM("TCPDSACKOldSent", LINUX_MIB_TCPDSACKOLDSENT),
+ SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT),
+ SNMP_MIB_ITEM("TCPDSACKRecv", LINUX_MIB_TCPDSACKRECV),
+ SNMP_MIB_ITEM("TCPDSACKOfoRecv", LINUX_MIB_TCPDSACKOFORECV),
+ SNMP_MIB_ITEM("TCPAbortOnSyn", LINUX_MIB_TCPABORTONSYN),
+ SNMP_MIB_ITEM("TCPAbortOnData", LINUX_MIB_TCPABORTONDATA),
+ SNMP_MIB_ITEM("TCPAbortOnClose", LINUX_MIB_TCPABORTONCLOSE),
+ SNMP_MIB_ITEM("TCPAbortOnMemory", LINUX_MIB_TCPABORTONMEMORY),
+ SNMP_MIB_ITEM("TCPAbortOnTimeout", LINUX_MIB_TCPABORTONTIMEOUT),
+ SNMP_MIB_ITEM("TCPAbortOnLinger", LINUX_MIB_TCPABORTONLINGER),
+ SNMP_MIB_ITEM("TCPAbortFailed", LINUX_MIB_TCPABORTFAILED),
+ SNMP_MIB_ITEM("TCPMemoryPressures", LINUX_MIB_TCPMEMORYPRESSURES),
+ SNMP_MIB_SENTINEL
};
/*
{
int i;
- seq_printf(seq, "Ip: Forwarding DefaultTTL");
+ seq_puts(seq, "Ip: Forwarding DefaultTTL");
for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
seq_printf(seq, " %s", snmp4_ipstats_list[i].name);
for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
- __fold_field((void **) ip_statistics,
- snmp4_ipstats_list[i].offset));
-
- seq_printf(seq, "\nIcmp: InMsgs InErrors InDestUnreachs InTimeExcds "
- "InParmProbs InSrcQuenchs InRedirects InEchos "
- "InEchoReps InTimestamps InTimestampReps InAddrMasks "
- "InAddrMaskReps OutMsgs OutErrors OutDestUnreachs "
- "OutTimeExcds OutParmProbs OutSrcQuenchs OutRedirects "
- "OutEchos OutEchoReps OutTimestamps OutTimestampReps "
- "OutAddrMasks OutAddrMaskReps\nIcmp:");
-
- for (i = 0;
- i < offsetof(struct icmp_mib, dummy) / sizeof(unsigned long); i++)
+ fold_field((void **) ip_statistics,
+ snmp4_ipstats_list[i].entry));
+
+ seq_puts(seq, "\nIcmp:");
+ for (i = 0; snmp4_icmp_list[i].name != NULL; i++)
+ seq_printf(seq, " %s", snmp4_icmp_list[i].name);
+
+ seq_puts(seq, "\nIcmp:");
+ for (i = 0; snmp4_icmp_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
- fold_field((void **) icmp_statistics, i));
-
- seq_printf(seq, "\nTcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens "
- "PassiveOpens AttemptFails EstabResets CurrEstab "
- "InSegs OutSegs RetransSegs InErrs OutRsts\nTcp:");
-
- for (i = 0;
- i < offsetof(struct tcp_mib, __pad) / sizeof(unsigned long); i++) {
- if (i == (offsetof(struct tcp_mib, TcpMaxConn) / sizeof(unsigned long)))
- /* MaxConn field is negative, RFC 2012 */
- seq_printf(seq, " %ld",
- fold_field((void **) tcp_statistics, i));
+ fold_field((void **) icmp_statistics,
+ snmp4_icmp_list[i].entry));
+
+ seq_puts(seq, "\nTcp:");
+ for (i = 0; snmp4_tcp_list[i].name != NULL; i++)
+ seq_printf(seq, " %s", snmp4_tcp_list[i].name);
+
+ seq_puts(seq, "\nTcp:");
+ for (i = 0; snmp4_tcp_list[i].name != NULL; i++) {
+ /* MaxConn field is signed, RFC 2012 */
+ if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
+ seq_printf(seq, " %ld",
+ fold_field((void **) tcp_statistics,
+ snmp4_tcp_list[i].entry));
else
- seq_printf(seq, " %lu",
- fold_field((void **) tcp_statistics, i));
+ seq_printf(seq, " %lu",
+ fold_field((void **) tcp_statistics,
+ snmp4_tcp_list[i].entry));
}
- seq_printf(seq, "\nUdp: InDatagrams NoPorts InErrors OutDatagrams\n"
- "Udp:");
+ seq_puts(seq, "\nUdp:");
+ for (i = 0; snmp4_udp_list[i].name != NULL; i++)
+ seq_printf(seq, " %s", snmp4_udp_list[i].name);
- for (i = 0;
- i < offsetof(struct udp_mib, __pad) / sizeof(unsigned long); i++)
- seq_printf(seq, " %lu",
- fold_field((void **) udp_statistics, i));
+ seq_puts(seq, "\nUdp:");
+ for (i = 0; snmp4_udp_list[i].name != NULL; i++)
+ seq_printf(seq, " %lu",
+ fold_field((void **) udp_statistics,
+ snmp4_udp_list[i].entry));
seq_putc(seq, '\n');
return 0;
{
int i;
- seq_puts(seq, "TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed"
- " EmbryonicRsts PruneCalled RcvPruned OfoPruned"
- " OutOfWindowIcmps LockDroppedIcmps ArpFilter"
- " TW TWRecycled TWKilled"
- " PAWSPassive PAWSActive PAWSEstab"
- " DelayedACKs DelayedACKLocked DelayedACKLost"
- " ListenOverflows ListenDrops"
- " TCPPrequeued TCPDirectCopyFromBacklog"
- " TCPDirectCopyFromPrequeue TCPPrequeueDropped"
- " TCPHPHits TCPHPHitsToUser"
- " TCPPureAcks TCPHPAcks"
- " TCPRenoRecovery TCPSackRecovery"
- " TCPSACKReneging"
- " TCPFACKReorder TCPSACKReorder TCPRenoReorder"
- " TCPTSReorder"
- " TCPFullUndo TCPPartialUndo TCPDSACKUndo TCPLossUndo"
- " TCPLoss TCPLostRetransmit"
- " TCPRenoFailures TCPSackFailures TCPLossFailures"
- " TCPFastRetrans TCPForwardRetrans TCPSlowStartRetrans"
- " TCPTimeouts"
- " TCPRenoRecoveryFail TCPSackRecoveryFail"
- " TCPSchedulerFailed TCPRcvCollapsed"
- " TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv"
- " TCPDSACKOfoRecv"
- " TCPAbortOnSyn TCPAbortOnData TCPAbortOnClose"
- " TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger"
- " TCPAbortFailed TCPMemoryPressures\n"
- "TcpExt:");
- for (i = 0;
- i < offsetof(struct linux_mib, __pad) / sizeof(unsigned long);
- i++)
- seq_printf(seq, " %lu",
- fold_field((void **) net_statistics, i));
+ seq_puts(seq, "\nTcpExt:");
+ for (i = 0; snmp4_net_list[i].name != NULL; i++)
+ seq_printf(seq, " %s", snmp4_net_list[i].name);
+
+ seq_puts(seq, "\nTcpExt:");
+ for (i = 0; snmp4_net_list[i].name != NULL; i++)
+ seq_printf(seq, " %lu",
+ fold_field((void **) net_statistics,
+ snmp4_net_list[i].entry));
+
seq_putc(seq, '\n');
return 0;
}
err = -EFAULT;
kfree_skb(skb);
error:
- IP_INC_STATS(OutDiscards);
+ IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
return err;
}
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
+ if (flags & MSG_TRUNC)
+ copied = skb->len;
done:
skb_free_datagram(sk, skb);
-out: return err ? : copied;
+out: return err ? err : copied;
}
static int raw_init(struct sock *sk)
struct proto raw_prot = {
.name = "RAW",
.close = raw_close,
- .connect = udp_connect,
+ .connect = ip4_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = raw_ioctl,
.init = raw_init,
rth->rt_flags = flags;
-#ifdef CONFIG_NET_FASTROUTE
- if (netdev_fastroute && !(flags&(RTCF_NAT|RTCF_MASQ|RTCF_DOREDIRECT))) {
- struct net_device *odev = rth->u.dst.dev;
- if (odev != dev &&
- dev->accept_fastpath &&
- odev->mtu >= dev->mtu &&
- dev->accept_fastpath(dev, &rth->u.dst) == 0)
- rth->rt_flags |= RTCF_FAST;
- }
-#endif
-
intern:
err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
done:
static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
struct file *filp, void __user *buffer,
- size_t *lenp)
+ size_t *lenp, loff_t *ppos)
{
if (write) {
- proc_dointvec(ctl, write, filp, buffer, lenp);
+ proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
rt_cache_flush(flush_delay);
return 0;
}
;
*mssp = msstab[mssind] + 1;
- NET_INC_STATS_BH(SyncookiesSent);
+ NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);
return secure_tcp_syn_cookie(skb->nh.iph->saddr, skb->nh.iph->daddr,
skb->h.th->source, skb->h.th->dest,
if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
(mss = cookie_check(skb, cookie)) == 0) {
- NET_INC_STATS_BH(SyncookiesFailed);
+ NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED);
goto out;
}
- NET_INC_STATS_BH(SyncookiesRecv);
+ NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV);
req = tcp_openreq_alloc();
ret = NULL;
static
int ipv4_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int val = ipv4_devconf.forwarding;
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp);
+ ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
if (write && ipv4_devconf.forwarding != val)
inet_forward_change();
void tcp_enter_memory_pressure(void)
{
if (!tcp_memory_pressure) {
- NET_INC_STATS(TCPMemoryPressures);
+ NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
tcp_memory_pressure = 1;
}
}
* calculation of whether or not we must ACK for the sake of
* a window update.
*/
-static void cleanup_rbuf(struct sock *sk, int copied)
+void cleanup_rbuf(struct sock *sk, int copied)
{
struct tcp_opt *tp = tcp_sk(sk);
int time_to_ack = 0;
struct sk_buff *skb;
struct tcp_opt *tp = tcp_sk(sk);
- NET_ADD_STATS_USER(TCPPrequeued, skb_queue_len(&tp->ucopy.prequeue));
+ NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
/* RX process wants to run with disabled BHs, though it is not
* necessary */
/* __ Restore normal policy in scheduler __ */
if ((chunk = len - tp->ucopy.len) != 0) {
- NET_ADD_STATS_USER(TCPDirectCopyFromBacklog, chunk);
+ NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
len -= chunk;
copied += chunk;
}
tcp_prequeue_process(sk);
if ((chunk = len - tp->ucopy.len) != 0) {
- NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
+ NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
len -= chunk;
copied += chunk;
}
tcp_prequeue_process(sk);
if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
- NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
+ NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
len -= chunk;
copied += chunk;
}
*/
if (data_was_unread) {
/* Unread data was tossed, zap the connection. */
- NET_INC_STATS_USER(TCPAbortOnClose);
+ NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_KERNEL);
} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
/* Check zero linger _after_ checking for unread data. */
sk->sk_prot->disconnect(sk, 0);
- NET_INC_STATS_USER(TCPAbortOnData);
+ NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
} else if (tcp_close_state(sk)) {
/* We FIN if the application ate all the data before
* zapping the connection.
if (tp->linger2 < 0) {
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC);
- NET_INC_STATS_BH(TCPAbortOnLinger);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
} else {
int tmo = tcp_fin_time(tp);
"sockets\n");
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC);
- NET_INC_STATS_BH(TCPAbortOnMemory);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
}
}
atomic_inc(&tcp_orphan_count);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);
EXPORT_SYMBOL(tcp_timewait_cachep);
+EXPORT_SYMBOL_GPL(cleanup_rbuf);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
+/*
+ * Reset the per-connection BIC-TCP congestion-control state.
+ *
+ * Clears the increment counter and the remembered cwnd history
+ * (last_max_cwnd/last_cwnd/last_stamp) so that window-growth history
+ * from before the event is discarded.  In this patch it is invoked on
+ * new connections (SYN_SENT handling) and when entering loss recovery
+ * — see the tcp_enter_loss/FRTO hunk and the tcp_rcv_state_process
+ * hunks below; NOTE(review): confirm no other callers exist outside
+ * this diff.
+ */
+static void init_bictcp(struct tcp_opt *tp)
+{
+	tp->bictcp.cnt = 0;
+
+	tp->bictcp.last_max_cwnd = 0;
+	tp->bictcp.last_cwnd = 0;
+	tp->bictcp.last_stamp = 0;
+}
+
/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk, struct tcp_opt *tp)
{
/* This exciting event is worth to be remembered. 8) */
if (ts)
- NET_INC_STATS_BH(TCPTSReorder);
+ NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
else if (IsReno(tp))
- NET_INC_STATS_BH(TCPRenoReorder);
+ NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
else if (IsFack(tp))
- NET_INC_STATS_BH(TCPFACKReorder);
+ NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
else
- NET_INC_STATS_BH(TCPSACKReorder);
+ NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
#if FASTRETRANS_DEBUG > 1
printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
tp->sack_ok, tp->ca_state,
if (before(start_seq, ack)) {
dup_sack = 1;
tp->sack_ok |= 4;
- NET_INC_STATS_BH(TCPDSACKRecv);
+ NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
} else if (num_sacks > 1 &&
!after(end_seq, ntohl(sp[1].end_seq)) &&
!before(start_seq, ntohl(sp[1].start_seq))) {
dup_sack = 1;
tp->sack_ok |= 4;
- NET_INC_STATS_BH(TCPDSACKOfoRecv);
+ NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
}
/* D-SACK for already forgotten data...
tp->lost_out++;
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
flag |= FLAG_DATA_SACKED;
- NET_INC_STATS_BH(TCPLostRetransmit);
+ NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
}
}
}
tcp_set_ca_state(tp, TCP_CA_Loss);
tp->high_seq = tp->frto_highmark;
TCP_ECN_queue_cwr(tp);
+
+ init_bictcp(tp);
}
void tcp_clear_retrans(struct tcp_opt *tp)
*/
if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
- NET_INC_STATS_BH(TCPSACKReneging);
+ NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
tcp_enter_loss(sk, 1);
tp->retransmits++;
DBGUNDO(sk, tp, tp->ca_state == TCP_CA_Loss ? "loss" : "retrans");
tcp_undo_cwr(tp, 1);
if (tp->ca_state == TCP_CA_Loss)
- NET_INC_STATS_BH(TCPLossUndo);
+ NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
else
- NET_INC_STATS_BH(TCPFullUndo);
+ NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
tp->undo_marker = 0;
}
if (tp->snd_una == tp->high_seq && IsReno(tp)) {
DBGUNDO(sk, tp, "D-SACK");
tcp_undo_cwr(tp, 1);
tp->undo_marker = 0;
- NET_INC_STATS_BH(TCPDSACKUndo);
+ NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
}
}
DBGUNDO(sk, tp, "Hoe");
tcp_undo_cwr(tp, 0);
- NET_INC_STATS_BH(TCPPartialUndo);
+ NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
/* So... Do not make Hoe's retransmit yet.
* If the first packet was delayed, the rest
tp->lost_out = 0;
tp->left_out = tp->sacked_out;
tcp_undo_cwr(tp, 1);
- NET_INC_STATS_BH(TCPLossUndo);
+ NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
tp->retransmits = 0;
tp->undo_marker = 0;
if (!IsReno(tp))
tp->ca_state != TCP_CA_Open &&
tp->fackets_out > tp->reordering) {
tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
- NET_INC_STATS_BH(TCPLoss);
+ NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
}
/* D. Synchronize left_out to current state. */
/* Otherwise enter Recovery state */
if (IsReno(tp))
- NET_INC_STATS_BH(TCPRenoRecovery);
+ NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
else
- NET_INC_STATS_BH(TCPSackRecovery);
+ NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);
tp->high_seq = tp->snd_nxt;
tp->prior_ssthresh = 0;
if (!sysctl_tcp_bic)
return tp->snd_cwnd;
- if (tp->bictcp.last_cwnd == tp->snd_cwnd)
- return tp->bictcp.cnt; /* same cwnd, no update */
-
+ if (tp->bictcp.last_cwnd == tp->snd_cwnd &&
+ (s32)(tcp_time_stamp - tp->bictcp.last_stamp) <= (HZ>>5))
+ return tp->bictcp.cnt;
+
tp->bictcp.last_cwnd = tp->snd_cwnd;
+ tp->bictcp.last_stamp = tcp_time_stamp;
/* start off normal */
if (tp->snd_cwnd <= sysctl_tcp_bic_low_window)
tcp_westwood_fast_bw(sk, skb);
flag |= FLAG_WIN_UPDATE;
- NET_INC_STATS_BH(TCPHPAcks);
+ NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
} else {
if (ack_seq != TCP_SKB_CB(skb)->end_seq)
flag |= FLAG_DATA;
else
- NET_INC_STATS_BH(TCPPureAcks);
+ NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq);
{
if (tp->sack_ok && sysctl_tcp_dsack) {
if (before(seq, tp->rcv_nxt))
- NET_INC_STATS_BH(TCPDSACKOldSent);
+ NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
else
- NET_INC_STATS_BH(TCPDSACKOfoSent);
+ NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);
tp->dsack = 1;
tp->duplicate_sack[0].start_seq = seq;
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
- NET_INC_STATS_BH(DelayedACKLost);
+ NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
tcp_enter_quickack_mode(tp);
if (tp->sack_ok && sysctl_tcp_dsack) {
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
/* A retransmit, 2nd most common case. Force an immediate ack. */
- NET_INC_STATS_BH(DelayedACKLost);
+ NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
out_of_window:
struct sk_buff *next = skb->next;
__skb_unlink(skb, skb->list);
__kfree_skb(skb);
- NET_INC_STATS_BH(TCPRcvCollapsed);
+ NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
skb = next;
continue;
}
struct sk_buff *next = skb->next;
__skb_unlink(skb, skb->list);
__kfree_skb(skb);
- NET_INC_STATS_BH(TCPRcvCollapsed);
+ NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
skb = next;
if (skb == tail || skb->h.th->syn || skb->h.th->fin)
return;
SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
- NET_INC_STATS_BH(PruneCalled);
+ NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
tcp_clamp_window(sk, tp);
/* First, purge the out_of_order queue. */
if (skb_queue_len(&tp->out_of_order_queue)) {
- NET_ADD_STATS_BH(OfoPruned,
+ NET_ADD_STATS_BH(LINUX_MIB_OFOPRUNED,
skb_queue_len(&tp->out_of_order_queue));
__skb_queue_purge(&tp->out_of_order_queue);
* drop receive data on the floor. It will get retransmitted
* and hopefully then we'll have sufficient space.
*/
- NET_INC_STATS_BH(RcvPruned);
+ NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED);
/* Massive buffer overcommit. */
tp->pred_flags = 0;
tcp_data_snd_check(sk);
return 0;
} else { /* Header too small */
- TCP_INC_STATS_BH(TcpInErrs);
+ TCP_INC_STATS_BH(TCP_MIB_INERRS);
goto discard;
}
} else {
__skb_pull(skb, tcp_header_len);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
- NET_INC_STATS_BH(TCPHPHitsToUser);
+ NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
eaten = 1;
}
}
if ((int)skb->truesize > sk->sk_forward_alloc)
goto step5;
- NET_INC_STATS_BH(TCPHPHits);
+ NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS);
/* Bulk data transfer: receiver */
__skb_pull(skb,tcp_header_len);
if (tcp_fast_parse_options(skb, th, tp) && tp->saw_tstamp &&
tcp_paws_discard(tp, skb)) {
if (!th->rst) {
- NET_INC_STATS_BH(PAWSEstabRejected);
+ NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
tcp_send_dupack(sk, skb);
goto discard;
}
tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
- TCP_INC_STATS_BH(TcpInErrs);
- NET_INC_STATS_BH(TCPAbortOnSyn);
+ TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
tcp_reset(sk);
return 1;
}
return 0;
csum_error:
- TCP_INC_STATS_BH(TcpInErrs);
+ TCP_INC_STATS_BH(TCP_MIB_INERRS);
discard:
__kfree_skb(skb);
if (tp->saw_tstamp && tp->rcv_tsecr &&
!between(tp->rcv_tsecr, tp->retrans_stamp,
tcp_time_stamp)) {
- NET_INC_STATS_BH(PAWSActiveRejected);
+ NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
goto reset_and_undo;
}
return 1;
init_westwood(sk);
+ init_bictcp(tp);
/* Now we have several options: In theory there is
* nothing else in the frame. KA9Q has an option to
case TCP_SYN_SENT:
init_westwood(sk);
+ init_bictcp(tp);
queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
if (queued >= 0)
if (tcp_fast_parse_options(skb, th, tp) && tp->saw_tstamp &&
tcp_paws_discard(tp, skb)) {
if (!th->rst) {
- NET_INC_STATS_BH(PAWSEstabRejected);
+ NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
tcp_send_dupack(sk, skb);
goto discard;
}
* Check for a SYN in window.
*/
if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
- NET_INC_STATS_BH(TCPAbortOnSyn);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
tcp_reset(sk);
return 1;
}
(TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
tcp_done(sk);
- NET_INC_STATS_BH(TCPAbortOnData);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
return 1;
}
if (sk->sk_shutdown & RCV_SHUTDOWN) {
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
- NET_INC_STATS_BH(TCPAbortOnData);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
tcp_reset(sk);
return 1;
}
if (twp) {
*twp = tw;
- NET_INC_STATS_BH(TimeWaitRecycled);
+ NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
} else if (tw) {
/* Silly. Should hash-dance instead... */
tcp_tw_deschedule(tw);
- NET_INC_STATS_BH(TimeWaitRecycled);
+ NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
tcp_tw_put(tw);
}
int err;
if (skb->len < (iph->ihl << 2) + 8) {
- ICMP_INC_STATS_BH(IcmpInErrors);
+ ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
return;
}
sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
th->source, tcp_v4_iif(skb));
if (!sk) {
- ICMP_INC_STATS_BH(IcmpInErrors);
+ ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
return;
}
if (sk->sk_state == TCP_TIME_WAIT) {
* servers this needs to be solved differently.
*/
if (sock_owned_by_user(sk))
- NET_INC_STATS_BH(LockDroppedIcmps);
+ NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == TCP_CLOSE)
goto out;
seq = ntohl(th->seq);
if (sk->sk_state != TCP_LISTEN &&
!between(seq, tp->snd_una, tp->snd_nxt)) {
- NET_INC_STATS(OutOfWindowIcmps);
+ NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
BUG_TRAP(!req->sk);
if (seq != req->snt_isn) {
- NET_INC_STATS_BH(OutOfWindowIcmps);
+ NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
It can f.e. if SYNs crossed.
*/
if (!sock_owned_by_user(sk)) {
- TCP_INC_STATS_BH(TcpAttemptFails);
+ TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
sk->sk_err = err;
sk->sk_error_report(sk);
ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
- TCP_INC_STATS_BH(TcpOutSegs);
- TCP_INC_STATS_BH(TcpOutRsts);
+ TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
+ TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
- TCP_INC_STATS_BH(TcpOutSegs);
+ TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
.dport = req->rmt_port } } };
if (ip_route_output_flow(&rt, &fl, sk, 0)) {
- IP_INC_STATS_BH(OutNoRoutes);
+ IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
ip_rt_put(rt);
- IP_INC_STATS_BH(OutNoRoutes);
+ IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
return &rt->u.dst;
if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
(s32)(peer->tcp_ts - req->ts_recent) >
TCP_PAWS_WINDOW) {
- NET_INC_STATS_BH(PAWSPassiveRejected);
+ NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
dst_release(dst);
goto drop_and_free;
}
drop_and_free:
tcp_openreq_free(req);
drop:
- TCP_INC_STATS_BH(TcpAttemptFails);
+ TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
return 0;
}
return newsk;
exit_overflow:
- NET_INC_STATS_BH(ListenOverflows);
+ NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
- NET_INC_STATS_BH(ListenDrops);
+ NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
dst_release(dst);
return NULL;
}
return 0;
csum_err:
- TCP_INC_STATS_BH(TcpInErrs);
+ TCP_INC_STATS_BH(TCP_MIB_INERRS);
goto discard;
}
goto discard_it;
/* Count it even if it's bad */
- TCP_INC_STATS_BH(TcpInSegs);
+ TCP_INC_STATS_BH(TCP_MIB_INSEGS);
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
goto discard_it;
if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
- TCP_INC_STATS_BH(TcpInErrs);
+ TCP_INC_STATS_BH(TCP_MIB_INERRS);
} else {
tcp_v4_send_reset(skb);
}
}
if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
- TCP_INC_STATS_BH(TcpInErrs);
+ TCP_INC_STATS_BH(TCP_MIB_INERRS);
tcp_tw_put((struct tcp_tw_bucket *) sk);
goto discard_it;
}
if (tp->bind_hash)
tcp_put_port(sk);
+ /*
+ * If sendmsg cached page exists, toss it.
+ */
+ if (sk->sk_sndmsg_page) {
+ __free_page(sk->sk_sndmsg_page);
+ sk->sk_sndmsg_page = NULL;
+ }
+
atomic_dec(&tcp_sockets_allocated);
return 0;
}
if (paws_reject)
- NET_INC_STATS_BH(PAWSEstabRejected);
+ NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
if(!th->rst) {
/* In this case we must reset the TIMEWAIT timer.
}
tcp_tw_count -= killed;
- NET_ADD_STATS_BH(TimeWaited, killed);
+ NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);
return ret;
}
out:
if ((tcp_tw_count -= killed) == 0)
del_timer(&tcp_tw_timer);
- NET_ADD_STATS_BH(TimeWaitKilled, killed);
+ NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
spin_unlock(&tw_death_lock);
}
if ((filter = newsk->sk_filter) != NULL)
sk_filter_charge(newsk, filter);
+ if (sk->sk_create_child)
+ sk->sk_create_child(sk, newsk);
+
if (unlikely(xfrm_sk_clone_policy(newsk))) {
/* It is still raw copy of parent, so invalidate
* destructor and make plain sk_free() */
newtp->snd_cwnd = 2;
newtp->snd_cwnd_cnt = 0;
- newtp->bictcp.cnt = 0;
- newtp->bictcp.last_max_cwnd = newtp->bictcp.last_cwnd = 0;
-
newtp->frto_counter = 0;
newtp->frto_highmark = 0;
newsk->sk_no_largesend = 1;
tcp_vegas_init(newtp);
- TCP_INC_STATS_BH(TcpPassiveOpens);
+ TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
}
return newsk;
}
if (!(flg & TCP_FLAG_RST))
req->class->send_ack(skb, req);
if (paws_reject)
- NET_INC_STATS_BH(PAWSEstabRejected);
+ NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
return NULL;
}
}
embryonic_reset:
- NET_INC_STATS_BH(EmbryonicRsts);
+ NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
if (!(flg & TCP_FLAG_RST))
req->class->send_reset(skb);
tp->rcv_wnd = new_win;
tp->rcv_wup = tp->rcv_nxt;
+ /* Make sure we do not exceed the maximum possible
+ * scaled window.
+ */
+ if (!tp->rcv_wscale)
+ new_win = min(new_win, MAX_TCP_WINDOW);
+ else
+ new_win = min(new_win, (65535U << tp->rcv_wscale));
+
/* RFC1323 scaling applied */
new_win >>= tp->rcv_wscale;
if (skb->len != tcp_header_size)
tcp_event_data_sent(tp, skb, sk);
- TCP_INC_STATS(TcpOutSegs);
+ TCP_INC_STATS(TCP_MIB_OUTSEGS);
err = tp->af_specific->queue_xmit(skb, 0);
if (err <= 0)
if (err == 0) {
/* Update global TCP statistics. */
- TCP_INC_STATS(TcpRetransSegs);
+ TCP_INC_STATS(TCP_MIB_RETRANSSEGS);
#if FASTRETRANS_DEBUG > 0
if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
if (tcp_retransmit_skb(sk, skb))
return;
if (tp->ca_state != TCP_CA_Loss)
- NET_INC_STATS_BH(TCPFastRetrans);
+ NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
else
- NET_INC_STATS_BH(TCPSlowStartRetrans);
+ NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
if (skb ==
skb_peek(&sk->sk_write_queue))
if (skb == skb_peek(&sk->sk_write_queue))
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
- NET_INC_STATS_BH(TCPForwardRetrans);
+ NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
}
}
/* NOTE: No TCP options attached and we never retransmit this. */
skb = alloc_skb(MAX_TCP_HEADER, priority);
if (!skb) {
- NET_INC_STATS(TCPAbortFailed);
+ NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
return;
}
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (tcp_transmit_skb(sk, skb))
- NET_INC_STATS(TCPAbortFailed);
+ NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
}
/* WARNING: This routine must only be called when we have already sent
skb->csum = 0;
th->doff = (tcp_header_size >> 2);
- TCP_INC_STATS(TcpOutSegs);
+ TCP_INC_STATS(TCP_MIB_OUTSEGS);
return skb;
}
sk_charge_skb(sk, buff);
tp->packets_out++;
tcp_transmit_skb(sk, skb_clone(buff, GFP_KERNEL));
- TCP_INC_STATS(TcpActiveOpens);
+ TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
/* Timer for repeating the SYN until an answer. */
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
sk->sk_error_report(sk);
tcp_done(sk);
- NET_INC_STATS_BH(TCPAbortOnTimeout);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT);
}
/* Do not allow orphaned sockets to eat all our resources.
if (do_reset)
tcp_send_active_reset(sk, GFP_ATOMIC);
tcp_done(sk);
- NET_INC_STATS_BH(TCPAbortOnMemory);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
return 1;
}
return 0;
if (sock_owned_by_user(sk)) {
/* Try again later. */
tp->ack.blocked = 1;
- NET_INC_STATS_BH(DelayedACKLocked);
+ NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
sk_reset_timer(sk, &tp->delack_timer, jiffies + TCP_DELACK_MIN);
goto out_unlock;
}
if (skb_queue_len(&tp->ucopy.prequeue)) {
struct sk_buff *skb;
- NET_ADD_STATS_BH(TCPSchedulerFailed,
- skb_queue_len(&tp->ucopy.prequeue));
+ NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED,
+ skb_queue_len(&tp->ucopy.prequeue));
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
sk->sk_backlog_rcv(sk, skb);
tp->ack.ato = TCP_ATO_MIN;
}
tcp_send_ack(sk);
- NET_INC_STATS_BH(DelayedACKs);
+ NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
}
TCP_CHECK_TIMER(sk);
if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
if (tp->sack_ok) {
if (tp->ca_state == TCP_CA_Recovery)
- NET_INC_STATS_BH(TCPSackRecoveryFail);
+ NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
else
- NET_INC_STATS_BH(TCPSackFailures);
+ NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES);
} else {
if (tp->ca_state == TCP_CA_Recovery)
- NET_INC_STATS_BH(TCPRenoRecoveryFail);
+ NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL);
else
- NET_INC_STATS_BH(TCPRenoFailures);
+ NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES);
}
} else if (tp->ca_state == TCP_CA_Loss) {
- NET_INC_STATS_BH(TCPLossFailures);
+ NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES);
} else {
- NET_INC_STATS_BH(TCPTimeouts);
+ NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS);
}
}
sk = udp_v4_lookup(iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex);
if (sk == NULL) {
- ICMP_INC_STATS_BH(IcmpInErrors);
+ ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
return; /* No socket for error */
}
if (free)
kfree(ipc.opt);
if (!err) {
- UDP_INC_STATS_USER(UdpOutDatagrams);
+ UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS);
return len;
}
return err;
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
+
err = copied;
+ if (flags & MSG_TRUNC)
+ err = skb->len - sizeof(struct udphdr);
out_free:
skb_free_datagram(sk, skb);
return err;
csum_copy_err:
- UDP_INC_STATS_BH(UdpInErrors);
+ UDP_INC_STATS_BH(UDP_MIB_INERRORS);
/* Clear queue. */
if (flags&MSG_PEEK) {
goto try_again;
}
-int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
-{
- struct inet_opt *inet = inet_sk(sk);
- struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
- struct rtable *rt;
- u32 saddr;
- int oif;
- int err;
-
-
- if (addr_len < sizeof(*usin))
- return -EINVAL;
-
- if (usin->sin_family != AF_INET)
- return -EAFNOSUPPORT;
-
- sk_dst_reset(sk);
-
- oif = sk->sk_bound_dev_if;
- saddr = inet->saddr;
- if (MULTICAST(usin->sin_addr.s_addr)) {
- if (!oif)
- oif = inet->mc_index;
- if (!saddr)
- saddr = inet->mc_addr;
- }
- err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr,
- RT_CONN_FLAGS(sk), oif,
- IPPROTO_UDP,
- inet->sport, usin->sin_port, sk);
- if (err)
- return err;
- if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
- ip_rt_put(rt);
- return -EACCES;
- }
- if (!inet->saddr)
- inet->saddr = rt->rt_src; /* Update source address */
- if (!inet->rcv_saddr)
- inet->rcv_saddr = rt->rt_src;
- inet->daddr = rt->rt_dst;
- inet->dport = usin->sin_port;
- sk->sk_state = TCP_ESTABLISHED;
- inet->id = jiffies;
-
- sk_dst_set(sk, &rt->u.dst);
- return(0);
-}
int udp_disconnect(struct sock *sk, int flags)
{
} else
/* Must be an IKE packet.. pass it through */
return 1;
-
+ break;
case UDP_ENCAP_ESPINUDP_NON_IKE:
/* Check if this is a keepalive packet. If so, eat it. */
if (len == 1 && udpdata[0] == 0xff) {
} else
/* Must be an IKE packet.. pass it through */
return 1;
+ break;
}
/* At this point we are sure that this is an ESPinUDP packet,
if (ret < 0) {
/* process the ESP packet */
ret = xfrm4_rcv_encap(skb, up->encap_type);
- UDP_INC_STATS_BH(UdpInDatagrams);
+ UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
return -ret;
}
/* FALLTHROUGH -- it's a UDP Packet */
if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
if (__udp_checksum_complete(skb)) {
- UDP_INC_STATS_BH(UdpInErrors);
+ UDP_INC_STATS_BH(UDP_MIB_INERRORS);
kfree_skb(skb);
return -1;
}
}
if (sock_queue_rcv_skb(sk,skb)<0) {
- UDP_INC_STATS_BH(UdpInErrors);
+ UDP_INC_STATS_BH(UDP_MIB_INERRORS);
kfree_skb(skb);
return -1;
}
- UDP_INC_STATS_BH(UdpInDatagrams);
+ UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
return 0;
}
if (udp_checksum_complete(skb))
goto csum_error;
- UDP_INC_STATS_BH(UdpNoPorts);
+ UDP_INC_STATS_BH(UDP_MIB_NOPORTS);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
/*
NIPQUAD(daddr),
ntohs(uh->dest)));
no_header:
- UDP_INC_STATS_BH(UdpInErrors);
+ UDP_INC_STATS_BH(UDP_MIB_INERRORS);
kfree_skb(skb);
return(0);
ntohs(uh->dest),
ulen));
drop:
- UDP_INC_STATS_BH(UdpInErrors);
+ UDP_INC_STATS_BH(UDP_MIB_INERRORS);
kfree_skb(skb);
return(0);
}
struct proto udp_prot = {
.name = "UDP",
.close = udp_close,
- .connect = udp_connect,
+ .connect = ip4_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = udp_destroy_sock,
}
#endif /* CONFIG_PROC_FS */
-EXPORT_SYMBOL(udp_connect);
EXPORT_SYMBOL(udp_disconnect);
EXPORT_SYMBOL(udp_hash);
EXPORT_SYMBOL(udp_hash_lock);
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
+#include <net/icmp.h>
/* Add encapsulation header.
*
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
}
+/*
+ * Enforce path-MTU limits for xfrm (IPsec) tunnel-mode output.
+ *
+ * Returns 0 when the packet may proceed; returns -EMSGSIZE (after
+ * emitting an ICMP_FRAG_NEEDED error carrying the usable MTU) when a
+ * DF-flagged packet would exceed the path MTU minus the xfrm
+ * header/trailer overhead.  The IPSKB_XFRM_TUNNEL_SIZE flag guards
+ * against performing the check more than once on the same skb.
+ * This is the former global xfrm4_tunnel_check_size() moved here and
+ * made static (the old copy is deleted further down in this diff).
+ */
+static int xfrm4_tunnel_check_size(struct sk_buff *skb)
+{
+	int mtu, ret = 0;
+	struct dst_entry *dst;
+	struct iphdr *iph = skb->nh.iph;
+
+	/* Already checked for this skb — nothing to do. */
+	if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
+		goto out;
+
+	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
+
+	/* Fragmentation is permitted without DF, so no size limit applies. */
+	if (!(iph->frag_off & htons(IP_DF)))
+		goto out;
+
+	dst = skb->dst;
+	/* Usable payload MTU = route PMTU minus xfrm header/trailer space. */
+	mtu = dst_pmtu(dst) - dst->header_len - dst->trailer_len;
+	if (skb->len > mtu) {
+		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+		ret = -EMSGSIZE;
+	}
+out:
+	return ret;
+}
+
int xfrm4_output(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
proto == x->id.proto &&
saddr->a4 == x->props.saddr.a4 &&
reqid == x->props.reqid &&
- x->km.state == XFRM_STATE_ACQ) {
- if (!x0)
- x0 = x;
- if (x->id.spi)
- continue;
+ x->km.state == XFRM_STATE_ACQ &&
+ !x->id.spi) {
x0 = x;
break;
}
#include <linux/skbuff.h>
#include <net/xfrm.h>
#include <net/ip.h>
-#include <net/icmp.h>
-#include <net/inet_ecn.h>
-
-int xfrm4_tunnel_check_size(struct sk_buff *skb)
-{
- int mtu, ret = 0;
- struct dst_entry *dst;
- struct iphdr *iph = skb->nh.iph;
-
- if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
- goto out;
-
- IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
-
- if (!(iph->frag_off & htons(IP_DF)))
- goto out;
-
- dst = skb->dst;
- mtu = dst_pmtu(dst) - dst->header_len - dst->trailer_len;
- if (skb->len > mtu) {
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
- ret = -EMSGSIZE;
- }
-out:
- return ret;
-}
+#include <net/protocol.h>
static int ipip_output(struct sk_buff **pskb)
{
config IPV6_PRIVACY
bool "IPv6: Privacy Extensions (RFC 3041) support"
depends on IPV6
- select CRYPTO
- select CRYPTO_MD5
---help---
Privacy Extensions for Stateless Address Autoconfiguration in IPv6
support. With this option, additional periodically-alter
ip6_flowlabel.o ipv6_syms.o
ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \
- xfrm6_tunnel.o
+ xfrm6_tunnel.o xfrm6_output.o
ipv6-objs += $(ipv6-y)
obj-$(CONFIG_INET6_AH) += ah6.o
p.iph.ihl = 5;
p.iph.protocol = IPPROTO_IPV6;
p.iph.ttl = 64;
- ifr.ifr_ifru.ifru_data = (void*)&p;
+ ifr.ifr_ifru.ifru_data = (void __user *)&p;
oldfs = get_fs(); set_fs(KERNEL_DS);
err = dev->do_ioctl(dev, &ifr, SIOCADDTUNNEL);
static
int addrconf_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
int val = *valp;
int ret;
- ret = proc_dointvec(ctl, write, filp, buffer, lenp);
+ ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
if (write && *valp != val && valp != &ipv6_devconf_dflt.forwarding) {
struct inet6_dev *idev = NULL;
.flags = INET_PROTOSW_REUSE,
};
-#define INETSW6_ARRAY_LEN (sizeof(inetsw6_array) / sizeof(struct inet_protosw))
-
void
inet6_register_protosw(struct inet_protosw *p)
{
#include <linux/config.h>
#include <linux/module.h>
-#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
+#include <linux/string.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/xfrm.h>
return 0;
}
-static int ipv6_clear_mutable_options(struct sk_buff *skb, u16 *nh_offset, int dir)
+/**
+ * ipv6_rearrange_rthdr - rearrange IPv6 routing header
+ * @iph: IPv6 header
+ * @rthdr: routing header
+ *
+ * Rearrange the destination address in @iph and the addresses in @rthdr
+ * so that they appear in the order they will at the final destination.
+ * See Appendix A2 of RFC 2402 for details.
+ */
+static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
{
- u16 offset = sizeof(struct ipv6hdr);
- struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
- unsigned int packet_len = skb->tail - skb->nh.raw;
- u8 nexthdr = skb->nh.ipv6h->nexthdr;
- u8 nextnexthdr = 0;
+ int segments, segments_left;
+ struct in6_addr *addrs;
+ struct in6_addr final_addr;
+
+ segments_left = rthdr->segments_left;
+ if (segments_left == 0)
+ return;
+ rthdr->segments_left = 0;
+
+ /* The value of rthdr->hdrlen has been verified either by the system
+ * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
+ * packets. So we can assume that it is even and that segments is
+ * greater than or equal to segments_left.
+ *
+ * For the same reason we can assume that this option is of type 0.
+ */
+ segments = rthdr->hdrlen >> 1;
- *nh_offset = ((unsigned char *)&skb->nh.ipv6h->nexthdr) - skb->nh.raw;
+ addrs = ((struct rt0_hdr *)rthdr)->addr;
+ ipv6_addr_copy(&final_addr, addrs + segments - 1);
- while (offset + 1 <= packet_len) {
+ addrs += segments - segments_left;
+ memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));
- switch (nexthdr) {
+ ipv6_addr_copy(addrs, &iph->daddr);
+ ipv6_addr_copy(&iph->daddr, &final_addr);
+}
+static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len)
+{
+ union {
+ struct ipv6hdr *iph;
+ struct ipv6_opt_hdr *opth;
+ struct ipv6_rt_hdr *rth;
+ char *raw;
+ } exthdr = { .iph = iph };
+ char *end = exthdr.raw + len;
+ int nexthdr = iph->nexthdr;
+
+ exthdr.iph++;
+
+ while (exthdr.raw < end) {
+ switch (nexthdr) {
case NEXTHDR_HOP:
- *nh_offset = offset;
- offset += ipv6_optlen(exthdr);
- if (!zero_out_mutable_opts(exthdr)) {
- LIMIT_NETDEBUG(
- printk(KERN_WARNING "overrun hopopts\n"));
- return 0;
+ case NEXTHDR_DEST:
+ if (!zero_out_mutable_opts(exthdr.opth)) {
+ LIMIT_NETDEBUG(printk(
+ KERN_WARNING "overrun %sopts\n",
+ nexthdr == NEXTHDR_HOP ?
+ "hop" : "dest"));
+ return -EINVAL;
}
- nexthdr = exthdr->nexthdr;
- exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
break;
case NEXTHDR_ROUTING:
- *nh_offset = offset;
- offset += ipv6_optlen(exthdr);
- ((struct ipv6_rt_hdr*)exthdr)->segments_left = 0;
- nexthdr = exthdr->nexthdr;
- exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
- break;
-
- case NEXTHDR_DEST:
- *nh_offset = offset;
- offset += ipv6_optlen(exthdr);
- if (!zero_out_mutable_opts(exthdr)) {
- LIMIT_NETDEBUG(
- printk(KERN_WARNING "overrun destopt\n"));
- return 0;
- }
- nexthdr = exthdr->nexthdr;
- exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
+ ipv6_rearrange_rthdr(iph, exthdr.rth);
break;
- case NEXTHDR_AUTH:
- if (dir == XFRM_POLICY_OUT) {
- memset(((struct ipv6_auth_hdr*)exthdr)->auth_data, 0,
- (((struct ipv6_auth_hdr*)exthdr)->hdrlen - 1) << 2);
- }
- if (exthdr->nexthdr == NEXTHDR_DEST) {
- offset += (((struct ipv6_auth_hdr*)exthdr)->hdrlen + 2) << 2;
- exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
- nextnexthdr = exthdr->nexthdr;
- if (!zero_out_mutable_opts(exthdr)) {
- LIMIT_NETDEBUG(
- printk(KERN_WARNING "overrun destopt\n"));
- return 0;
- }
- }
- return nexthdr;
default :
- return nexthdr;
+ return 0;
}
+
+ nexthdr = exthdr.opth->nexthdr;
+ exthdr.raw += ipv6_optlen(exthdr.opth);
}
- return nexthdr;
+ return 0;
}
int ah6_output(struct sk_buff **pskb)
{
int err;
- int hdr_len = sizeof(struct ipv6hdr);
+ int extlen;
struct dst_entry *dst = (*pskb)->dst;
struct xfrm_state *x = dst->xfrm;
- struct ipv6hdr *iph = NULL;
+ struct ipv6hdr *top_iph;
struct ip_auth_hdr *ah;
struct ah_data *ahp;
- u16 nh_offset = 0;
u8 nexthdr;
+ char tmp_base[8];
+ struct {
+ struct in6_addr daddr;
+ char hdrs[0];
+ } *tmp_ext;
- if ((*pskb)->ip_summed == CHECKSUM_HW) {
- err = skb_checksum_help(pskb, 0);
- if (err)
- goto error_nolock;
- }
-
- spin_lock_bh(&x->lock);
- err = xfrm_state_check(x, *pskb);
- if (err)
- goto error;
+ top_iph = (struct ipv6hdr *)(*pskb)->data;
+ top_iph->payload_len = htons((*pskb)->len - sizeof(*top_iph));
- if (x->props.mode) {
- err = xfrm6_tunnel_check_size(*pskb);
- if (err)
- goto error;
+ nexthdr = *(*pskb)->nh.raw;
+ *(*pskb)->nh.raw = IPPROTO_AH;
- iph = (*pskb)->nh.ipv6h;
- (*pskb)->nh.ipv6h = (struct ipv6hdr*)skb_push(*pskb, x->props.header_len);
- (*pskb)->nh.ipv6h->version = 6;
- (*pskb)->nh.ipv6h->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
- (*pskb)->nh.ipv6h->nexthdr = IPPROTO_AH;
- ipv6_addr_copy(&(*pskb)->nh.ipv6h->saddr,
- (struct in6_addr *) &x->props.saddr);
- ipv6_addr_copy(&(*pskb)->nh.ipv6h->daddr,
- (struct in6_addr *) &x->id.daddr);
- ah = (struct ip_auth_hdr*)((*pskb)->nh.ipv6h+1);
- ah->nexthdr = IPPROTO_IPV6;
- } else {
- hdr_len = (*pskb)->h.raw - (*pskb)->nh.raw;
- iph = kmalloc(hdr_len, GFP_ATOMIC);
- if (!iph) {
+ /* When there are no extension headers, we only need to save the first
+ * 8 bytes of the base IP header.
+ */
+ memcpy(tmp_base, top_iph, sizeof(tmp_base));
+
+ tmp_ext = NULL;
+ extlen = (*pskb)->h.raw - (unsigned char *)(top_iph + 1);
+ if (extlen) {
+ extlen += sizeof(*tmp_ext);
+ tmp_ext = kmalloc(extlen, GFP_ATOMIC);
+ if (!tmp_ext) {
err = -ENOMEM;
goto error;
}
- memcpy(iph, (*pskb)->data, hdr_len);
- (*pskb)->nh.ipv6h = (struct ipv6hdr*)skb_push(*pskb, x->props.header_len);
- memcpy((*pskb)->nh.ipv6h, iph, hdr_len);
- nexthdr = ipv6_clear_mutable_options(*pskb, &nh_offset, XFRM_POLICY_OUT);
- if (nexthdr == 0)
+ memcpy(tmp_ext, &top_iph->daddr, extlen);
+ err = ipv6_clear_mutable_options(top_iph,
+ extlen - sizeof(*tmp_ext) +
+ sizeof(*top_iph));
+ if (err)
goto error_free_iph;
-
- (*pskb)->nh.raw[nh_offset] = IPPROTO_AH;
- (*pskb)->nh.ipv6h->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
- ah = (struct ip_auth_hdr*)((*pskb)->nh.raw+hdr_len);
- (*pskb)->h.raw = (unsigned char*) ah;
- ah->nexthdr = nexthdr;
}
- (*pskb)->nh.ipv6h->priority = 0;
- (*pskb)->nh.ipv6h->flow_lbl[0] = 0;
- (*pskb)->nh.ipv6h->flow_lbl[1] = 0;
- (*pskb)->nh.ipv6h->flow_lbl[2] = 0;
- (*pskb)->nh.ipv6h->hop_limit = 0;
+ ah = (struct ip_auth_hdr *)(*pskb)->h.raw;
+ ah->nexthdr = nexthdr;
+
+ top_iph->priority = 0;
+ top_iph->flow_lbl[0] = 0;
+ top_iph->flow_lbl[1] = 0;
+ top_iph->flow_lbl[2] = 0;
+ top_iph->hop_limit = 0;
ahp = x->data;
ah->hdrlen = (XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) +
ah->seq_no = htonl(++x->replay.oseq);
ahp->icv(ahp, *pskb, ah->auth_data);
- if (x->props.mode) {
- (*pskb)->nh.ipv6h->hop_limit = iph->hop_limit;
- (*pskb)->nh.ipv6h->priority = iph->priority;
- (*pskb)->nh.ipv6h->flow_lbl[0] = iph->flow_lbl[0];
- (*pskb)->nh.ipv6h->flow_lbl[1] = iph->flow_lbl[1];
- (*pskb)->nh.ipv6h->flow_lbl[2] = iph->flow_lbl[2];
- if (x->props.flags & XFRM_STATE_NOECN)
- IP6_ECN_clear((*pskb)->nh.ipv6h);
- } else {
- memcpy((*pskb)->nh.ipv6h, iph, hdr_len);
- (*pskb)->nh.raw[nh_offset] = IPPROTO_AH;
- (*pskb)->nh.ipv6h->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
- kfree (iph);
- }
-
- (*pskb)->nh.raw = (*pskb)->data;
+ err = 0;
- x->curlft.bytes += (*pskb)->len;
- x->curlft.packets++;
- spin_unlock_bh(&x->lock);
- if (((*pskb)->dst = dst_pop(dst)) == NULL) {
- err = -EHOSTUNREACH;
- goto error_nolock;
- }
- return NET_XMIT_BYPASS;
+ memcpy(top_iph, tmp_base, sizeof(tmp_base));
+ if (tmp_ext) {
+ memcpy(&top_iph->daddr, tmp_ext, extlen);
error_free_iph:
- kfree(iph);
+ kfree(tmp_ext);
+ }
+
error:
- spin_unlock_bh(&x->lock);
-error_nolock:
- kfree_skb(*pskb);
return err;
}
* Before process AH
* [IPv6][Ext1][Ext2][AH][Dest][Payload]
* |<-------------->| hdr_len
- * |<------------------------>| cleared_hlen
*
* To erase AH:
* Keeping copy of cleared headers. After AH processing,
unsigned char *tmp_hdr = NULL;
u16 hdr_len;
u16 ah_hlen;
- u16 cleared_hlen;
- u16 nh_offset = 0;
- u8 nexthdr = 0;
- u8 *prevhdr;
+ int nexthdr;
if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
goto out;
goto out;
hdr_len = skb->data - skb->nh.raw;
- cleared_hlen = hdr_len;
ah = (struct ipv6_auth_hdr*)skb->data;
ahp = x->data;
nexthdr = ah->nexthdr;
ah_hlen = (ah->hdrlen + 2) << 2;
- cleared_hlen += ah_hlen;
-
- if (nexthdr == NEXTHDR_DEST) {
- struct ipv6_opt_hdr *dsthdr = (struct ipv6_opt_hdr*)(skb->data + ah_hlen);
- cleared_hlen += ipv6_optlen(dsthdr);
- }
if (ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_full_len) &&
ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_trunc_len))
if (!pskb_may_pull(skb, ah_hlen))
goto out;
- tmp_hdr = kmalloc(cleared_hlen, GFP_ATOMIC);
+ tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);
if (!tmp_hdr)
goto out;
- memcpy(tmp_hdr, skb->nh.raw, cleared_hlen);
- ipv6_clear_mutable_options(skb, &nh_offset, XFRM_POLICY_IN);
+ memcpy(tmp_hdr, skb->nh.raw, hdr_len);
+ if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len))
+ goto out;
skb->nh.ipv6h->priority = 0;
skb->nh.ipv6h->flow_lbl[0] = 0;
skb->nh.ipv6h->flow_lbl[1] = 0;
skb->nh.raw = skb_pull(skb, ah_hlen);
memcpy(skb->nh.raw, tmp_hdr, hdr_len);
- if (nexthdr == NEXTHDR_DEST) {
- memcpy(skb->nh.raw + hdr_len,
- tmp_hdr + hdr_len + ah_hlen,
- cleared_hlen - hdr_len - ah_hlen);
- }
- prevhdr = (u8*)(skb->nh.raw + nh_offset);
- *prevhdr = nexthdr;
skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
skb_pull(skb, hdr_len);
skb->h.raw = skb->data;
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
+#include <net/ip6_route.h>
#include <linux/errqueue.h>
#include <asm/uaccess.h>
+int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
+ struct inet_opt *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct in6_addr *daddr;
+ struct dst_entry *dst;
+ struct flowi fl;
+ struct ip6_flowlabel *flowlabel = NULL;
+ int addr_type;
+ int err;
+
+ if (usin->sin6_family == AF_INET) {
+ if (__ipv6_only_sock(sk))
+ return -EAFNOSUPPORT;
+ err = ip4_datagram_connect(sk, uaddr, addr_len);
+ goto ipv4_connected;
+ }
+
+ if (addr_len < SIN6_LEN_RFC2133)
+ return -EINVAL;
+
+ if (usin->sin6_family != AF_INET6)
+ return -EAFNOSUPPORT;
+
+ memset(&fl, 0, sizeof(fl));
+ if (np->sndflow) {
+ fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
+ if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
+ flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+ if (flowlabel == NULL)
+ return -EINVAL;
+ ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+ }
+ }
+
+ addr_type = ipv6_addr_type(&usin->sin6_addr);
+
+ if (addr_type == IPV6_ADDR_ANY) {
+ /*
+ * connect to self
+ */
+ usin->sin6_addr.s6_addr[15] = 0x01;
+ }
+
+ daddr = &usin->sin6_addr;
+
+ if (addr_type == IPV6_ADDR_MAPPED) {
+ struct sockaddr_in sin;
+
+ if (__ipv6_only_sock(sk)) {
+ err = -ENETUNREACH;
+ goto out;
+ }
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = daddr->s6_addr32[3];
+ sin.sin_port = usin->sin6_port;
+
+ err = ip4_datagram_connect(sk,
+ (struct sockaddr*) &sin,
+ sizeof(sin));
+
+ipv4_connected:
+ if (err)
+ goto out;
+
+ ipv6_addr_set(&np->daddr, 0, 0, htonl(0x0000ffff), inet->daddr);
+
+ if (ipv6_addr_any(&np->saddr)) {
+ ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000ffff),
+ inet->saddr);
+ }
+
+ if (ipv6_addr_any(&np->rcv_saddr)) {
+ ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000ffff),
+ inet->rcv_saddr);
+ }
+ goto out;
+ }
+
+ if (addr_type&IPV6_ADDR_LINKLOCAL) {
+ if (addr_len >= sizeof(struct sockaddr_in6) &&
+ usin->sin6_scope_id) {
+ if (sk->sk_bound_dev_if &&
+ sk->sk_bound_dev_if != usin->sin6_scope_id) {
+ err = -EINVAL;
+ goto out;
+ }
+ sk->sk_bound_dev_if = usin->sin6_scope_id;
+ if (!sk->sk_bound_dev_if &&
+ (addr_type & IPV6_ADDR_MULTICAST))
+ fl.oif = np->mcast_oif;
+ }
+
+ /* Connect to link-local address requires an interface */
+ if (!sk->sk_bound_dev_if) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ ipv6_addr_copy(&np->daddr, daddr);
+ np->flow_label = fl.fl6_flowlabel;
+
+ inet->dport = usin->sin6_port;
+
+ /*
+ * Check for a route to destination an obtain the
+ * destination cache for it.
+ */
+
+ fl.proto = sk->sk_protocol;
+ ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
+ ipv6_addr_copy(&fl.fl6_src, &np->saddr);
+ fl.oif = sk->sk_bound_dev_if;
+ fl.fl_ip_dport = inet->dport;
+ fl.fl_ip_sport = inet->sport;
+
+ if (!fl.oif && (addr_type&IPV6_ADDR_MULTICAST))
+ fl.oif = np->mcast_oif;
+
+ if (flowlabel) {
+ if (flowlabel->opt && flowlabel->opt->srcrt) {
+ struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt;
+ ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
+ }
+ } else if (np->opt && np->opt->srcrt) {
+ struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
+ ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
+ }
+
+ err = ip6_dst_lookup(sk, &dst, &fl);
+ if (err)
+ goto out;
+
+ /* source address lookup done in ip6_dst_lookup */
+
+ if (ipv6_addr_any(&np->saddr))
+ ipv6_addr_copy(&np->saddr, &fl.fl6_src);
+
+ if (ipv6_addr_any(&np->rcv_saddr)) {
+ ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src);
+ inet->rcv_saddr = LOOPBACK4_IPV6;
+ }
+
+ ip6_dst_store(sk, dst,
+ !ipv6_addr_cmp(&fl.fl6_dst, &np->daddr) ?
+ &np->daddr : NULL);
+
+ sk->sk_state = TCP_ESTABLISHED;
+out:
+ fl6_sock_release(flowlabel);
+ return err;
+}
+
void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
u16 port, u32 info, u8 *payload)
{
#include <linux/config.h>
#include <linux/module.h>
-#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
int esp6_output(struct sk_buff **pskb)
{
int err;
- int hdr_len = 0;
+ int hdr_len;
struct dst_entry *dst = (*pskb)->dst;
struct xfrm_state *x = dst->xfrm;
- struct ipv6hdr *iph = NULL, *top_iph;
+ struct ipv6hdr *top_iph;
struct ipv6_esp_hdr *esph;
struct crypto_tfm *tfm;
struct esp_data *esp;
int clen;
int alen;
int nfrags;
- u8 *prevhdr;
- u8 nexthdr = 0;
- if ((*pskb)->ip_summed == CHECKSUM_HW) {
- err = skb_checksum_help(pskb, 0);
- if (err)
- goto error_nolock;
- }
+ esp = x->data;
+ hdr_len = (*pskb)->h.raw - (*pskb)->data +
+ sizeof(*esph) + esp->conf.ivlen;
- spin_lock_bh(&x->lock);
- err = xfrm_state_check(x, *pskb);
- if (err)
- goto error;
-
- if (x->props.mode) {
- err = xfrm6_tunnel_check_size(*pskb);
- if (err)
- goto error;
- } else {
- /* Strip IP header in transport mode. Save it. */
- hdr_len = ip6_find_1stfragopt(*pskb, &prevhdr);
- nexthdr = *prevhdr;
- *prevhdr = IPPROTO_ESP;
- iph = kmalloc(hdr_len, GFP_ATOMIC);
- if (!iph) {
- err = -ENOMEM;
- goto error;
- }
- memcpy(iph, (*pskb)->nh.raw, hdr_len);
- __skb_pull(*pskb, hdr_len);
- }
+ /* Strip IP+ESP header. */
+ __skb_pull(*pskb, hdr_len);
/* Now skb is pure payload to encrypt */
err = -ENOMEM;
/* Round to block size */
clen = (*pskb)->len;
- esp = x->data;
alen = esp->auth.icv_trunc_len;
tfm = esp->conf.tfm;
blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3;
clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1);
if ((nfrags = skb_cow_data(*pskb, clen-(*pskb)->len+alen, &trailer)) < 0) {
- if (!x->props.mode && iph) kfree(iph);
goto error;
}
*(u8*)(trailer->tail + clen-(*pskb)->len - 2) = (clen - (*pskb)->len)-2;
pskb_put(*pskb, trailer, clen - (*pskb)->len);
- if (x->props.mode) {
- iph = (*pskb)->nh.ipv6h;
- top_iph = (struct ipv6hdr*)skb_push(*pskb, x->props.header_len);
- esph = (struct ipv6_esp_hdr*)(top_iph+1);
- *(u8*)(trailer->tail - 1) = IPPROTO_IPV6;
- top_iph->version = 6;
- top_iph->priority = iph->priority;
- top_iph->flow_lbl[0] = iph->flow_lbl[0];
- top_iph->flow_lbl[1] = iph->flow_lbl[1];
- top_iph->flow_lbl[2] = iph->flow_lbl[2];
- if (x->props.flags & XFRM_STATE_NOECN)
- IP6_ECN_clear(top_iph);
- top_iph->nexthdr = IPPROTO_ESP;
- top_iph->payload_len = htons((*pskb)->len + alen - sizeof(struct ipv6hdr));
- top_iph->hop_limit = iph->hop_limit;
- ipv6_addr_copy(&top_iph->saddr,
- (struct in6_addr *)&x->props.saddr);
- ipv6_addr_copy(&top_iph->daddr,
- (struct in6_addr *)&x->id.daddr);
- } else {
- esph = (struct ipv6_esp_hdr*)skb_push(*pskb, x->props.header_len);
- (*pskb)->h.raw = (unsigned char*)esph;
- top_iph = (struct ipv6hdr*)skb_push(*pskb, hdr_len);
- memcpy(top_iph, iph, hdr_len);
- kfree(iph);
- top_iph->payload_len = htons((*pskb)->len + alen - sizeof(struct ipv6hdr));
- *(u8*)(trailer->tail - 1) = nexthdr;
- }
+ top_iph = (struct ipv6hdr *)__skb_push(*pskb, hdr_len);
+ esph = (struct ipv6_esp_hdr *)(*pskb)->h.raw;
+ top_iph->payload_len = htons((*pskb)->len + alen - sizeof(*top_iph));
+ *(u8*)(trailer->tail - 1) = *(*pskb)->nh.raw;
+ *(*pskb)->nh.raw = IPPROTO_ESP;
esph->spi = x->id.spi;
esph->seq_no = htonl(++x->replay.oseq);
pskb_put(*pskb, trailer, alen);
}
- (*pskb)->nh.raw = (*pskb)->data;
-
- x->curlft.bytes += (*pskb)->len;
- x->curlft.packets++;
- spin_unlock_bh(&x->lock);
- if (((*pskb)->dst = dst_pop(dst)) == NULL) {
- err = -EHOSTUNREACH;
- goto error_nolock;
- }
- return NET_XMIT_BYPASS;
+ err = 0;
error:
- spin_unlock_bh(&x->lock);
-error_nolock:
- kfree_skb(*pskb);
return err;
}
u8 nexthdr[2];
struct scatterlist *sg = &esp->sgbuf[0];
u8 padlen;
- u8 *prevhdr;
if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
skb->nh.raw += sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
memcpy(skb->nh.raw, tmp_hdr, hdr_len);
skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
- ip6_find_1stfragopt(skb, &prevhdr);
- ret = *prevhdr = nexthdr[1];
+ ret = nexthdr[1];
}
out:
if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
!pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -1;
}
return 1;
}
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
return -1;
}
if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
!pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -1;
}
if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr) ||
skb->pkt_type != PACKET_HOST) {
- IP6_INC_STATS_BH(InAddrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
kfree_skb(skb);
return -1;
}
}
if (hdr->type != IPV6_SRCRT_TYPE_0) {
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw);
return -1;
}
if (hdr->hdrlen & 0x01) {
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->hdrlen) - skb->nh.raw);
return -1;
}
n = hdr->hdrlen >> 1;
if (hdr->segments_left > n) {
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->segments_left) - skb->nh.raw);
return -1;
}
kfree_skb(skb);
/* the copy is a forwarded packet */
if (skb2 == NULL) {
- IP6_INC_STATS_BH(OutDiscards);
+ IP6_INC_STATS_BH(IPSTATS_MIB_OUTDISCARDS);
return -1;
}
*skbp = skb = skb2;
addr += i - 1;
if (ipv6_addr_is_multicast(addr)) {
- IP6_INC_STATS_BH(InAddrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
kfree_skb(skb);
return -1;
}
}
if (skb->dst->dev->flags&IFF_LOOPBACK) {
if (skb->nh.ipv6h->hop_limit <= 1) {
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
0, skb->dev);
kfree_skb(skb);
if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) {
LIMIT_NETDEBUG(
printk(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", skb->nh.raw[optoff+1]));
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
goto drop;
}
pkt_len = ntohl(*(u32*)(skb->nh.raw+optoff+2));
if (pkt_len <= IPV6_MAXPLEN) {
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
return 0;
}
if (skb->nh.ipv6h->payload_len) {
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
return 0;
}
if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
- IP6_INC_STATS_BH(InTruncatedPkts);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
}
if (pkt_len + sizeof(struct ipv6hdr) < skb->len) {
*/
dst = ip6_route_output(sk, fl);
if (dst->error) {
- IP6_INC_STATS(OutNoRoutes);
+ IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
res = 1;
} else {
err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));
if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
- ICMP6_INC_STATS_OFFSET_BH(idev, Icmp6OutDestUnreachs, type - ICMPV6_DEST_UNREACH);
- ICMP6_INC_STATS_BH(idev, Icmp6OutMsgs);
+ ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_OUTDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
+ ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
out_put:
if (likely(idev != NULL))
}
err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
- ICMP6_INC_STATS_BH(idev, Icmp6OutEchoReplies);
- ICMP6_INC_STATS_BH(idev, Icmp6OutMsgs);
+ ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTECHOREPLIES);
+ ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
out_put:
if (likely(idev != NULL))
struct icmp6hdr *hdr;
int type;
- ICMP6_INC_STATS_BH(idev, Icmp6InMsgs);
+ ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);
saddr = &skb->nh.ipv6h->saddr;
daddr = &skb->nh.ipv6h->daddr;
type = hdr->icmp6_type;
if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
- ICMP6_INC_STATS_OFFSET_BH(idev, Icmp6InDestUnreachs, type - ICMPV6_DEST_UNREACH);
+ ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
else if (type >= ICMPV6_ECHO_REQUEST && type <= NDISC_REDIRECT)
- ICMP6_INC_STATS_OFFSET_BH(idev, Icmp6InEchos, type - ICMPV6_ECHO_REQUEST);
+ ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INECHOS, type - ICMPV6_ECHO_REQUEST);
switch (type) {
case ICMPV6_ECHO_REQUEST:
break;
case ICMPV6_MGM_REDUCTION:
+ case ICMPV6_NI_QUERY:
+ case ICMPV6_NI_REPLY:
case ICMPV6_MLD2_REPORT:
+ case ICMPV6_DHAAD_REQUEST:
+ case ICMPV6_DHAAD_REPLY:
+ case ICMPV6_MOBILE_PREFIX_SOL:
+ case ICMPV6_MOBILE_PREFIX_ADV:
break;
default:
return 0;
discard_it:
- ICMP6_INC_STATS_BH(idev, Icmp6InErrors);
+ ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
kfree_skb(skb);
return 0;
}
static struct timer_list ip6_fib_timer = TIMER_INITIALIZER(fib6_run_gc, 0, 0);
-static struct fib6_walker_t fib6_walker_list = {
+struct fib6_walker_t fib6_walker_list = {
.prev = &fib6_walker_list,
.next = &fib6_walker_list,
};
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
- seq_printf(seq, "Label S Owner Users Linger Expires "
- "Dst Opt\n");
+ seq_puts(seq, "Label S Owner Users Linger Expires "
+ "Dst Opt\n");
else
ip6fl_fl_seq_show(seq, v);
return 0;
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop;
- IP6_INC_STATS_BH(InReceives);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INRECEIVES);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
- IP6_INC_STATS_BH(InDiscards);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
goto out;
}
goto err;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) {
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
goto drop;
}
goto truncated;
if (pkt_len + sizeof(struct ipv6hdr) < skb->len) {
if (__pskb_trim(skb, pkt_len + sizeof(struct ipv6hdr))){
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
goto drop;
}
hdr = skb->nh.ipv6h;
if (hdr->nexthdr == NEXTHDR_HOP) {
skb->h.raw = (u8*)(hdr+1);
if (ipv6_parse_hopopts(skb, offsetof(struct ipv6hdr, nexthdr)) < 0) {
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
return 0;
}
hdr = skb->nh.ipv6h;
return NF_HOOK(PF_INET6,NF_IP6_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish);
truncated:
- IP6_INC_STATS_BH(InTruncatedPkts);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS);
err:
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
drop:
kfree_skb(skb);
out:
if (ret > 0)
goto resubmit;
else if (ret == 0)
- IP6_INC_STATS_BH(InDelivers);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
} else {
if (!raw_sk) {
if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
- IP6_INC_STATS_BH(InUnknownProtos);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff);
}
} else {
- IP6_INC_STATS_BH(InDelivers);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
kfree_skb(skb);
}
}
return 0;
discard:
- IP6_INC_STATS_BH(InDiscards);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
rcu_read_unlock();
kfree_skb(skb);
return 0;
struct ipv6hdr *hdr;
int deliver;
- IP6_INC_STATS_BH(InMcastPkts);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INMCASTPKTS);
hdr = skb->nh.ipv6h;
deliver = likely(!(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI))) ||
} else if (dst->neighbour)
return dst->neighbour->output(skb);
- IP6_INC_STATS_BH(OutNoRoutes);
+ IP6_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
return -EINVAL;
ip6_dev_loopback_xmit);
if (skb->nh.ipv6h->hop_limit == 0) {
- IP6_INC_STATS(OutDiscards);
+ IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
return 0;
}
}
- IP6_INC_STATS(OutMcastPkts);
+ IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
}
return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb,NULL, skb->dev,ip6_output_finish);
dst = ip6_route_output(skb->sk, &fl);
if (dst->error) {
- IP6_INC_STATS(OutNoRoutes);
+ IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
LIMIT_NETDEBUG(
printk(KERN_DEBUG "ip6_route_me_harder: No more route.\n"));
dst_release(dst);
kfree_skb(skb);
skb = skb2;
if (skb == NULL) {
- IP6_INC_STATS(OutDiscards);
+ IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
return -ENOBUFS;
}
if (sk)
mtu = dst_pmtu(dst);
if ((skb->len <= mtu) || ipfragok) {
- IP6_INC_STATS(OutRequests);
+ IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute);
}
printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
- IP6_INC_STATS(FragFails);
+ IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
return -EMSGSIZE;
}
goto error;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
- IP6_INC_STATS(InDiscards);
+ IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
goto drop;
}
}
if (!xfrm6_route_forward(skb)) {
- IP6_INC_STATS(InDiscards);
+ IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
goto drop;
}
/* Again, force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_pmtu(dst), skb->dev);
- IP6_INC_STATS_BH(InTooBigErrors);
- IP6_INC_STATS_BH(FragFails);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS);
+ IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
return -EMSGSIZE;
}
if (skb_cow(skb, dst->dev->hard_header_len)) {
- IP6_INC_STATS(OutDiscards);
+ IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
goto drop;
}
hdr->hop_limit--;
- IP6_INC_STATS_BH(OutForwDatagrams);
+ IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
return NF_HOOK(PF_INET6,NF_IP6_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish);
error:
- IP6_INC_STATS_BH(InAddrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
drop:
kfree_skb(skb);
return -EINVAL;
tmp_hdr = kmalloc(hlen, GFP_ATOMIC);
if (!tmp_hdr) {
- IP6_INC_STATS(FragFails);
+ IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
return -ENOMEM;
}
kfree(tmp_hdr);
if (err == 0) {
- IP6_INC_STATS(FragOKs);
+ IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
return 0;
}
frag = skb;
}
- IP6_INC_STATS(FragFails);
+ IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
return err;
}
if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
NETDEBUG(printk(KERN_INFO "IPv6: frag: no memory for new fragment!\n"));
- IP6_INC_STATS(FragFails);
+ IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
err = -ENOMEM;
goto fail;
}
* Put this fragment into the sending queue.
*/
- IP6_INC_STATS(FragCreates);
+ IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);
err = output(&frag);
if (err)
goto fail;
}
kfree_skb(skb);
- IP6_INC_STATS(FragOKs);
+ IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
return err;
fail:
kfree_skb(skb);
- IP6_INC_STATS(FragFails);
+ IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
return err;
}
return 0;
error:
inet->cork.length -= length;
- IP6_INC_STATS(OutDiscards);
+ IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
return err;
}
ipv6_addr_copy(&hdr->daddr, final_dst);
skb->dst = dst_clone(&rt->u.dst);
- IP6_INC_STATS(OutRequests);
+ IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
if (err) {
if (err > 0)
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
- IP6_INC_STATS(OutDiscards);
+ IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
}
*/
#include <linux/config.h>
#include <linux/module.h>
-#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ipcomp.h>
{
int err = 0;
u8 nexthdr = 0;
- u8 *prevhdr;
int hdr_len = skb->h.raw - skb->nh.raw;
unsigned char *tmp_hdr = NULL;
struct ipv6hdr *iph;
iph = skb->nh.ipv6h;
iph->payload_len = htons(skb->len);
- ip6_find_1stfragopt(skb, &prevhdr);
- *prevhdr = nexthdr;
out:
if (tmp_hdr)
kfree(tmp_hdr);
int err;
struct dst_entry *dst = (*pskb)->dst;
struct xfrm_state *x = dst->xfrm;
- struct ipv6hdr *iph, *top_iph;
- int hdr_len = 0;
+ struct ipv6hdr *top_iph;
+ int hdr_len;
struct ipv6_comp_hdr *ipch;
struct ipcomp_data *ipcd = x->data;
- u8 *prevhdr;
- u8 nexthdr = 0;
int plen, dlen;
u8 *start, *scratch = ipcd->scratch;
- if ((*pskb)->ip_summed == CHECKSUM_HW) {
- err = skb_checksum_help(pskb, 0);
- if (err)
- goto error_nolock;
- }
-
- spin_lock_bh(&x->lock);
-
- err = xfrm_state_check(x, *pskb);
- if (err)
- goto error;
-
- if (x->props.mode) {
- err = xfrm6_tunnel_check_size(*pskb);
- if (err)
- goto error;
-
- hdr_len = sizeof(struct ipv6hdr);
- nexthdr = IPPROTO_IPV6;
- iph = (*pskb)->nh.ipv6h;
- top_iph = (struct ipv6hdr *)skb_push(*pskb, sizeof(struct ipv6hdr));
- top_iph->version = 6;
- top_iph->priority = iph->priority;
- top_iph->flow_lbl[0] = iph->flow_lbl[0];
- top_iph->flow_lbl[1] = iph->flow_lbl[1];
- top_iph->flow_lbl[2] = iph->flow_lbl[2];
- top_iph->nexthdr = IPPROTO_IPV6; /* initial */
- top_iph->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
- top_iph->hop_limit = iph->hop_limit;
- memcpy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr, sizeof(struct in6_addr));
- memcpy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr, sizeof(struct in6_addr));
- (*pskb)->nh.raw = (*pskb)->data; /* == top_iph */
- (*pskb)->h.raw = (*pskb)->nh.raw + hdr_len;
- } else {
- hdr_len = ip6_find_1stfragopt(*pskb, &prevhdr);
- nexthdr = *prevhdr;
- }
+ hdr_len = (*pskb)->h.raw - (*pskb)->data;
/* check whether datagram len is larger than threshold */
if (((*pskb)->len - hdr_len) < ipcd->threshold) {
/* compression */
plen = (*pskb)->len - hdr_len;
dlen = IPCOMP_SCRATCH_SIZE;
- start = (*pskb)->data + hdr_len;
+ start = (*pskb)->h.raw;
err = crypto_comp_compress(ipcd->tfm, start, plen, scratch, &dlen);
if (err) {
pskb_trim(*pskb, hdr_len + dlen + sizeof(struct ip_comp_hdr));
/* insert ipcomp header and replace datagram */
- top_iph = (*pskb)->nh.ipv6h;
+ top_iph = (struct ipv6hdr *)(*pskb)->data;
- if (x->props.mode && (x->props.flags & XFRM_STATE_NOECN))
- IP6_ECN_clear(top_iph);
top_iph->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
- (*pskb)->nh.raw = (*pskb)->data; /* top_iph */
- ip6_find_1stfragopt(*pskb, &prevhdr);
- *prevhdr = IPPROTO_COMP;
- ipch = (struct ipv6_comp_hdr *)((unsigned char *)top_iph + hdr_len);
- ipch->nexthdr = nexthdr;
+ ipch = (struct ipv6_comp_hdr *)start;
+ ipch->nexthdr = *(*pskb)->nh.raw;
ipch->flags = 0;
ipch->cpi = htons((u16 )ntohl(x->id.spi));
+ *(*pskb)->nh.raw = IPPROTO_COMP;
- (*pskb)->h.raw = (unsigned char*)ipch;
out_ok:
- x->curlft.bytes += (*pskb)->len;
- x->curlft.packets++;
- spin_unlock_bh(&x->lock);
-
- if (((*pskb)->dst = dst_pop(dst)) == NULL) {
- err = -EHOSTUNREACH;
- goto error_nolock;
- }
- err = NET_XMIT_BYPASS;
+ err = 0;
-out_exit:
- return err;
error:
- spin_unlock_bh(&x->lock);
-error_nolock:
- kfree_skb(*pskb);
- goto out_exit;
+ return err;
}
static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct ipv6_comp_hdr *ipcomph = (struct ipv6_comp_hdr*)(skb->data+offset);
struct xfrm_state *x;
- if (type != ICMPV6_DEST_UNREACH || type != ICMPV6_PKT_TOOBIG)
+ if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG)
return;
spi = ntohl(ntohs(ipcomph->cpi));
struct inet6_dev *idev = in6_dev_get(skb->dev);
int err;
- IP6_INC_STATS(OutRequests);
+ IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
payload_len = skb->tail - (unsigned char *)skb->nh.ipv6h -
sizeof(struct ipv6hdr);
mldlen = skb->tail - skb->h.raw;
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dev,
dev_queue_xmit);
if (!err) {
- ICMP6_INC_STATS(idev,Icmp6OutMsgs);
- IP6_INC_STATS(OutMcastPkts);
+ ICMP6_INC_STATS(idev,ICMP6_MIB_OUTMSGS);
+ IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
} else
- IP6_INC_STATS(OutDiscards);
+ IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
if (likely(idev != NULL))
in6_dev_put(idev);
IPV6_TLV_ROUTERALERT, 2, 0, 0,
IPV6_TLV_PADN, 0 };
- IP6_INC_STATS(OutRequests);
+ IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
snd_addr = addr;
if (type == ICMPV6_MGM_REDUCTION) {
snd_addr = &all_routers;
skb = sock_alloc_send_skb(sk, LL_RESERVED_SPACE(dev) + full_len, 1, &err);
if (skb == NULL) {
- IP6_INC_STATS(OutDiscards);
+ IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
return;
}
dev_queue_xmit);
if (!err) {
if (type == ICMPV6_MGM_REDUCTION)
- ICMP6_INC_STATS(idev, Icmp6OutGroupMembReductions);
+ ICMP6_INC_STATS(idev, ICMP6_MIB_OUTGROUPMEMBREDUCTIONS);
else
- ICMP6_INC_STATS(idev, Icmp6OutGroupMembResponses);
- ICMP6_INC_STATS(idev, Icmp6OutMsgs);
- IP6_INC_STATS(OutMcastPkts);
+ ICMP6_INC_STATS(idev, ICMP6_MIB_OUTGROUPMEMBRESPONSES);
+ ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
+ IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
} else
- IP6_INC_STATS(OutDiscards);
+ IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
if (likely(idev != NULL))
in6_dev_put(idev);
return;
out:
- IP6_INC_STATS(OutDiscards);
+ IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
}
skb->dst = dst;
idev = in6_dev_get(dst->dev);
- IP6_INC_STATS(OutRequests);
+ IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output);
if (!err) {
- ICMP6_INC_STATS(idev, Icmp6OutNeighborAdvertisements);
- ICMP6_INC_STATS(idev, Icmp6OutMsgs);
+ ICMP6_INC_STATS(idev, ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS);
+ ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
}
if (likely(idev != NULL))
/* send it! */
skb->dst = dst;
idev = in6_dev_get(dst->dev);
- IP6_INC_STATS(OutRequests);
+ IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output);
if (!err) {
- ICMP6_INC_STATS(idev, Icmp6OutNeighborSolicits);
- ICMP6_INC_STATS(idev, Icmp6OutMsgs);
+ ICMP6_INC_STATS(idev, ICMP6_MIB_OUTNEIGHBORSOLICITS);
+ ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
}
if (likely(idev != NULL))
/* send it! */
skb->dst = dst;
idev = in6_dev_get(dst->dev);
- IP6_INC_STATS(OutRequests);
+ IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output);
if (!err) {
- ICMP6_INC_STATS(idev, Icmp6OutRouterSolicits);
- ICMP6_INC_STATS(idev, Icmp6OutMsgs);
+ ICMP6_INC_STATS(idev, ICMP6_MIB_OUTROUTERSOLICITS);
+ ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
}
if (likely(idev != NULL))
buff->dst = dst;
idev = in6_dev_get(dst->dev);
- IP6_INC_STATS(OutRequests);
+ IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, buff, NULL, dst->dev, dst_output);
if (!err) {
- ICMP6_INC_STATS(idev, Icmp6OutRedirects);
- ICMP6_INC_STATS(idev, Icmp6OutMsgs);
+ ICMP6_INC_STATS(idev, ICMP6_MIB_OUTREDIRECTS);
+ ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
}
if (likely(idev != NULL))
};
#ifdef CONFIG_SYSCTL
-int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * filp, void __user *buffer, size_t *lenp)
+int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * filp, void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct net_device *dev = ctl->extra1;
struct inet6_dev *idev;
inet6_ifinfo_notify(RTM_NEWLINK, idev);
in6_dev_put(idev);
}
- return proc_dointvec(ctl, write, filp, buffer, lenp);
+ return proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
}
#endif
return 0;
}
-static struct snmp_item snmp6_ipstats_list[] = {
+static struct snmp_mib snmp6_ipstats_list[] = {
/* ipv6 mib according to RFC 2465 */
-#define SNMP6_GEN(x) SNMP_ITEM(struct ipstats_mib, x, "Ip6" #x)
- SNMP6_GEN(InReceives),
- SNMP6_GEN(InHdrErrors),
- SNMP6_GEN(InTooBigErrors),
- SNMP6_GEN(InNoRoutes),
- SNMP6_GEN(InAddrErrors),
- SNMP6_GEN(InUnknownProtos),
- SNMP6_GEN(InTruncatedPkts),
- SNMP6_GEN(InDiscards),
- SNMP6_GEN(InDelivers),
- SNMP6_GEN(OutForwDatagrams),
- SNMP6_GEN(OutRequests),
- SNMP6_GEN(OutDiscards),
- SNMP6_GEN(OutNoRoutes),
- SNMP6_GEN(ReasmTimeout),
- SNMP6_GEN(ReasmReqds),
- SNMP6_GEN(ReasmOKs),
- SNMP6_GEN(ReasmFails),
- SNMP6_GEN(FragOKs),
- SNMP6_GEN(FragFails),
- SNMP6_GEN(FragCreates),
- SNMP6_GEN(InMcastPkts),
- SNMP6_GEN(OutMcastPkts),
-#undef SNMP6_GEN
- SNMP_ITEM_SENTINEL
+ SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INRECEIVES),
+ SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS),
+ SNMP_MIB_ITEM("Ip6InTooBigErrors", IPSTATS_MIB_INTOOBIGERRORS),
+ SNMP_MIB_ITEM("Ip6InNoRoutes", IPSTATS_MIB_INNOROUTES),
+ SNMP_MIB_ITEM("Ip6InAddrErrors", IPSTATS_MIB_INADDRERRORS),
+ SNMP_MIB_ITEM("Ip6InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS),
+ SNMP_MIB_ITEM("Ip6InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS),
+ SNMP_MIB_ITEM("Ip6InDiscards", IPSTATS_MIB_INDISCARDS),
+ SNMP_MIB_ITEM("Ip6InDelivers", IPSTATS_MIB_INDELIVERS),
+ SNMP_MIB_ITEM("Ip6OutForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS),
+ SNMP_MIB_ITEM("Ip6OutRequests", IPSTATS_MIB_OUTREQUESTS),
+ SNMP_MIB_ITEM("Ip6OutDiscards", IPSTATS_MIB_OUTDISCARDS),
+ SNMP_MIB_ITEM("Ip6OutNoRoutes", IPSTATS_MIB_OUTNOROUTES),
+ SNMP_MIB_ITEM("Ip6ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT),
+ SNMP_MIB_ITEM("Ip6ReasmReqds", IPSTATS_MIB_REASMREQDS),
+ SNMP_MIB_ITEM("Ip6ReasmOKs", IPSTATS_MIB_REASMOKS),
+ SNMP_MIB_ITEM("Ip6ReasmFails", IPSTATS_MIB_REASMFAILS),
+ SNMP_MIB_ITEM("Ip6FragOKs", IPSTATS_MIB_FRAGOKS),
+ SNMP_MIB_ITEM("Ip6FragFails", IPSTATS_MIB_FRAGFAILS),
+ SNMP_MIB_ITEM("Ip6FragCreates", IPSTATS_MIB_FRAGCREATES),
+ SNMP_MIB_ITEM("Ip6InMcastPkts", IPSTATS_MIB_INMCASTPKTS),
+ SNMP_MIB_ITEM("Ip6OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS),
+ SNMP_MIB_SENTINEL
};
-static struct snmp_item snmp6_icmp6_list[] = {
+static struct snmp_mib snmp6_icmp6_list[] = {
/* icmpv6 mib according to RFC 2466
Exceptions: {In|Out}AdminProhibs are removed, because I see
OutRouterAdvertisements too.
OutGroupMembQueries too.
*/
-#define SNMP6_GEN(x) SNMP_ITEM(struct icmpv6_mib, x, #x)
- SNMP6_GEN(Icmp6InMsgs),
- SNMP6_GEN(Icmp6InErrors),
- SNMP6_GEN(Icmp6InDestUnreachs),
- SNMP6_GEN(Icmp6InPktTooBigs),
- SNMP6_GEN(Icmp6InTimeExcds),
- SNMP6_GEN(Icmp6InParmProblems),
- SNMP6_GEN(Icmp6InEchos),
- SNMP6_GEN(Icmp6InEchoReplies),
- SNMP6_GEN(Icmp6InGroupMembQueries),
- SNMP6_GEN(Icmp6InGroupMembResponses),
- SNMP6_GEN(Icmp6InGroupMembReductions),
- SNMP6_GEN(Icmp6InRouterSolicits),
- SNMP6_GEN(Icmp6InRouterAdvertisements),
- SNMP6_GEN(Icmp6InNeighborSolicits),
- SNMP6_GEN(Icmp6InNeighborAdvertisements),
- SNMP6_GEN(Icmp6InRedirects),
- SNMP6_GEN(Icmp6OutMsgs),
- SNMP6_GEN(Icmp6OutDestUnreachs),
- SNMP6_GEN(Icmp6OutPktTooBigs),
- SNMP6_GEN(Icmp6OutTimeExcds),
- SNMP6_GEN(Icmp6OutParmProblems),
- SNMP6_GEN(Icmp6OutEchoReplies),
- SNMP6_GEN(Icmp6OutRouterSolicits),
- SNMP6_GEN(Icmp6OutNeighborSolicits),
- SNMP6_GEN(Icmp6OutNeighborAdvertisements),
- SNMP6_GEN(Icmp6OutRedirects),
- SNMP6_GEN(Icmp6OutGroupMembResponses),
- SNMP6_GEN(Icmp6OutGroupMembReductions),
-#undef SNMP6_GEN
- SNMP_ITEM_SENTINEL
+ SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS),
+ SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS),
+ SNMP_MIB_ITEM("Icmp6InDestUnreachs", ICMP6_MIB_INDESTUNREACHS),
+ SNMP_MIB_ITEM("Icmp6InPktTooBigs", ICMP6_MIB_INPKTTOOBIGS),
+ SNMP_MIB_ITEM("Icmp6InTimeExcds", ICMP6_MIB_INTIMEEXCDS),
+ SNMP_MIB_ITEM("Icmp6InParmProblems", ICMP6_MIB_INPARMPROBLEMS),
+ SNMP_MIB_ITEM("Icmp6InEchos", ICMP6_MIB_INECHOS),
+ SNMP_MIB_ITEM("Icmp6InEchoReplies", ICMP6_MIB_INECHOREPLIES),
+ SNMP_MIB_ITEM("Icmp6InGroupMembQueries", ICMP6_MIB_INGROUPMEMBQUERIES),
+ SNMP_MIB_ITEM("Icmp6InGroupMembResponses", ICMP6_MIB_INGROUPMEMBRESPONSES),
+ SNMP_MIB_ITEM("Icmp6InGroupMembReductions", ICMP6_MIB_INGROUPMEMBREDUCTIONS),
+ SNMP_MIB_ITEM("Icmp6InRouterSolicits", ICMP6_MIB_INROUTERSOLICITS),
+ SNMP_MIB_ITEM("Icmp6InRouterAdvertisements", ICMP6_MIB_INROUTERADVERTISEMENTS),
+ SNMP_MIB_ITEM("Icmp6InNeighborSolicits", ICMP6_MIB_INNEIGHBORSOLICITS),
+ SNMP_MIB_ITEM("Icmp6InNeighborAdvertisements", ICMP6_MIB_INNEIGHBORADVERTISEMENTS),
+ SNMP_MIB_ITEM("Icmp6InRedirects", ICMP6_MIB_INREDIRECTS),
+ SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS),
+ SNMP_MIB_ITEM("Icmp6OutDestUnreachs", ICMP6_MIB_OUTDESTUNREACHS),
+ SNMP_MIB_ITEM("Icmp6OutPktTooBigs", ICMP6_MIB_OUTPKTTOOBIGS),
+ SNMP_MIB_ITEM("Icmp6OutTimeExcds", ICMP6_MIB_OUTTIMEEXCDS),
+ SNMP_MIB_ITEM("Icmp6OutParmProblems", ICMP6_MIB_OUTPARMPROBLEMS),
+ SNMP_MIB_ITEM("Icmp6OutEchoReplies", ICMP6_MIB_OUTECHOREPLIES),
+ SNMP_MIB_ITEM("Icmp6OutRouterSolicits", ICMP6_MIB_OUTROUTERSOLICITS),
+ SNMP_MIB_ITEM("Icmp6OutNeighborSolicits", ICMP6_MIB_OUTNEIGHBORSOLICITS),
+ SNMP_MIB_ITEM("Icmp6OutNeighborAdvertisements", ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS),
+ SNMP_MIB_ITEM("Icmp6OutRedirects", ICMP6_MIB_OUTREDIRECTS),
+ SNMP_MIB_ITEM("Icmp6OutGroupMembResponses", ICMP6_MIB_OUTGROUPMEMBRESPONSES),
+ SNMP_MIB_ITEM("Icmp6OutGroupMembReductions", ICMP6_MIB_OUTGROUPMEMBREDUCTIONS),
+ SNMP_MIB_SENTINEL
};
-static struct snmp_item snmp6_udp6_list[] = {
-#define SNMP6_GEN(x) SNMP_ITEM(struct udp_mib, Udp##x, "Udp6" #x)
- SNMP6_GEN(InDatagrams),
- SNMP6_GEN(NoPorts),
- SNMP6_GEN(InErrors),
- SNMP6_GEN(OutDatagrams),
-#undef SNMP6_GEN
- SNMP_ITEM_SENTINEL
+static struct snmp_mib snmp6_udp6_list[] = {
+ SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS),
+ SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS),
+ SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS),
+ SNMP_MIB_ITEM("Udp6OutDatagrams", UDP_MIB_OUTDATAGRAMS),
+ SNMP_MIB_SENTINEL
};
static unsigned long
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_possible(i))
continue;
- res +=
- *((unsigned long *) (((void *)per_cpu_ptr(mib[0], i)) +
- offt));
- res +=
- *((unsigned long *) (((void *)per_cpu_ptr(mib[1], i)) +
- offt));
+ res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
+ res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
}
return res;
}
static inline void
-snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_item *itemlist)
+snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_mib *itemlist)
{
int i;
for (i=0; itemlist[i].name; i++)
seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name,
- fold_field(mib, itemlist[i].offset));
+ fold_field(mib, itemlist[i].entry));
}
static int snmp6_seq_show(struct seq_file *seq, void *v)
if (np->rxopt.all)
datagram_recv_ctl(sk, msg, skb);
+
err = copied;
+ if (flags & MSG_TRUNC)
+ err = skb->len;
out_free:
skb_free_datagram(sk, skb);
if (err)
goto error_fault;
- IP6_INC_STATS(OutRequests);
+ IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
dst_output);
if (err > 0)
err = -EFAULT;
kfree_skb(skb);
error:
- IP6_INC_STATS(OutDiscards);
+ IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
return err;
}
static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
struct proto rawv6_prot = {
.name = "RAW",
.close = rawv6_close,
- .connect = udpv6_connect,
+ .connect = ip6_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = rawv6_ioctl,
.init = rawv6_init_sk,
spin_unlock(&fq->lock);
fq_put(fq);
- IP6_INC_STATS_BH(ReasmFails);
+ IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
}
}
fq_kill(fq);
- IP6_INC_STATS_BH(ReasmTimeout);
- IP6_INC_STATS_BH(ReasmFails);
+ IP6_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
+ IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
/* Send error only if the first segment arrived. */
if (fq->last_in&FIRST_IN && fq->fragments) {
return ip6_frag_intern(hash, fq);
oom:
- IP6_INC_STATS_BH(ReasmFails);
+ IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
return NULL;
}
((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));
if ((unsigned int)end > IPV6_MAXPLEN) {
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb,ICMPV6_HDR_FIELD, (u8*)&fhdr->frag_off - skb->nh.raw);
return;
}
/* RFC2460 says always send parameter problem in
* this case. -DaveM
*/
- IP6_INC_STATS_BH(InHdrErrors);
+ IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
offsetof(struct ipv6hdr, payload_len));
return;
return;
err:
- IP6_INC_STATS(ReasmFails);
+ IP6_INC_STATS(IPSTATS_MIB_REASMFAILS);
kfree_skb(skb);
}
if (head->ip_summed == CHECKSUM_HW)
head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum);
- IP6_INC_STATS_BH(ReasmOKs);
+ IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
fq->fragments = NULL;
*nhoffp = nhoff;
return 1;
if (net_ratelimit())
printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
- IP6_INC_STATS_BH(ReasmFails);
+ IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
return -1;
}
hdr = skb->nh.ipv6h;
- IP6_INC_STATS_BH(ReasmReqds);
+ IP6_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
/* Jumbo payload inhibits frag. header */
if (hdr->payload_len==0) {
- IP6_INC_STATS(InHdrErrors);
+ IP6_INC_STATS(IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
return -1;
}
if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+sizeof(struct frag_hdr))) {
- IP6_INC_STATS(InHdrErrors);
+ IP6_INC_STATS(IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
return -1;
}
if (!(fhdr->frag_off & htons(0xFFF9))) {
/* It is not a fragmented frame */
skb->h.raw += sizeof(struct frag_hdr);
- IP6_INC_STATS_BH(ReasmOKs);
+ IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
*nhoffp = (u8*)fhdr - skb->nh.raw;
return 1;
return ret;
}
- IP6_INC_STATS_BH(ReasmFails);
+ IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
kfree_skb(skb);
return -1;
}
/* Protected by rt6_lock. */
static struct dst_entry *ndisc_dst_gc_list;
static int ipv6_get_mtu(struct net_device *dev);
-static inline unsigned int ipv6_advmss(unsigned int mtu);
+
+static inline unsigned int ipv6_advmss(unsigned int mtu)
+{
+ mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
+
+ if (mtu < ip6_rt_min_advmss)
+ mtu = ip6_rt_min_advmss;
+
+ /*
+ * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
+ * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
+ * IPV6_MAXPLEN is also valid and means: "any MSS,
+ * rely only on pmtu discovery"
+ */
+ if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
+ mtu = IPV6_MAXPLEN;
+ return mtu;
+}
struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
struct neighbour *neigh,
return mtu;
}
-static inline unsigned int ipv6_advmss(unsigned int mtu)
-{
- mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
-
- if (mtu < ip6_rt_min_advmss)
- mtu = ip6_rt_min_advmss;
-
- /*
- * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
- * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
- * IPV6_MAXPLEN is also valid and means: "any MSS,
- * rely only on pmtu discovery"
- */
- if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
- mtu = IPV6_MAXPLEN;
- return mtu;
-}
-
static int ipv6_get_hoplimit(struct net_device *dev)
{
int hoplimit = ipv6_devconf.hop_limit;
int ip6_pkt_discard(struct sk_buff *skb)
{
- IP6_INC_STATS(OutNoRoutes);
+ IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, skb->dev);
kfree_skb(skb);
return 0;
static
int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
if (write) {
- proc_dointvec(ctl, write, filp, buffer, lenp);
+ proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
if (flush_delay < 0)
flush_delay = 0;
fib6_run_gc((unsigned long)flush_delay);
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
+#include <net/addrconf.h>
+#include <net/snmp.h>
#include <asm/uaccess.h>
/* Silly. Should hash-dance instead... */
local_bh_disable();
tcp_tw_deschedule(tw);
- NET_INC_STATS_BH(TimeWaitRecycled);
+ NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
local_bh_enable();
tcp_tw_put(tw);
sk = tcp_v6_lookup(&hdr->daddr, th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
if (sk == NULL) {
- ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), Icmp6InErrors);
+ ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
return;
}
bh_lock_sock(sk);
if (sock_owned_by_user(sk))
- NET_INC_STATS_BH(LockDroppedIcmps);
+ NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == TCP_CLOSE)
goto out;
seq = ntohl(th->seq);
if (sk->sk_state != TCP_LISTEN &&
!between(seq, tp->snd_una, tp->snd_nxt)) {
- NET_INC_STATS_BH(OutOfWindowIcmps);
+ NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
BUG_TRAP(req->sk == NULL);
if (seq != req->snt_isn) {
- NET_INC_STATS_BH(OutOfWindowIcmps);
+ NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
case TCP_SYN_RECV: /* Cannot happen.
It can, it SYNs are crossed. --ANK */
if (!sock_owned_by_user(sk)) {
- TCP_INC_STATS_BH(TcpAttemptFails);
+ TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
sk->sk_err = err;
sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
/* sk = NULL, but it is safe for now. RST socket required. */
if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
ip6_xmit(NULL, buff, &fl, NULL, 0);
- TCP_INC_STATS_BH(TcpOutSegs);
- TCP_INC_STATS_BH(TcpOutRsts);
+ TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
+ TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
return;
}
if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
ip6_xmit(NULL, buff, &fl, NULL, 0);
- TCP_INC_STATS_BH(TcpOutSegs);
+ TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
return;
}
if (req)
tcp_openreq_free(req);
- TCP_INC_STATS_BH(TcpAttemptFails);
+ TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
return 0; /* don't send reset */
}
return newsk;
out_overflow:
- NET_INC_STATS_BH(ListenOverflows);
+ NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
- NET_INC_STATS_BH(ListenDrops);
+ NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
if (opt && opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
dst_release(dst);
kfree_skb(skb);
return 0;
csum_err:
- TCP_INC_STATS_BH(TcpInErrs);
+ TCP_INC_STATS_BH(TCP_MIB_INERRS);
goto discard;
/*
* Count it even if it's bad.
*/
- TCP_INC_STATS_BH(TcpInSegs);
+ TCP_INC_STATS_BH(TCP_MIB_INSEGS);
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
goto discard_it;
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
- TCP_INC_STATS_BH(TcpInErrs);
+ TCP_INC_STATS_BH(TCP_MIB_INERRS);
} else {
tcp_v6_send_reset(skb);
}
}
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
- TCP_INC_STATS_BH(TcpInErrs);
+ TCP_INC_STATS_BH(TCP_MIB_INERRS);
tcp_tw_put((struct tcp_tw_bucket *) sk);
goto discard_it;
}
struct tcp_iter_state *st;
if (v == SEQ_START_TOKEN) {
- seq_printf(seq,
- " sl "
- "local_address "
- "remote_address "
- "st tx_queue rx_queue tr tm->when retrnsmt"
- " uid timeout inode\n");
+ seq_puts(seq,
+ " sl "
+ "local_address "
+ "remote_address "
+ "st tx_queue rx_queue tr tm->when retrnsmt"
+ " uid timeout inode\n");
goto out;
}
st = seq->private;
#include <net/addrconf.h>
#include <net/ip.h>
#include <net/udp.h>
+#include <net/raw.h>
#include <net/inet_common.h>
#include <net/ip6_checksum.h>
*
*/
-int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
-{
- struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
- struct inet_opt *inet = inet_sk(sk);
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct in6_addr *daddr;
- struct dst_entry *dst;
- struct flowi fl;
- struct ip6_flowlabel *flowlabel = NULL;
- int addr_type;
- int err;
-
- if (usin->sin6_family == AF_INET) {
- if (__ipv6_only_sock(sk))
- return -EAFNOSUPPORT;
- err = udp_connect(sk, uaddr, addr_len);
- goto ipv4_connected;
- }
-
- if (addr_len < SIN6_LEN_RFC2133)
- return -EINVAL;
-
- if (usin->sin6_family != AF_INET6)
- return -EAFNOSUPPORT;
-
- memset(&fl, 0, sizeof(fl));
- if (np->sndflow) {
- fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
- if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
- flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
- if (flowlabel == NULL)
- return -EINVAL;
- ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
- }
- }
-
- addr_type = ipv6_addr_type(&usin->sin6_addr);
-
- if (addr_type == IPV6_ADDR_ANY) {
- /*
- * connect to self
- */
- usin->sin6_addr.s6_addr[15] = 0x01;
- }
-
- daddr = &usin->sin6_addr;
-
- if (addr_type == IPV6_ADDR_MAPPED) {
- struct sockaddr_in sin;
-
- if (__ipv6_only_sock(sk)) {
- err = -ENETUNREACH;
- goto out;
- }
- sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = daddr->s6_addr32[3];
- sin.sin_port = usin->sin6_port;
-
- err = udp_connect(sk, (struct sockaddr*) &sin, sizeof(sin));
-
-ipv4_connected:
- if (err)
- goto out;
-
- ipv6_addr_set(&np->daddr, 0, 0, htonl(0x0000ffff), inet->daddr);
-
- if (ipv6_addr_any(&np->saddr)) {
- ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000ffff),
- inet->saddr);
- }
-
- if (ipv6_addr_any(&np->rcv_saddr)) {
- ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000ffff),
- inet->rcv_saddr);
- }
- goto out;
- }
-
- if (addr_type&IPV6_ADDR_LINKLOCAL) {
- if (addr_len >= sizeof(struct sockaddr_in6) &&
- usin->sin6_scope_id) {
- if (sk->sk_bound_dev_if &&
- sk->sk_bound_dev_if != usin->sin6_scope_id) {
- err = -EINVAL;
- goto out;
- }
- sk->sk_bound_dev_if = usin->sin6_scope_id;
- if (!sk->sk_bound_dev_if &&
- (addr_type & IPV6_ADDR_MULTICAST))
- fl.oif = np->mcast_oif;
- }
-
- /* Connect to link-local address requires an interface */
- if (!sk->sk_bound_dev_if) {
- err = -EINVAL;
- goto out;
- }
- }
-
- ipv6_addr_copy(&np->daddr, daddr);
- np->flow_label = fl.fl6_flowlabel;
-
- inet->dport = usin->sin6_port;
-
- /*
- * Check for a route to destination an obtain the
- * destination cache for it.
- */
-
- fl.proto = IPPROTO_UDP;
- ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
- ipv6_addr_copy(&fl.fl6_src, &np->saddr);
- fl.oif = sk->sk_bound_dev_if;
- fl.fl_ip_dport = inet->dport;
- fl.fl_ip_sport = inet->sport;
-
- if (!fl.oif && (addr_type&IPV6_ADDR_MULTICAST))
- fl.oif = np->mcast_oif;
-
- if (flowlabel) {
- if (flowlabel->opt && flowlabel->opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt;
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- }
- } else if (np->opt && np->opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- }
-
- err = ip6_dst_lookup(sk, &dst, &fl);
- if (err)
- goto out;
-
- /* source address lookup done in ip6_dst_lookup */
-
- if (ipv6_addr_any(&np->saddr))
- ipv6_addr_copy(&np->saddr, &fl.fl6_src);
-
- if (ipv6_addr_any(&np->rcv_saddr)) {
- ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src);
- inet->rcv_saddr = LOOPBACK4_IPV6;
- }
-
- ip6_dst_store(sk, dst,
- !ipv6_addr_cmp(&fl.fl6_dst, &np->daddr) ?
- &np->daddr : NULL);
-
- sk->sk_state = TCP_ESTABLISHED;
-out:
- fl6_sock_release(flowlabel);
- return err;
-}
-
static void udpv6_close(struct sock *sk, long timeout)
{
sk_common_release(sk);
sin6->sin6_scope_id = IP6CB(skb)->iif;
}
}
+
err = copied;
+ if (flags & MSG_TRUNC)
+ err = skb->len - sizeof(struct udphdr);
out_free:
skb_free_datagram(sk, skb);
skb_free_datagram(sk, skb);
if (flags & MSG_DONTWAIT) {
- UDP6_INC_STATS_USER(UdpInErrors);
+ UDP6_INC_STATS_USER(UDP_MIB_INERRORS);
return -EAGAIN;
}
goto try_again;
if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
- UDP6_INC_STATS_BH(UdpInErrors);
+ UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
kfree_skb(skb);
return 0;
}
}
if (sock_queue_rcv_skb(sk,skb)<0) {
- UDP6_INC_STATS_BH(UdpInErrors);
+ UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
kfree_skb(skb);
return 0;
}
- UDP6_INC_STATS_BH(UdpInDatagrams);
+ UDP6_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
return 0;
}
if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
(unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
goto discard;
- UDP6_INC_STATS_BH(UdpNoPorts);
+ UDP6_INC_STATS_BH(UDP_MIB_NOPORTS);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
printk(KERN_DEBUG "UDP: short packet: %d/%u\n", ulen, skb->len);
discard:
- UDP6_INC_STATS_BH(UdpInErrors);
+ UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
kfree_skb(skb);
return(0);
}
out:
fl6_sock_release(flowlabel);
if (!err) {
- UDP6_INC_STATS_USER(UdpOutDatagrams);
+ UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS);
return len;
}
return err;
struct proto udpv6_prot = {
.name = "UDP",
.close = udpv6_close,
- .connect = udpv6_connect,
+ .connect = ip6_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = udpv6_destroy_sock,
struct xfrm_state *x;
int xfrm_nr = 0;
int decaps = 0;
- int nexthdr = 0;
- u8 *prevhdr = NULL;
+ int nexthdr;
+ unsigned int nhoff;
- ip6_find_1stfragopt(skb, &prevhdr);
- nexthdr = *prevhdr;
- *nhoffp = prevhdr - skb->nh.raw;
+ nhoff = *nhoffp;
+ nexthdr = skb->nh.raw[nhoff];
if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0)
goto drop;
if (nexthdr <= 0)
goto drop_unlock;
+ skb->nh.raw[nhoff] = nexthdr;
+
if (x->props.replay_window)
xfrm_replay_advance(x, seq);
/* Copy neighbour for reachability confirmation */
dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
dst_prev->input = rt->u.dst.input;
- dst_prev->output = dst_prev->xfrm->type->output;
+ dst_prev->output = xfrm6_output;
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
x->u.rt6.rt6i_flags = rt0->rt6i_flags&(RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL);
#include <linux/ipsec.h>
#include <net/ipv6.h>
-extern struct xfrm_state_afinfo xfrm6_state_afinfo;
+static struct xfrm_state_afinfo xfrm6_state_afinfo;
static void
__xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
proto == x->id.proto &&
!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)x->props.saddr.a6) &&
reqid == x->props.reqid &&
- x->km.state == XFRM_STATE_ACQ) {
- if (!x0)
- x0 = x;
- if (x->id.spi)
- continue;
+ x->km.state == XFRM_STATE_ACQ &&
+ !x->id.spi) {
x0 = x;
break;
}
#include <linux/list.h>
#include <net/ip.h>
#include <net/xfrm.h>
-#include <net/icmp.h>
#include <net/ipv6.h>
+#include <net/protocol.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
EXPORT_SYMBOL(xfrm6_tunnel_free_spi);
-int xfrm6_tunnel_check_size(struct sk_buff *skb)
-{
- int mtu, ret = 0;
- struct dst_entry *dst = skb->dst;
-
- mtu = dst_pmtu(dst) - sizeof(struct ipv6hdr);
- if (mtu < IPV6_MIN_MTU)
- mtu = IPV6_MIN_MTU;
-
- if (skb->len > mtu) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
- ret = -EMSGSIZE;
- }
-
- return ret;
-}
-
-EXPORT_SYMBOL(xfrm6_tunnel_check_size);
-
static int xfrm6_tunnel_output(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
- struct dst_entry *dst = skb->dst;
- struct xfrm_state *x = dst->xfrm;
- struct ipv6hdr *iph, *top_iph;
- int err;
-
- if ((err = xfrm6_tunnel_check_size(skb)) != 0)
- goto error_nolock;
-
- iph = skb->nh.ipv6h;
-
- top_iph = (struct ipv6hdr *)skb_push(skb, x->props.header_len);
- top_iph->version = 6;
- top_iph->priority = iph->priority;
- top_iph->flow_lbl[0] = iph->flow_lbl[0];
- top_iph->flow_lbl[1] = iph->flow_lbl[1];
- top_iph->flow_lbl[2] = iph->flow_lbl[2];
- top_iph->nexthdr = IPPROTO_IPV6;
- top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
- top_iph->hop_limit = iph->hop_limit;
- memcpy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr, sizeof(struct in6_addr));
- memcpy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr, sizeof(struct in6_addr));
- skb->nh.raw = skb->data;
- skb->h.raw = skb->nh.raw + sizeof(struct ipv6hdr);
-
- x->curlft.bytes += skb->len;
- x->curlft.packets++;
+ struct ipv6hdr *top_iph;
- spin_unlock_bh(&x->lock);
-
- if ((skb->dst = dst_pop(dst)) == NULL) {
- kfree_skb(skb);
- err = -EHOSTUNREACH;
- goto error_nolock;
- }
-
- return NET_XMIT_BYPASS;
+ top_iph = (struct ipv6hdr *)skb->data;
+ top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
-error_nolock:
- kfree_skb(skb);
- return err;
+ return 0;
}
static int xfrm6_tunnel_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
unsigned char *asmptr;
int err;
- IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len);
+ IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len);
/* Note : socket.c set MSG_EOR on SEQPACKET sockets */
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
/* Check that we don't send out to big frames */
if (len > self->max_data_size) {
- IRDA_DEBUG(2, "%s(), Chopping frame from %d to %d bytes!\n",
+ IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n",
__FUNCTION__, len, self->max_data_size);
len = self->max_data_size;
}
copied = skb->len;
if (copied > size) {
- IRDA_DEBUG(2, "%s(), Received truncated frame (%d < %d)!\n",
+ IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n",
__FUNCTION__, copied, size);
copied = size;
msg->msg_flags |= MSG_TRUNC;
unsigned char *asmptr;
int err;
- IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len);
+ IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len);
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
return -EINVAL;
*/
if (len > self->max_data_size) {
IRDA_DEBUG(0, "%s(), Warning to much data! "
- "Chopping frame from %d to %d bytes!\n",
+ "Chopping frame from %zd to %d bytes!\n",
__FUNCTION__, len, self->max_data_size);
len = self->max_data_size;
}
unsigned char *asmptr;
int err;
- IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len);
+ IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len);
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
return -EINVAL;
*/
if (len > self->max_data_size) {
IRDA_DEBUG(0, "%s(), Warning to much data! "
- "Chopping frame from %d to %d bytes!\n",
+ "Chopping frame from %zd to %d bytes!\n",
__FUNCTION__, len, self->max_data_size);
len = self->max_data_size;
}
* us on that - Jean II */
static int do_devname(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
- ret = proc_dostring(table, write, filp, buffer, lenp);
+ ret = proc_dostring(table, write, filp, buffer, lenp, ppos);
if (ret == 0 && write) {
struct ias_value *val;
min_spi = range->sadb_spirange_min;
max_spi = range->sadb_spirange_max;
} else {
- min_spi = htonl(0x100);
- max_spi = htonl(0x0fffffff);
+ min_spi = 0x100;
+ max_spi = 0x0fffffff;
}
- xfrm_alloc_spi(x, min_spi, max_spi);
+ xfrm_alloc_spi(x, htonl(min_spi), htonl(max_spi));
if (x->id.spi)
resp_skb = pfkey_xfrm_state2msg(x, 0, 3);
}
*/
#include <linux/config.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
module_init(nr_proto_init);
-
-MODULE_PARM(nr_ndevs, "i");
+module_param(nr_ndevs, int, 0);
MODULE_PARM_DESC(nr_ndevs, "number of NET/ROM devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
*/
#include <linux/config.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;
-HLIST_HEAD(rose_list);
+static HLIST_HEAD(rose_list);
spinlock_t rose_list_lock = SPIN_LOCK_UNLOCKED;
static struct proto_ops rose_proto_ops;
}
module_init(rose_proto_init);
-MODULE_PARM(rose_ndevs, "i");
+module_param(rose_ndevs, int, 0);
MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
struct rxrpc_message *msg)
{
struct msghdr msghdr;
- mm_segment_t oldfs;
int ret;
_enter("%p{%d}", conn, ntohs(conn->addr.sin_port));
/* set up the message to be transmitted */
msghdr.msg_name = &conn->addr;
msghdr.msg_namelen = sizeof(conn->addr);
- /*
- * the following is safe, since for compiler definitions of kvec and
- * iovec are identical, yielding the same in-core layout and alignment
- */
- msghdr.msg_iov = (struct iovec *)msg->data;
- msghdr.msg_iovlen = msg->dcount;
msghdr.msg_control = NULL;
msghdr.msg_controllen = 0;
msghdr.msg_flags = MSG_CONFIRM | MSG_DONTWAIT;
htons(conn->addr.sin_port));
/* send the message */
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- ret = sock_sendmsg(conn->trans->socket, &msghdr, msg->dsize);
- set_fs(oldfs);
-
+ ret = kernel_sendmsg(conn->trans->socket, &msghdr,
+ msg->data, msg->dcount, msg->dsize);
if (ret < 0) {
msg->state = RXRPC_MSG_ERROR;
- }
- else {
+ } else {
msg->state = RXRPC_MSG_SENT;
ret = 0;
struct sockaddr_in sin;
struct msghdr msghdr;
struct kvec iov[2];
- mm_segment_t oldfs;
uint32_t _error;
int len, ret;
msghdr.msg_name = &sin;
msghdr.msg_namelen = sizeof(sin);
- /*
- * the following is safe, since for compiler definitions of kvec and
- * iovec are identical, yielding the same in-core layout and alignment
- */
- msghdr.msg_iov = (struct iovec *)iov;
- msghdr.msg_iovlen = 2;
msghdr.msg_control = NULL;
msghdr.msg_controllen = 0;
msghdr.msg_flags = MSG_DONTWAIT;
htons(sin.sin_port));
/* send the message */
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- ret = sock_sendmsg(trans->socket, &msghdr, len);
- set_fs(oldfs);
+ ret = kernel_sendmsg(trans->socket, &msghdr, iov, 2, len);
_leave(" = %d", ret);
return ret;
struct list_head connq, *_p;
struct errormsg emsg;
struct msghdr msg;
- mm_segment_t oldfs;
uint16_t port;
int local, err;
/* try and receive an error message */
msg.msg_name = &sin;
msg.msg_namelen = sizeof(sin);
- msg.msg_iov = NULL;
- msg.msg_iovlen = 0;
msg.msg_control = &emsg;
msg.msg_controllen = sizeof(emsg);
msg.msg_flags = 0;
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- err = sock_recvmsg(trans->socket, &msg, 0,
+ err = kernel_recvmsg(trans->socket, &msg, NULL, 0, 0,
MSG_ERRQUEUE | MSG_DONTWAIT | MSG_TRUNC);
- set_fs(oldfs);
if (err == -EAGAIN) {
_leave("");
#
# Traffic control configuration.
#
+choice
+ prompt "Packet scheduler clock source"
+ depends on NET_SCHED
+ default NET_SCH_CLK_JIFFIES
+ help
+ Packet schedulers need a monotonic clock that increments at a static
+ rate. The kernel provides several suitable interfaces, each with
+ different properties:
+
+ - high resolution (us or better)
+ - fast to read (minimal locking, no i/o access)
+ - synchronized on all processors
+ - handles cpu clock frequency changes
+
+ but nothing provides all of the above.
+
+config NET_SCH_CLK_JIFFIES
+ bool "Timer interrupt"
+ help
+ Say Y here if you want to use the timer interrupt (jiffies) as clock
+ source. This clock source is fast, synchronized on all processors and
+ handles cpu clock frequency changes, but its resolution is too low
+ for accurate shaping except at very low speed.
+
+config NET_SCH_CLK_GETTIMEOFDAY
+ bool "gettimeofday"
+ help
+ Say Y here if you want to use gettimeofday as clock source. This clock
+ source has high resolution, is synchronized on all processors and
+ handles cpu clock frequency changes, but it is slow.
+
+ Choose this if you need a high resolution clock source but can't use
+ the CPU's cycle counter.
+
+config NET_SCH_CLK_CPU
+ bool "CPU cycle counter"
+ depends on X86_TSC || X86_64 || ALPHA || SPARC64 || PPC64 || IA64
+ help
+ Say Y here if you want to use the CPU's cycle counter as clock source.
+ This is a cheap and high resolution clock source, but on some
+ architectures it is not synchronized on all processors and doesn't
+ handle cpu clock frequency changes.
+
+ The useable cycle counters are:
+
+ x86/x86_64 - Timestamp Counter
+ alpha - Cycle Counter
+ sparc64 - %ticks register
+ ppc64 - Time base
+ ia64 - Interval Time Counter
+
+ Choose this if your CPU's cycle counter is working properly.
+
+endchoice
+
config NET_SCH_CBQ
tristate "CBQ packet scheduler"
depends on NET_SCHED
testing applications or protocols.
To compile this driver as a module, choose M here: the module
- will be called sch_delay.
+ will be called sch_netem.
+
+ If unsure, say N.
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
kfree(tp);
goto errout;
}
- write_lock(&qdisc_tree_lock);
- spin_lock_bh(&dev->queue_lock);
+
+ qdisc_lock_tree(dev);
tp->next = *back;
*back = tp;
- spin_unlock_bh(&dev->queue_lock);
- write_unlock(&qdisc_tree_lock);
+ qdisc_unlock_tree(dev);
+
} else if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], tp->ops->kind))
goto errout;
if (fh == 0) {
if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
- write_lock(&qdisc_tree_lock);
- spin_lock_bh(&dev->queue_lock);
+ qdisc_lock_tree(dev);
*back = tp->next;
- spin_unlock_bh(&dev->queue_lock);
- write_unlock(&qdisc_tree_lock);
+ qdisc_unlock_tree(dev);
+
tfilter_notify(skb, n, tp, fh_s, RTM_DELTFILTER);
tcf_destroy(tp);
err = 0;
return err;
}
+unsigned long tcf_set_class(struct tcf_proto *tp, unsigned long *clp,
+ unsigned long cl)
+{
+ unsigned long old_cl;
+
+ tcf_tree_lock(tp);
+ old_cl = __cls_set_class(clp, cl);
+ tcf_tree_unlock(tp);
+
+ return old_cl;
+}
+
+
static int
tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, unsigned long fh,
u32 pid, u32 seq, unsigned flags, int event)
EXPORT_SYMBOL(register_tcf_proto_ops);
EXPORT_SYMBOL(unregister_tcf_proto_ops);
+EXPORT_SYMBOL(tcf_set_class);
struct tc_u_hnode *ht_up;
#ifdef CONFIG_NET_CLS_ACT
struct tc_action *action;
-#ifdef CONFIG_NET_CLS_IND
- char indev[IFNAMSIZ];
-#endif
#else
#ifdef CONFIG_NET_CLS_POLICE
struct tcf_police *police;
#endif
+#endif
+#ifdef CONFIG_NET_CLS_IND
+ char indev[IFNAMSIZ];
#endif
u8 fshift;
struct tcf_result res;
struct tc_u_hnode *ht_down;
+#ifdef CONFIG_CLS_U32_PERF
+ struct tc_u32_pcnt *pf;
+#endif
struct tc_u32_sel sel;
};
int sdepth = 0;
int off2 = 0;
int sel = 0;
+#ifdef CONFIG_CLS_U32_PERF
+ int j;
+#endif
int i;
next_ht:
struct tc_u32_key *key = n->sel.keys;
#ifdef CONFIG_CLS_U32_PERF
- n->sel.rcnt +=1;
+ n->pf->rcnt +=1;
+ j = 0;
#endif
for (i = n->sel.nkeys; i>0; i--, key++) {
goto next_knode;
}
#ifdef CONFIG_CLS_U32_PERF
- key->kcnt +=1;
+ n->pf->kcnts[j] +=1;
+ j++;
#endif
}
if (n->ht_down == NULL) {
if (n->sel.flags&TC_U32_TERMINAL) {
*res = n->res;
-#ifdef CONFIG_NET_CLS_ACT
#ifdef CONFIG_NET_CLS_IND
/* yes, i know it sucks but the feature is
** optional dammit! - JHS */
}
#endif
#ifdef CONFIG_CLS_U32_PERF
- n->sel.rhit +=1;
+ n->pf->rhit +=1;
#endif
+#ifdef CONFIG_NET_CLS_ACT
if (n->action) {
int pol_res = tcf_action_exec(skb, n->action);
if (skb->tc_classid > 0) {
#endif
if (n->ht_down)
n->ht_down->refcnt--;
+#ifdef CONFIG_CLS_U32_PERF
+ if (n && (NULL != n->pf))
+ kfree(n->pf);
+#endif
kfree(n);
return 0;
}
tcf_action_destroy(act, TCA_ACT_UNBIND);
}
-#ifdef CONFIG_NET_CLS_IND
- n->indev[0] = 0;
- if(tb[TCA_U32_INDEV-1]) {
- struct rtattr *input_dev = tb[TCA_U32_INDEV-1];
- if (RTA_PAYLOAD(input_dev) >= IFNAMSIZ) {
- printk("cls_u32: bad indev name %s\n",(char*)RTA_DATA(input_dev));
- /* should we clear state first? */
- return -EINVAL;
- }
- sprintf(n->indev, "%s", (char*)RTA_DATA(input_dev));
- }
-#endif
#else
#ifdef CONFIG_NET_CLS_POLICE
tcf_police_release(police, TCA_ACT_UNBIND);
}
#endif
+#endif
+#ifdef CONFIG_NET_CLS_IND
+ n->indev[0] = 0;
+ if(tb[TCA_U32_INDEV-1]) {
+ struct rtattr *input_dev = tb[TCA_U32_INDEV-1];
+ if (RTA_PAYLOAD(input_dev) >= IFNAMSIZ) {
+ printk("cls_u32: bad indev name %s\n",(char*)RTA_DATA(input_dev));
+ /* should we clear state first? */
+ return -EINVAL;
+ }
+ sprintf(n->indev, "%s", (char*)RTA_DATA(input_dev));
+ printk("got IND %s\n",n->indev);
+ }
#endif
return 0;
s = RTA_DATA(tb[TCA_U32_SEL-1]);
-#ifdef CONFIG_CLS_U32_PERF
- if (RTA_PAYLOAD(tb[TCA_U32_SEL-1]) <
- (s->nkeys*sizeof(struct tc_u32_key)) + sizeof(struct tc_u32_sel)) {
- printk("Please upgrade your iproute2 tools or compile proper options in!\n");
- return -EINVAL;
-}
-#endif
n = kmalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
if (n == NULL)
return -ENOBUFS;
+
memset(n, 0, sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key));
+#ifdef CONFIG_CLS_U32_PERF
+ n->pf = kmalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(__u64), GFP_KERNEL);
+ if (n->pf == NULL) {
+ kfree(n);
+ return -ENOBUFS;
+ }
+ memset(n->pf, 0, sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(__u64));
+#endif
+
memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
n->ht_up = ht;
n->handle = handle;
*arg = (unsigned long)n;
return 0;
}
+#ifdef CONFIG_CLS_U32_PERF
+ if (n && (NULL != n->pf))
+ kfree(n->pf);
+#endif
kfree(n);
return err;
}
p_rta->rta_len = skb->tail - (u8*)p_rta;
}
-#ifdef CONFIG_NET_CLS_IND
- if(strlen(n->indev)) {
- struct rtattr * p_rta = (struct rtattr*)skb->tail;
- RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
- p_rta->rta_len = skb->tail - (u8*)p_rta;
- }
-#endif
#else
#ifdef CONFIG_NET_CLS_POLICE
}
#endif
+#endif
+
+#ifdef CONFIG_NET_CLS_IND
+ if(strlen(n->indev)) {
+ struct rtattr * p_rta = (struct rtattr*)skb->tail;
+ RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
+ p_rta->rta_len = skb->tail - (u8*)p_rta;
+ }
+#endif
+#ifdef CONFIG_CLS_U32_PERF
+ RTA_PUT(skb, TCA_U32_PCNT,
+ sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(__u64),
+ n->pf);
#endif
}
rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_ACT
- if (TC_U32_KEY(n->handle) && n->action && n->action->type == TCA_OLD_COMPAT) {
- if (tcf_action_copy_stats(skb,n->action))
- goto rtattr_failure;
+ if (TC_U32_KEY(n->handle) != 0) {
+ if (TC_U32_KEY(n->handle) && n->action && n->action->type == TCA_OLD_COMPAT) {
+ if (tcf_action_copy_stats(skb,n->action))
+ goto rtattr_failure;
+ }
}
#else
#ifdef CONFIG_NET_CLS_POLICE
static int __init init_u32(void)
{
+ printk("u32 classifier\n");
+#ifdef CONFIG_CLS_U32_PERF
+ printk(" Perfomance counters on\n");
+#endif
+#ifdef CONFIG_NET_CLS_POLICE
+ printk(" OLD policer on \n");
+#endif
+#ifdef CONFIG_NET_CLS_IND
+ printk(" input device check on \n");
+#endif
+#ifdef CONFIG_NET_CLS_ACT
+ printk(" Actions configured \n");
+#endif
return register_tcf_proto_ops(&cls_u32_ops);
}
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
+#include <linux/list.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
{
struct Qdisc *q;
- for (q = dev->qdisc_list; q; q = q->next) {
+ list_for_each_entry(q, &dev->qdisc_list, list) {
if (q->handle == handle)
return q;
}
if (dev->flags & IFF_UP)
dev_deactivate(dev);
- write_lock(&qdisc_tree_lock);
- spin_lock_bh(&dev->queue_lock);
+ qdisc_lock_tree(dev);
if (qdisc && qdisc->flags&TCQ_F_INGRES) {
oqdisc = dev->qdisc_ingress;
/* Prune old scheduler */
dev->qdisc = &noop_qdisc;
}
- spin_unlock_bh(&dev->queue_lock);
- write_unlock(&qdisc_tree_lock);
+ qdisc_unlock_tree(dev);
if (dev->flags & IFF_UP)
dev_activate(dev);
memset(sch, 0, size);
+ INIT_LIST_HEAD(&sch->list);
skb_queue_head_init(&sch->q);
if (handle == TC_H_INGRESS)
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev = dev;
+ dev_hold(dev);
atomic_set(&sch->refcnt, 1);
sch->stats_lock = &dev->queue_lock;
if (handle == 0) {
* before we set a netdevice's qdisc pointer to sch */
smp_wmb();
if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
- write_lock(&qdisc_tree_lock);
- sch->next = dev->qdisc_list;
- dev->qdisc_list = sch;
- write_unlock(&qdisc_tree_lock);
+ qdisc_lock_tree(dev);
+ list_add_tail(&sch->list, &dev->qdisc_list);
+ qdisc_unlock_tree(dev);
+
#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
qdisc_new_estimator(&sch->stats, sch->stats_lock,
nlh->nlmsg_flags = flags;
tcm = NLMSG_DATA(nlh);
tcm->tcm_family = AF_UNSPEC;
- tcm->tcm_ifindex = q->dev ? q->dev->ifindex : 0;
+ tcm->tcm_ifindex = q->dev->ifindex;
tcm->tcm_parent = clid;
tcm->tcm_handle = q->handle;
tcm->tcm_info = atomic_read(&q->refcnt);
continue;
if (idx > s_idx)
s_q_idx = 0;
- read_lock(&qdisc_tree_lock);
- for (q = dev->qdisc_list, q_idx = 0; q;
- q = q->next, q_idx++) {
- if (q_idx < s_q_idx)
+ read_lock_bh(&qdisc_tree_lock);
+ q_idx = 0;
+ list_for_each_entry(q, &dev->qdisc_list, list) {
+ if (q_idx < s_q_idx) {
+ q_idx++;
continue;
+ }
if (tc_fill_qdisc(skb, q, 0, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) {
- read_unlock(&qdisc_tree_lock);
+ read_unlock_bh(&qdisc_tree_lock);
goto done;
}
+ q_idx++;
}
- read_unlock(&qdisc_tree_lock);
+ read_unlock_bh(&qdisc_tree_lock);
}
done:
nlh->nlmsg_flags = flags;
tcm = NLMSG_DATA(nlh);
tcm->tcm_family = AF_UNSPEC;
- tcm->tcm_ifindex = q->dev ? q->dev->ifindex : 0;
+ tcm->tcm_ifindex = q->dev->ifindex;
tcm->tcm_parent = q->handle;
tcm->tcm_handle = q->handle;
tcm->tcm_info = 0;
return 0;
s_t = cb->args[0];
-
- read_lock(&qdisc_tree_lock);
- for (q=dev->qdisc_list, t=0; q; q = q->next, t++) {
- if (t < s_t) continue;
- if (!q->ops->cl_ops) continue;
- if (tcm->tcm_parent && TC_H_MAJ(tcm->tcm_parent) != q->handle)
+ t = 0;
+
+ read_lock_bh(&qdisc_tree_lock);
+ list_for_each_entry(q, &dev->qdisc_list, list) {
+ if (t < s_t || !q->ops->cl_ops ||
+ (tcm->tcm_parent &&
+ TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
+ t++;
continue;
+ }
if (t > s_t)
memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
arg.w.fn = qdisc_class_dump;
cb->args[1] = arg.w.count;
if (arg.w.stop)
break;
+ t++;
}
- read_unlock(&qdisc_tree_lock);
+ read_unlock_bh(&qdisc_tree_lock);
cb->args[0] = t;
};
#endif
-#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
+#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
int psched_tod_diff(int delta_sec, int bound)
{
int delta;
EXPORT_SYMBOL(psched_tod_diff);
#endif
-psched_time_t psched_time_base;
-
-#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
+#ifdef CONFIG_NET_SCH_CLK_CPU
psched_tdiff_t psched_clock_per_hz;
int psched_clock_scale;
EXPORT_SYMBOL(psched_clock_per_hz);
EXPORT_SYMBOL(psched_clock_scale);
-#endif
-#ifdef PSCHED_WATCHER
-PSCHED_WATCHER psched_time_mark;
+psched_time_t psched_time_base;
+cycles_t psched_time_mark;
EXPORT_SYMBOL(psched_time_mark);
EXPORT_SYMBOL(psched_time_base);
+/*
+ * Periodically adjust psched_time_base to avoid overflow
+ * with 32-bit get_cycles(). Safe up to 4GHz CPU.
+ */
static void psched_tick(unsigned long);
-
static struct timer_list psched_timer = TIMER_INITIALIZER(psched_tick, 0, 0);
static void psched_tick(unsigned long dummy)
{
-#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
- psched_time_t dummy_stamp;
- PSCHED_GET_TIME(dummy_stamp);
- /* It is OK up to 4GHz cpu */
- psched_timer.expires = jiffies + 1*HZ;
-#else
- unsigned long now = jiffies;
- psched_time_base += ((u64)(now-psched_time_mark))<<PSCHED_JSCALE;
- psched_time_mark = now;
- psched_timer.expires = now + 60*60*HZ;
-#endif
- add_timer(&psched_timer);
+ if (sizeof(cycles_t) == sizeof(u32)) {
+ psched_time_t dummy_stamp;
+ PSCHED_GET_TIME(dummy_stamp);
+ psched_timer.expires = jiffies + 1*HZ;
+ add_timer(&psched_timer);
+ }
}
-#endif
-#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
int __init psched_calibrate_clock(void)
{
psched_time_t stamp, stamp1;
long rdelay;
unsigned long stop;
-#ifdef PSCHED_WATCHER
psched_tick(0);
-#endif
stop = jiffies + HZ/10;
PSCHED_GET_TIME(stamp);
do_gettimeofday(&tv);
{
struct rtnetlink_link *link_p;
-#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
+#ifdef CONFIG_NET_SCH_CLK_CPU
if (psched_calibrate_clock() < 0)
return -1;
-#elif PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES
+#elif defined(CONFIG_NET_SCH_CLK_JIFFIES)
psched_tick_per_us = HZ<<PSCHED_JSCALE;
psched_us_per_tick = 1000000;
-#ifdef PSCHED_WATCHER
- psched_tick(0);
-#endif
#endif
link_p = rtnetlink_links[PF_UNSPEC];
*/
-#define PRIV(sch) ((struct atm_qdisc_data *) (sch)->data)
+#define PRIV(sch) qdisc_priv(sch)
#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
static __inline__ struct atm_flow_data *lookup_flow(struct Qdisc *sch,
u32 classid)
{
+ struct atm_qdisc_data *p = PRIV(sch);
struct atm_flow_data *flow;
- for (flow = PRIV(sch)->flows; flow; flow = flow->next)
+ for (flow = p->flows; flow; flow = flow->next)
if (flow->classid == classid) break;
return flow;
}
static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *head = &q->link;
struct cbq_class **defmap;
struct cbq_class *cl = NULL;
static __inline__ void cbq_activate_class(struct cbq_class *cl)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
int prio = cl->cpriority;
struct cbq_class *cl_tail;
static void cbq_deactivate_class(struct cbq_class *this)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)this->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(this->qdisc);
int prio = this->cpriority;
struct cbq_class *cl;
struct cbq_class *cl_prev = q->active[prio];
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
int len = skb->len;
int ret = NET_XMIT_SUCCESS;
struct cbq_class *cl = cbq_classify(skb, sch,&ret);
static int
cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
int ret;
static void cbq_ovl_classic(struct cbq_class *cl)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
if (!cl->delayed) {
static void cbq_ovl_rclassic(struct cbq_class *cl)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
struct cbq_class *this = cl;
do {
static void cbq_ovl_delay(struct cbq_class *cl)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
if (!cl->delayed) {
static void cbq_ovl_lowprio(struct cbq_class *cl)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
cl->penalized = jiffies + cl->penalty;
static void cbq_undelay(unsigned long arg)
{
struct Qdisc *sch = (struct Qdisc*)arg;
- struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
long delay = 0;
unsigned pmask;
{
int len = skb->len;
struct Qdisc *sch = child->__parent;
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = q->rx_class;
q->rx_class = NULL;
static __inline__ struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
struct cbq_class *this_cl = cl;
if (cl->tparent == NULL)
static __inline__ struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl_tail, *cl_prev, *cl;
struct sk_buff *skb;
int deficit;
static __inline__ struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
unsigned activemask;
cbq_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
psched_time_t now;
psched_tdiff_t incr;
static void cbq_sync_defmap(struct cbq_class *cl)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
struct cbq_class *split = cl->split;
unsigned h;
int i;
static void cbq_unlink_class(struct cbq_class *this)
{
struct cbq_class *cl, **clp;
- struct cbq_sched_data *q = (struct cbq_sched_data*)this->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(this->qdisc);
for (clp = &q->classes[cbq_hash(this->classid)]; (cl = *clp) != NULL; clp = &cl->next) {
if (cl == this) {
static void cbq_link_class(struct cbq_class *this)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)this->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(this->qdisc);
unsigned h = cbq_hash(this->classid);
struct cbq_class *parent = this->tparent;
static unsigned int cbq_drop(struct Qdisc* sch)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl, *cl_head;
int prio;
unsigned int len;
static void
cbq_reset(struct Qdisc* sch)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
int prio;
unsigned h;
static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
+ struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
if (wrr->allot)
cl->allot = wrr->allot;
static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct rtattr *tb[TCA_CBQ_MAX];
struct tc_ratespec *r;
static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
unsigned char *b = skb->tail;
struct rtattr *rta;
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
struct sk_buff *skb, struct tcmsg *tcm)
{
- struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class*)arg;
unsigned char *b = skb->tail;
struct rtattr *rta;
static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = cbq_class_lookup(q, classid);
if (cl) {
static void
cbq_destroy(struct Qdisc* sch)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
unsigned h;
if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_POLICE
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
spin_lock_bh(&sch->dev->queue_lock);
if (q->rx_class == cl)
unsigned long *arg)
{
int err;
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class*)*arg;
struct rtattr *opt = tca[TCA_OPTIONS-1];
struct rtattr *tb[TCA_CBQ_MAX];
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class*)arg;
if (cl->filters || cl->children || cl == &q->link)
static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
if (cl == NULL)
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *p = (struct cbq_class*)parent;
struct cbq_class *cl = cbq_class_lookup(q, classid);
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_sched_data *q = qdisc_priv(sch);
unsigned h;
if (arg->stop)
#endif
-#define PRIV(sch) ((struct dsmark_qdisc_data *) (sch)->data)
+#define PRIV(sch) qdisc_priv(sch)
/*
tcf_destroy(tp);
}
qdisc_destroy(p->q);
- p->q = &noop_qdisc;
kfree(p->mask);
}
static int
bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct fifo_sched_data *q = (struct fifo_sched_data *)sch->data;
+ struct fifo_sched_data *q = qdisc_priv(sch);
if (sch->stats.backlog + skb->len <= q->limit) {
__skb_queue_tail(&sch->q, skb);
static int
pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct fifo_sched_data *q = (struct fifo_sched_data *)sch->data;
+ struct fifo_sched_data *q = qdisc_priv(sch);
if (sch->q.qlen < q->limit) {
__skb_queue_tail(&sch->q, skb);
static int fifo_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct fifo_sched_data *q = (void*)sch->data;
+ struct fifo_sched_data *q = qdisc_priv(sch);
if (opt == NULL) {
unsigned int limit = sch->dev->tx_queue_len ? : 1;
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct fifo_sched_data *q = (void*)sch->data;
+ struct fifo_sched_data *q = qdisc_priv(sch);
unsigned char *b = skb->tail;
struct tc_fifo_qopt opt;
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
+#include <linux/list.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
The idea is the following:
- enqueue, dequeue are serialized via top level device
spinlock dev->queue_lock.
- - tree walking is protected by read_lock(qdisc_tree_lock)
+ - tree walking is protected by read_lock_bh(qdisc_tree_lock)
and this lock is used only in process context.
- - updates to tree are made only under rtnl semaphore,
- hence this lock may be made without local bh disabling.
+ - updates to tree are made under rtnl semaphore or
+ from softirq context (__qdisc_destroy rcu-callback)
+ hence this lock needs local bh disabling.
qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
*/
rwlock_t qdisc_tree_lock = RW_LOCK_UNLOCKED;
+void qdisc_lock_tree(struct net_device *dev)
+{
+ write_lock_bh(&qdisc_tree_lock);
+ spin_lock_bh(&dev->queue_lock);
+}
+
+void qdisc_unlock_tree(struct net_device *dev)
+{
+ spin_unlock_bh(&dev->queue_lock);
+ write_unlock_bh(&qdisc_tree_lock);
+}
+
/*
dev->queue_lock serializes queue accesses for this device
AND dev->qdisc pointer itself.
static int
pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
{
- struct sk_buff_head *list;
+ struct sk_buff_head *list = qdisc_priv(qdisc);
- list = ((struct sk_buff_head*)qdisc->data) +
- prio2band[skb->priority&TC_PRIO_MAX];
+ list += prio2band[skb->priority&TC_PRIO_MAX];
if (list->qlen < qdisc->dev->tx_queue_len) {
__skb_queue_tail(list, skb);
pfifo_fast_dequeue(struct Qdisc* qdisc)
{
int prio;
- struct sk_buff_head *list = ((struct sk_buff_head*)qdisc->data);
+ struct sk_buff_head *list = qdisc_priv(qdisc);
struct sk_buff *skb;
for (prio = 0; prio < 3; prio++, list++) {
static int
pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
- struct sk_buff_head *list;
+ struct sk_buff_head *list = qdisc_priv(qdisc);
- list = ((struct sk_buff_head*)qdisc->data) +
- prio2band[skb->priority&TC_PRIO_MAX];
+ list += prio2band[skb->priority&TC_PRIO_MAX];
__skb_queue_head(list, skb);
qdisc->q.qlen++;
pfifo_fast_reset(struct Qdisc* qdisc)
{
int prio;
- struct sk_buff_head *list = ((struct sk_buff_head*)qdisc->data);
+ struct sk_buff_head *list = qdisc_priv(qdisc);
for (prio=0; prio < 3; prio++)
skb_queue_purge(list+prio);
static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
int i;
- struct sk_buff_head *list;
-
- list = ((struct sk_buff_head*)qdisc->data);
+ struct sk_buff_head *list = qdisc_priv(qdisc);
for (i=0; i<3; i++)
skb_queue_head_init(list+i);
struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
{
+ void *p;
struct Qdisc *sch;
- int size = sizeof(*sch) + ops->priv_size;
+ int size;
+
+ /* ensure that the Qdisc and the private data are 32-byte aligned */
+ size = ((sizeof(*sch) + QDISC_ALIGN_CONST) & ~QDISC_ALIGN_CONST);
+ size += ops->priv_size + QDISC_ALIGN_CONST;
- sch = kmalloc(size, GFP_KERNEL);
- if (!sch)
+ p = kmalloc(size, GFP_KERNEL);
+ if (!p)
return NULL;
- memset(sch, 0, size);
+ memset(p, 0, size);
+ sch = (struct Qdisc *)(((unsigned long)p + QDISC_ALIGN_CONST)
+ & ~QDISC_ALIGN_CONST);
+ sch->padded = (char *)sch - (char *)p;
+
+ INIT_LIST_HEAD(&sch->list);
skb_queue_head_init(&sch->q);
sch->ops = ops;
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev = dev;
+ dev_hold(dev);
sch->stats_lock = &dev->queue_lock;
atomic_set(&sch->refcnt, 1);
/* enqueue is accessed locklessly - make sure it's visible
if (!ops->init || ops->init(sch, NULL) == 0)
return sch;
- kfree(sch);
+ kfree(p);
return NULL;
}
#ifdef CONFIG_NET_ESTIMATOR
qdisc_kill_estimator(&qdisc->stats);
#endif
+ write_lock(&qdisc_tree_lock);
if (ops->reset)
ops->reset(qdisc);
if (ops->destroy)
ops->destroy(qdisc);
+ write_unlock(&qdisc_tree_lock);
module_put(ops->owner);
+ dev_put(qdisc->dev);
if (!(qdisc->flags&TCQ_F_BUILTIN))
- kfree(qdisc);
+ kfree((char *) qdisc - qdisc->padded);
}
/* Under dev->queue_lock and BH! */
void qdisc_destroy(struct Qdisc *qdisc)
{
- struct net_device *dev = qdisc->dev;
-
if (!atomic_dec_and_test(&qdisc->refcnt))
return;
-
- if (dev) {
- struct Qdisc *q, **qp;
- for (qp = &qdisc->dev->qdisc_list; (q=*qp) != NULL; qp = &q->next) {
- if (q == qdisc) {
- *qp = q->next;
- break;
- }
- }
- }
-
+ list_del(&qdisc->list);
call_rcu(&qdisc->q_rcu, __qdisc_destroy);
-
}
-
void dev_activate(struct net_device *dev)
{
/* No queueing discipline is attached to device;
printk(KERN_INFO "%s: activation failed\n", dev->name);
return;
}
-
- write_lock(&qdisc_tree_lock);
- qdisc->next = dev->qdisc_list;
- dev->qdisc_list = qdisc;
- write_unlock(&qdisc_tree_lock);
-
+ write_lock_bh(&qdisc_tree_lock);
+ list_add_tail(&qdisc->list, &dev->qdisc_list);
+ write_unlock_bh(&qdisc_tree_lock);
} else {
qdisc = &noqueue_qdisc;
}
- write_lock(&qdisc_tree_lock);
+ write_lock_bh(&qdisc_tree_lock);
dev->qdisc_sleeping = qdisc;
- write_unlock(&qdisc_tree_lock);
+ write_unlock_bh(&qdisc_tree_lock);
}
spin_lock_bh(&dev->queue_lock);
void dev_init_scheduler(struct net_device *dev)
{
- write_lock(&qdisc_tree_lock);
- spin_lock_bh(&dev->queue_lock);
+ qdisc_lock_tree(dev);
dev->qdisc = &noop_qdisc;
- spin_unlock_bh(&dev->queue_lock);
dev->qdisc_sleeping = &noop_qdisc;
- dev->qdisc_list = NULL;
- write_unlock(&qdisc_tree_lock);
+ INIT_LIST_HEAD(&dev->qdisc_list);
+ qdisc_unlock_tree(dev);
dev_watchdog_init(dev);
}
{
struct Qdisc *qdisc;
- write_lock(&qdisc_tree_lock);
- spin_lock_bh(&dev->queue_lock);
+ qdisc_lock_tree(dev);
qdisc = dev->qdisc_sleeping;
dev->qdisc = &noop_qdisc;
dev->qdisc_sleeping = &noop_qdisc;
qdisc_destroy(qdisc);
}
#endif
- BUG_TRAP(dev->qdisc_list == NULL);
BUG_TRAP(!timer_pending(&dev->watchdog_timer));
- dev->qdisc_list = NULL;
- spin_unlock_bh(&dev->queue_lock);
- write_unlock(&qdisc_tree_lock);
+ qdisc_unlock_tree(dev);
}
EXPORT_SYMBOL(__netdev_watchdog_up);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_restart);
-EXPORT_SYMBOL(qdisc_tree_lock);
+EXPORT_SYMBOL(qdisc_lock_tree);
+EXPORT_SYMBOL(qdisc_unlock_tree);
{
psched_time_t now;
struct gred_sched_data *q=NULL;
- struct gred_sched *t= (struct gred_sched *)sch->data;
+ struct gred_sched *t= qdisc_priv(sch);
unsigned long qave=0;
int i=0;
gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
struct gred_sched_data *q;
- struct gred_sched *t= (struct gred_sched *)sch->data;
+ struct gred_sched *t= qdisc_priv(sch);
q= t->tab[(skb->tc_index&0xf)];
/* error checking here -- probably unnecessary */
PSCHED_SET_PASTPERFECT(q->qidlestart);
{
struct sk_buff *skb;
struct gred_sched_data *q;
- struct gred_sched *t= (struct gred_sched *)sch->data;
+ struct gred_sched *t= qdisc_priv(sch);
skb = __skb_dequeue(&sch->q);
if (skb) {
struct sk_buff *skb;
struct gred_sched_data *q;
- struct gred_sched *t= (struct gred_sched *)sch->data;
+ struct gred_sched *t= qdisc_priv(sch);
skb = __skb_dequeue_tail(&sch->q);
if (skb) {
{
int i;
struct gred_sched_data *q;
- struct gred_sched *t= (struct gred_sched *)sch->data;
+ struct gred_sched *t= qdisc_priv(sch);
__skb_queue_purge(&sch->q);
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
- struct gred_sched *table = (struct gred_sched *)sch->data;
+ struct gred_sched *table = qdisc_priv(sch);
struct gred_sched_data *q;
struct tc_gred_qopt *ctl;
struct tc_gred_sopt *sopt;
static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct gred_sched *table = (struct gred_sched *)sch->data;
+ struct gred_sched *table = qdisc_priv(sch);
struct tc_gred_sopt *sopt;
struct rtattr *tb[TCA_GRED_STAB];
struct rtattr *tb2[TCA_GRED_DPS];
struct rtattr *rta;
struct tc_gred_qopt *opt = NULL ;
struct tc_gred_qopt *dst;
- struct gred_sched *table = (struct gred_sched *)sch->data;
+ struct gred_sched *table = qdisc_priv(sch);
struct gred_sched_data *q;
int i;
unsigned char *b = skb->tail;
static void gred_destroy(struct Qdisc *sch)
{
- struct gred_sched *table = (struct gred_sched *)sch->data;
+ struct gred_sched *table = qdisc_priv(sch);
int i;
for (i = 0;i < table->DPs; i++) {
/*
* macros
*/
-#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
+#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
#include <linux/time.h>
#undef PSCHED_GET_TIME
#define PSCHED_GET_TIME(stamp) \
* ism: (psched_us/byte) << ISM_SHIFT
* dx: psched_us
*
- * Time source resolution
- * PSCHED_JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
- * PSCHED_CPU: resolution is between 0.5us and 1us.
- * PSCHED_GETTIMEOFDAY: resolution is exactly 1us.
+ * Clock source resolution (CONFIG_NET_SCH_CLK_*)
+ * JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
+ * CPU: resolution is between 0.5us and 1us.
+ * GETTIMEOFDAY: resolution is exactly 1us.
*
* sm and ism are scaled in order to keep effective digits.
* SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
struct rtattr **tca, unsigned long *arg)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl = (struct hfsc_class *)*arg;
struct hfsc_class *parent = NULL;
struct rtattr *opt = tca[TCA_OPTIONS-1];
static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
hfsc_destroy_filters(&cl->filter_list);
qdisc_destroy(cl->qdisc);
static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl = (struct hfsc_class *)arg;
if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
struct tcf_result res;
struct tcf_proto *tcf;
static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl = (struct hfsc_class *)arg;
if (cl == NULL)
static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
unsigned int i;
static void
hfsc_schedule_watchdog(struct Qdisc *sch, u64 cur_time)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
u64 next_time = 0;
long delay;
static int
hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct tc_hfsc_qopt *qopt;
unsigned int i;
static int
hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct tc_hfsc_qopt *qopt;
if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
unsigned int i;
static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl, *next;
unsigned int i;
static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
unsigned char *b = skb->tail;
struct tc_hfsc_qopt qopt;
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
struct sk_buff *skb;
u64 cur_time;
static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
__skb_queue_head(&q->requeue, skb);
sch->q.qlen++;
static unsigned int
hfsc_drop(struct Qdisc *sch)
{
- struct hfsc_sched *q = (struct hfsc_sched *)sch->data;
+ struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
unsigned int len;
/* find class in global hash table using given handle */
static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct list_head *p;
if (TC_H_MAJ(handle) != sch->handle)
return NULL;
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qres)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl;
struct tcf_result res;
struct tcf_proto *tcf;
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
int ret = NET_XMIT_SUCCESS;
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = htb_classify(skb,sch,&ret);
/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
int ret = NET_XMIT_SUCCESS;
struct htb_class *cl = htb_classify(skb,sch, &ret);
struct sk_buff *tskb;
static void htb_rate_timer(unsigned long arg)
{
struct Qdisc *sch = (struct Qdisc*)arg;
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct list_head *p;
/* lock queue so that we can muck with it */
if (net_ratelimit())
printk(KERN_ERR "HTB: bad diff in charge, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
cl->classid, diff,
+#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+ q->now.tv_sec * 1000000ULL + q->now.tv_usec,
+ cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
+#else
(unsigned long long) q->now,
(unsigned long long) cl->t_c,
+#endif
q->jiffies);
diff = 1000;
}
if (net_ratelimit())
printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
cl->classid, diff,
+#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
+ q->now.tv_sec * 1000000ULL + q->now.tv_usec,
+ cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
+#else
(unsigned long long) q->now,
(unsigned long long) cl->t_c,
+#endif
q->jiffies);
diff = 1000;
}
static void htb_delay_by(struct Qdisc *sch,long delay)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
if (delay <= 0) delay = 1;
if (unlikely(delay > 5*HZ)) {
if (net_ratelimit())
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb = NULL;
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
int level;
long min_delay;
#ifdef HTB_DEBUG
/* try to drop from each class (by prio) until one succeed */
static unsigned int htb_drop(struct Qdisc* sch)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
int prio;
for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
/* always caled under BH & queue lock */
static void htb_reset(struct Qdisc* sch)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
int i;
HTB_DBG(0,1,"htb_reset sch=%p, handle=%X\n",sch,sch->handle);
static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct htb_sched *q = (struct htb_sched*)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct rtattr *tb[TCA_HTB_INIT];
struct tc_htb_glob *gopt;
int i;
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct htb_sched *q = (struct htb_sched*)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
unsigned char *b = skb->tail;
struct rtattr *rta;
struct tc_htb_glob gopt;
struct sk_buff *skb, struct tcmsg *tcm)
{
#ifdef HTB_DEBUG
- struct htb_sched *q = (struct htb_sched*)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
#endif
struct htb_class *cl = (struct htb_class*)arg;
unsigned char *b = skb->tail;
sch_tree_lock(sch);
if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
if (cl->prio_activity)
- htb_deactivate ((struct htb_sched*)sch->data,cl);
+ htb_deactivate (qdisc_priv(sch),cl);
/* TODO: is it correct ? Why CBQ doesn't do it ? */
sch->q.qlen -= (*old)->q.qlen;
static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
#ifdef HTB_DEBUG
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
#endif
struct htb_class *cl = htb_find(classid,sch);
HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
HTB_DBG(0,1,"htb_destrycls clid=%X ref=%d\n", cl?cl->classid:0,cl?cl->refcnt:0);
if (!cl->level) {
BUG_TRAP(cl->un.leaf.q);
/* always caled under BH & queue lock */
static void htb_destroy(struct Qdisc* sch)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
HTB_DBG(0,1,"htb_destroy q=%p\n",q);
del_timer_sync (&q->timer);
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class*)arg;
HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
static void htb_put(struct Qdisc *sch, unsigned long arg)
{
#ifdef HTB_DEBUG
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
#endif
struct htb_class *cl = (struct htb_class*)arg;
HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
u32 parentid, struct rtattr **tca, unsigned long *arg)
{
int err = -EINVAL;
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class*)*arg,*parent;
struct rtattr *opt = tca[TCA_OPTIONS-1];
struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = htb_find (classid,sch);
HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
/*if (cl && !cl->level) return 0;
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
if (cl)
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
- struct htb_sched *q = (struct htb_sched *)sch->data;
+ struct htb_sched *q = qdisc_priv(sch);
int i;
if (arg->stop)
#endif
-#define PRIV(sch) ((struct ingress_qdisc_data *) (sch)->data)
+#define PRIV(sch) qdisc_priv(sch)
/* Thanks to Doron Oz for this hack
*/
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
+ struct netem_sched_data *q = qdisc_priv(sch);
struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
psched_time_t now;
long delay;
PSCHED_TADD2(now, delay, cb->time_to_send);
/* Always queue at tail to keep packets in order */
- __skb_queue_tail(&q->delayed, skb);
- sch->q.qlen++;
- sch->stats.bytes += skb->len;
- sch->stats.packets++;
- return 0;
+ if (likely(q->delayed.qlen < q->limit)) {
+ __skb_queue_tail(&q->delayed, skb);
+ sch->q.qlen++;
+ sch->stats.bytes += skb->len;
+ sch->stats.packets++;
+ return 0;
+ }
+
+ sch->stats.drops++;
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
}
/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
+ struct netem_sched_data *q = qdisc_priv(sch);
int ret;
if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0)
static unsigned int netem_drop(struct Qdisc* sch)
{
- struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
+ struct netem_sched_data *q = qdisc_priv(sch);
unsigned int len;
if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
*/
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
- struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
+ struct netem_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
psched_time_t now;
static void netem_reset(struct Qdisc *sch)
{
- struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
+ struct netem_sched_data *q = qdisc_priv(sch);
qdisc_reset(q->qdisc);
skb_queue_purge(&q->delayed);
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
- struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
+ struct netem_sched_data *q = qdisc_priv(sch);
struct tc_netem_qopt *qopt = RTA_DATA(opt);
struct Qdisc *child;
int ret;
static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
+ struct netem_sched_data *q = qdisc_priv(sch);
if (!opt)
return -EINVAL;
static void netem_destroy(struct Qdisc *sch)
{
- struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
+ struct netem_sched_data *q = qdisc_priv(sch);
del_timer_sync(&q->timer);
+ qdisc_destroy(q->qdisc);
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct netem_sched_data *q = (struct netem_sched_data *)sch->data;
+ struct netem_sched_data *q = qdisc_priv(sch);
unsigned char *b = skb->tail;
struct tc_netem_qopt qopt;
qopt.latency = q->latency;
qopt.jitter = q->jitter;
- qopt.limit = sch->dev->tx_queue_len;
+ qopt.limit = q->limit;
qopt.loss = q->loss;
qopt.gap = q->gap;
return -1;
}
+static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+
+ if (cl != 1) /* only one class */
+ return -ENOENT;
+
+ tcm->tcm_handle |= TC_H_MIN(1);
+ tcm->tcm_info = q->qdisc->handle;
+
+ return 0;
+}
+
+static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ struct Qdisc **old)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+
+ if (new == NULL)
+ new = &noop_qdisc;
+
+ sch_tree_lock(sch);
+ *old = xchg(&q->qdisc, new);
+ qdisc_reset(*old);
+ sch->q.qlen = 0;
+ sch_tree_unlock(sch);
+
+ return 0;
+}
+
+static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+ return q->qdisc;
+}
+
+static unsigned long netem_get(struct Qdisc *sch, u32 classid)
+{
+ return 1;
+}
+
+static void netem_put(struct Qdisc *sch, unsigned long arg)
+{
+}
+
+static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ struct rtattr **tca, unsigned long *arg)
+{
+ return -ENOSYS;
+}
+
+static int netem_delete(struct Qdisc *sch, unsigned long arg)
+{
+ return -ENOSYS;
+}
+
+static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+{
+ if (!walker->stop) {
+ if (walker->count >= walker->skip)
+ if (walker->fn(sch, 1, walker) < 0) {
+ walker->stop = 1;
+ return;
+ }
+ walker->count++;
+ }
+}
+
+static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+ return NULL;
+}
+
+static struct Qdisc_class_ops netem_class_ops = {
+ .graft = netem_graft,
+ .leaf = netem_leaf,
+ .get = netem_get,
+ .put = netem_put,
+ .change = netem_change_class,
+ .delete = netem_delete,
+ .walk = netem_walk,
+ .tcf_chain = netem_find_tcf,
+ .dump = netem_dump_class,
+};
+
static struct Qdisc_ops netem_qdisc_ops = {
.id = "netem",
+ .cl_ops = &netem_class_ops,
.priv_size = sizeof(struct netem_sched_data),
.enqueue = netem_enqueue,
.dequeue = netem_dequeue,
struct Qdisc *prio_classify(struct sk_buff *skb, struct Qdisc *sch,int *r)
{
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
u32 band = skb->priority;
struct tcf_result res;
prio_dequeue(struct Qdisc* sch)
{
struct sk_buff *skb;
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
int prio;
struct Qdisc *qdisc;
static unsigned int prio_drop(struct Qdisc* sch)
{
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
int prio;
unsigned int len;
struct Qdisc *qdisc;
prio_reset(struct Qdisc* sch)
{
int prio;
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
for (prio=0; prio<q->bands; prio++)
qdisc_reset(q->queues[prio]);
prio_destroy(struct Qdisc* sch)
{
int prio;
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
struct tcf_proto *tp;
while ((tp = q->filter_list) != NULL) {
tcf_destroy(tp);
}
- for (prio=0; prio<q->bands; prio++) {
+ for (prio=0; prio<q->bands; prio++)
qdisc_destroy(q->queues[prio]);
- q->queues[prio] = &noop_qdisc;
- }
}
static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
{
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
struct tc_prio_qopt *qopt = RTA_DATA(opt);
int i;
static int prio_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
int i;
for (i=0; i<TCQ_PRIO_BANDS; i++)
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
unsigned char *b = skb->tail;
struct tc_prio_qopt opt;
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct Qdisc **old)
{
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
unsigned long band = arg - 1;
if (band >= q->bands)
static struct Qdisc *
prio_leaf(struct Qdisc *sch, unsigned long arg)
{
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
unsigned long band = arg - 1;
if (band >= q->bands)
static unsigned long prio_get(struct Qdisc *sch, u32 classid)
{
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
unsigned long band = TC_H_MIN(classid);
if (band - 1 >= q->bands)
static int prio_change(struct Qdisc *sch, u32 handle, u32 parent, struct rtattr **tca, unsigned long *arg)
{
unsigned long cl = *arg;
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
if (cl - 1 > q->bands)
return -ENOENT;
static int prio_delete(struct Qdisc *sch, unsigned long cl)
{
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
if (cl - 1 > q->bands)
return -ENOENT;
return 0;
static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
struct tcmsg *tcm)
{
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
if (cl - 1 > q->bands)
return -ENOENT;
static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
int prio;
if (arg->stop)
static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl)
{
- struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct prio_sched_data *q = qdisc_priv(sch);
if (cl)
return NULL;
static int
red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct red_sched_data *q = (struct red_sched_data *)sch->data;
+ struct red_sched_data *q = qdisc_priv(sch);
psched_time_t now;
static int
red_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct red_sched_data *q = (struct red_sched_data *)sch->data;
+ struct red_sched_data *q = qdisc_priv(sch);
PSCHED_SET_PASTPERFECT(q->qidlestart);
red_dequeue(struct Qdisc* sch)
{
struct sk_buff *skb;
- struct red_sched_data *q = (struct red_sched_data *)sch->data;
+ struct red_sched_data *q = qdisc_priv(sch);
skb = __skb_dequeue(&sch->q);
if (skb) {
static unsigned int red_drop(struct Qdisc* sch)
{
struct sk_buff *skb;
- struct red_sched_data *q = (struct red_sched_data *)sch->data;
+ struct red_sched_data *q = qdisc_priv(sch);
skb = __skb_dequeue_tail(&sch->q);
if (skb) {
static void red_reset(struct Qdisc* sch)
{
- struct red_sched_data *q = (struct red_sched_data *)sch->data;
+ struct red_sched_data *q = qdisc_priv(sch);
__skb_queue_purge(&sch->q);
sch->stats.backlog = 0;
static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
- struct red_sched_data *q = (struct red_sched_data *)sch->data;
+ struct red_sched_data *q = qdisc_priv(sch);
struct rtattr *tb[TCA_RED_STAB];
struct tc_red_qopt *ctl;
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct red_sched_data *q = (struct red_sched_data *)sch->data;
+ struct red_sched_data *q = qdisc_priv(sch);
unsigned char *b = skb->tail;
struct rtattr *rta;
struct tc_red_qopt opt;
return -1;
}
-static void red_destroy(struct Qdisc *sch)
-{
-}
-
static struct Qdisc_ops red_qdisc_ops = {
.next = NULL,
.cl_ops = NULL,
.drop = red_drop,
.init = red_init,
.reset = red_reset,
- .destroy = red_destroy,
.change = red_change,
.dump = red_dump,
.owner = THIS_MODULE,
static unsigned int sfq_drop(struct Qdisc *sch)
{
- struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
+ struct sfq_sched_data *q = qdisc_priv(sch);
sfq_index d = q->max_depth;
struct sk_buff *skb;
unsigned int len;
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
+ struct sfq_sched_data *q = qdisc_priv(sch);
unsigned hash = sfq_hash(q, skb);
sfq_index x;
static int
sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
+ struct sfq_sched_data *q = qdisc_priv(sch);
unsigned hash = sfq_hash(q, skb);
sfq_index x;
static struct sk_buff *
sfq_dequeue(struct Qdisc* sch)
{
- struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
+ struct sfq_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
sfq_index a, old_a;
static void sfq_perturbation(unsigned long arg)
{
struct Qdisc *sch = (struct Qdisc*)arg;
- struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
+ struct sfq_sched_data *q = qdisc_priv(sch);
q->perturbation = net_random()&0x1F;
q->perturb_timer.expires = jiffies + q->perturb_period;
static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
{
- struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
+ struct sfq_sched_data *q = qdisc_priv(sch);
struct tc_sfq_qopt *ctl = RTA_DATA(opt);
if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
static int sfq_init(struct Qdisc *sch, struct rtattr *opt)
{
- struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
+ struct sfq_sched_data *q = qdisc_priv(sch);
int i;
init_timer(&q->perturb_timer);
static void sfq_destroy(struct Qdisc *sch)
{
- struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
+ struct sfq_sched_data *q = qdisc_priv(sch);
del_timer(&q->perturb_timer);
}
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
+ struct sfq_sched_data *q = qdisc_priv(sch);
unsigned char *b = skb->tail;
struct tc_sfq_qopt opt;
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+ struct tbf_sched_data *q = qdisc_priv(sch);
int ret;
if (skb->len > q->max_size) {
static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+ struct tbf_sched_data *q = qdisc_priv(sch);
int ret;
if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0)
static unsigned int tbf_drop(struct Qdisc* sch)
{
- struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+ struct tbf_sched_data *q = qdisc_priv(sch);
unsigned int len;
if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
{
- struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+ struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
skb = q->qdisc->dequeue(q->qdisc);
static void tbf_reset(struct Qdisc* sch)
{
- struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+ struct tbf_sched_data *q = qdisc_priv(sch);
qdisc_reset(q->qdisc);
sch->q.qlen = 0;
static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
{
int err = -EINVAL;
- struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+ struct tbf_sched_data *q = qdisc_priv(sch);
struct rtattr *tb[TCA_TBF_PTAB];
struct tc_tbf_qopt *qopt;
struct qdisc_rate_table *rtab = NULL;
static int tbf_init(struct Qdisc* sch, struct rtattr *opt)
{
- struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+ struct tbf_sched_data *q = qdisc_priv(sch);
if (opt == NULL)
return -EINVAL;
static void tbf_destroy(struct Qdisc *sch)
{
- struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+ struct tbf_sched_data *q = qdisc_priv(sch);
del_timer(&q->wd_timer);
qdisc_put_rtab(q->R_tab);
qdisc_destroy(q->qdisc);
- q->qdisc = &noop_qdisc;
}
static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
- struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+ struct tbf_sched_data *q = qdisc_priv(sch);
unsigned char *b = skb->tail;
struct rtattr *rta;
struct tc_tbf_qopt opt;
static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
struct sk_buff *skb, struct tcmsg *tcm)
{
- struct tbf_sched_data *q = (struct tbf_sched_data*)sch->data;
+ struct tbf_sched_data *q = qdisc_priv(sch);
if (cl != 1) /* only one class */
return -ENOENT;
static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct Qdisc **old)
{
- struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+ struct tbf_sched_data *q = qdisc_priv(sch);
if (new == NULL)
new = &noop_qdisc;
static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
- struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+ struct tbf_sched_data *q = qdisc_priv(sch);
return q->qdisc;
}
struct sk_buff_head q;
};
-#define NEXT_SLAVE(q) (((struct teql_sched_data*)((q)->data))->next)
+#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next)
#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT|IFF_BROADCAST)
teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
struct net_device *dev = sch->dev;
- struct teql_sched_data *q = (struct teql_sched_data *)sch->data;
+ struct teql_sched_data *q = qdisc_priv(sch);
__skb_queue_tail(&q->q, skb);
if (q->q.qlen <= dev->tx_queue_len) {
static int
teql_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct teql_sched_data *q = (struct teql_sched_data *)sch->data;
+ struct teql_sched_data *q = qdisc_priv(sch);
__skb_queue_head(&q->q, skb);
return 0;
static struct sk_buff *
teql_dequeue(struct Qdisc* sch)
{
- struct teql_sched_data *dat = (struct teql_sched_data *)sch->data;
+ struct teql_sched_data *dat = qdisc_priv(sch);
struct sk_buff *skb;
skb = __skb_dequeue(&dat->q);
static void
teql_reset(struct Qdisc* sch)
{
- struct teql_sched_data *dat = (struct teql_sched_data *)sch->data;
+ struct teql_sched_data *dat = qdisc_priv(sch);
skb_queue_purge(&dat->q);
sch->q.qlen = 0;
teql_destroy(struct Qdisc* sch)
{
struct Qdisc *q, *prev;
- struct teql_sched_data *dat = (struct teql_sched_data *)sch->data;
+ struct teql_sched_data *dat = qdisc_priv(sch);
struct teql_master *master = dat->m;
if ((prev = master->slaves) != NULL) {
{
struct net_device *dev = sch->dev;
struct teql_master *m = (struct teql_master*)sch->ops;
- struct teql_sched_data *q = (struct teql_sched_data *)sch->data;
+ struct teql_sched_data *q = qdisc_priv(sch);
if (dev->hard_header_len > m->dev->hard_header_len)
return -EINVAL;
static int
__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
{
- struct teql_sched_data *q = (void*)dev->qdisc->data;
+ struct teql_sched_data *q = qdisc_priv(dev->qdisc);
struct neighbour *mn = skb->dst->neighbour;
struct neighbour *n = q->ncache;
config IP_SCTP
tristate "The SCTP Protocol (EXPERIMENTAL)"
depends on IPV6 || IPV6=n
+ select CRYPTO if SCTP_HMAC_SHA1 || SCTP_HMAC_MD5
+ select CRYPTO_HMAC if SCTP_HMAC_SHA1 || SCTP_HMAC_MD5
+ select CRYPTO_SHA1 if SCTP_HMAC_SHA1
+ select CRYPTO_MD5 if SCTP_HMAC_MD5
---help---
Stream Control Transmission Protocol
config SCTP_HMAC_SHA1
bool "HMAC-SHA1"
- select CRYPTO
- select CRYPTO_HMAC
- select CRYPTO_SHA1
help
Enable the use of HMAC-SHA1 during association establishment. It
is advised to use either HMAC-MD5 or HMAC-SHA1.
config SCTP_HMAC_MD5
bool "HMAC-MD5"
- select CRYPTO
- select CRYPTO_HMAC
- select CRYPTO_MD5
help
Enable the use of HMAC-MD5 during association establishment. It is
advised to use either HMAC-MD5 or HMAC-SHA1.
if (sctp_chunk_is_data(chunk))
asoc->peer.last_data_from = chunk->transport;
else
- SCTP_INC_STATS(SctpInCtrlChunks);
+ SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
if (chunk->transport)
chunk->transport->last_time_heard = jiffies;
case SCTP_STATE_ESTABLISHED:
case SCTP_STATE_SHUTDOWN_PENDING:
case SCTP_STATE_SHUTDOWN_RECEIVED:
+ case SCTP_STATE_SHUTDOWN_SENT:
if ((asoc->rwnd > asoc->a_rwnd) &&
((asoc->rwnd - asoc->a_rwnd) >=
min_t(__u32, (asoc->base.sk->sk_rcvbuf >> 1), asoc->pmtu)))
offset = 0;
if ((whole > 1) || (whole && over))
- SCTP_INC_STATS_USER(SctpFragUsrMsgs);
+ SCTP_INC_STATS_USER(SCTP_MIB_FRAGUSRMSGS);
/* Create chunks for all the full sized DATA chunks. */
for (i=0, len=first_len; i < whole; i++) {
if (asoc && sctp_chunk_is_data(chunk))
asoc->peer.last_data_from = chunk->transport;
else
- SCTP_INC_STATS(SctpInCtrlChunks);
+ SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
if (chunk->transport)
chunk->transport->last_time_heard = jiffies;
if (val != cmp) {
/* CRC failure, dump it. */
- SCTP_INC_STATS_BH(SctpChecksumErrors);
+ SCTP_INC_STATS_BH(SCTP_MIB_CHECKSUMERRORS);
return -1;
}
return 0;
if (skb->pkt_type!=PACKET_HOST)
goto discard_it;
- SCTP_INC_STATS_BH(SctpInSCTPPacks);
+ SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS);
sh = (struct sctphdr *) skb->h.raw;
if (!asoc) {
ep = __sctp_rcv_lookup_endpoint(&dest);
if (sctp_rcv_ootb(skb)) {
- SCTP_INC_STATS_BH(SctpOutOfBlues);
+ SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES);
goto discard_release;
}
}
if (asoc) {
if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) {
- ICMP_INC_STATS_BH(IcmpInErrors);
+ ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
goto out;
}
sk = asoc->base.sk;
* servers this needs to be solved differently.
*/
if (sock_owned_by_user(sk))
- NET_INC_STATS_BH(LockDroppedIcmps);
+ NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
*epp = ep;
*app = asoc;
int err;
if (skb->len < ((iph->ihl << 2) + 8)) {
- ICMP_INC_STATS_BH(IcmpInErrors);
+ ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
return;
}
skb->nh.raw = saveip;
skb->h.raw = savesctp;
if (!sk) {
- ICMP_INC_STATS_BH(IcmpInErrors);
+ ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
return;
}
/* Warning: The sock lock is held. Remember to call
skb->nh.raw = saveip;
skb->h.raw = savesctp;
if (!sk) {
- ICMP6_INC_STATS_BH(idev, Icmp6InErrors);
+ ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
goto out;
}
__FUNCTION__, skb, skb->len,
NIP6(fl.fl6_src), NIP6(fl.fl6_dst));
- SCTP_INC_STATS(SctpOutSCTPPacks);
+ SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
return ip6_xmit(sk, skb, &fl, np->opt, ipfragok);
}
return err;
no_route:
kfree_skb(nskb);
- IP_INC_STATS_BH(OutNoRoutes);
+ IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
/* FIXME: Returning the 'err' will effect all the associations
* associated with a socket, although only one of the paths of the
sctp_outq_tail_data(q, chunk);
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
- SCTP_INC_STATS(SctpOutUnorderChunks);
+ SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS);
else
- SCTP_INC_STATS(SctpOutOrderChunks);
+ SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
q->empty = 0;
break;
};
} else {
__skb_queue_tail(&q->control, (struct sk_buff *) chunk);
- SCTP_INC_STATS(SctpOutCtrlChunks);
+ SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
}
if (error < 0)
int rtx_timeout, int *start_timer)
{
struct list_head *lqueue;
- struct list_head *lchunk;
+ struct list_head *lchunk, *lchunk1;
struct sctp_transport *transport = pkt->transport;
sctp_xmit_t status;
- struct sctp_chunk *chunk;
+ struct sctp_chunk *chunk, *chunk1;
struct sctp_association *asoc;
int error = 0;
* the transmitted list.
*/
list_add_tail(lchunk, &transport->transmitted);
+
+ /* Mark the chunk as ineligible for fast retransmit
+ * after it is retransmitted.
+ */
+ chunk->fast_retransmit = 0;
+
*start_timer = 1;
q->empty = 0;
lchunk = sctp_list_dequeue(lqueue);
break;
};
+
+ /* If we are here due to a retransmit timeout or a fast
+ * retransmit and if there are any chunks left in the retransmit
+ * queue that could not fit in the PMTU sized packet, they need
+ * to be marked as ineligible for a subsequent fast retransmit.
+ */
+ if (rtx_timeout && !lchunk) {
+ list_for_each(lchunk1, lqueue) {
+ chunk1 = list_entry(lchunk1, struct sctp_chunk,
+ transmitted_list);
+ chunk1->fast_retransmit = 0;
+ }
+ }
}
return error;
if (ftsn_chunk) {
__skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk);
- SCTP_INC_STATS(SctpOutCtrlChunks);
+ SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
}
}
#include <linux/init.h>
#include <net/sctp/sctp.h>
-static char *sctp_snmp_list[] = {
-#define SCTP_SNMP_ENTRY(x) #x
- SCTP_SNMP_ENTRY(SctpCurrEstab),
- SCTP_SNMP_ENTRY(SctpActiveEstabs),
- SCTP_SNMP_ENTRY(SctpPassiveEstabs),
- SCTP_SNMP_ENTRY(SctpAborteds),
- SCTP_SNMP_ENTRY(SctpShutdowns),
- SCTP_SNMP_ENTRY(SctpOutOfBlues),
- SCTP_SNMP_ENTRY(SctpChecksumErrors),
- SCTP_SNMP_ENTRY(SctpOutCtrlChunks),
- SCTP_SNMP_ENTRY(SctpOutOrderChunks),
- SCTP_SNMP_ENTRY(SctpOutUnorderChunks),
- SCTP_SNMP_ENTRY(SctpInCtrlChunks),
- SCTP_SNMP_ENTRY(SctpInOrderChunks),
- SCTP_SNMP_ENTRY(SctpInUnorderChunks),
- SCTP_SNMP_ENTRY(SctpFragUsrMsgs),
- SCTP_SNMP_ENTRY(SctpReasmUsrMsgs),
- SCTP_SNMP_ENTRY(SctpOutSCTPPacks),
- SCTP_SNMP_ENTRY(SctpInSCTPPacks),
-#undef SCTP_SNMP_ENTRY
+struct snmp_mib sctp_snmp_list[] = {
+ SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB),
+ SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS),
+ SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS),
+ SNMP_MIB_ITEM("SctpAborteds", SCTP_MIB_ABORTEDS),
+ SNMP_MIB_ITEM("SctpShutdowns", SCTP_MIB_SHUTDOWNS),
+ SNMP_MIB_ITEM("SctpOutOfBlues", SCTP_MIB_OUTOFBLUES),
+ SNMP_MIB_ITEM("SctpChecksumErrors", SCTP_MIB_CHECKSUMERRORS),
+ SNMP_MIB_ITEM("SctpOutCtrlChunks", SCTP_MIB_OUTCTRLCHUNKS),
+ SNMP_MIB_ITEM("SctpOutOrderChunks", SCTP_MIB_OUTORDERCHUNKS),
+ SNMP_MIB_ITEM("SctpOutUnorderChunks", SCTP_MIB_OUTUNORDERCHUNKS),
+ SNMP_MIB_ITEM("SctpInCtrlChunks", SCTP_MIB_INCTRLCHUNKS),
+ SNMP_MIB_ITEM("SctpInOrderChunks", SCTP_MIB_INORDERCHUNKS),
+ SNMP_MIB_ITEM("SctpInUnorderChunks", SCTP_MIB_INUNORDERCHUNKS),
+ SNMP_MIB_ITEM("SctpFragUsrMsgs", SCTP_MIB_FRAGUSRMSGS),
+ SNMP_MIB_ITEM("SctpReasmUsrMsgs", SCTP_MIB_REASMUSRMSGS),
+ SNMP_MIB_ITEM("SctpOutSCTPPacks", SCTP_MIB_OUTSCTPPACKS),
+ SNMP_MIB_ITEM("SctpInSCTPPacks", SCTP_MIB_INSCTPPACKS),
};
/* Return the current value of a particular entry in the mib by adding its
{
int i;
- for (i = 0; i < sizeof(sctp_snmp_list) / sizeof(char *); i++)
- seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i],
- fold_field((void **)sctp_statistics, i));
+ for (i = 0; sctp_snmp_list[i].name != NULL; i++)
+ seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
+ fold_field((void **)sctp_statistics,
+ sctp_snmp_list[i].entry));
return 0;
}
NIPQUAD(((struct rtable *)skb->dst)->rt_src),
NIPQUAD(((struct rtable *)skb->dst)->rt_dst));
- SCTP_INC_STATS(SctpOutSCTPPacks);
+ SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
return ip_queue_xmit(skb, ipfragok);
}
if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
goto clean_up;
spin_lock_bh(&sctp_assocs_id_lock);
- error = idr_get_new(&sctp_assocs_id,
- (void *)asoc,
- &assoc_id);
+ error = idr_get_new_above(&sctp_assocs_id, (void *)asoc, 1,
+ &assoc_id);
spin_unlock_bh(&sctp_assocs_id_lock);
if (error == -EAGAIN)
goto retry;
}
}
+/* Helper function to stop any pending T3-RTX timers */
+static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
+ struct sctp_association *asoc)
+{
+ struct sctp_transport *t;
+ struct list_head *pos;
+
+ list_for_each(pos, &asoc->peer.transport_addr_list) {
+ t = list_entry(pos, struct sctp_transport, transports);
+ if (timer_pending(&t->T3_rtx_timer) &&
+ del_timer(&t->T3_rtx_timer)) {
+ sctp_transport_put(t);
+ }
+ }
+}
+
+
/* Helper function to update the heartbeat timer. */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
return;
}
+/* Helper function to remove the association non-primary peer
+ * transports.
+ */
+static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
+{
+ struct sctp_transport *t;
+ struct list_head *pos;
+ struct list_head *temp;
+
+ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
+ t = list_entry(pos, struct sctp_transport, transports);
+ if (!sctp_cmp_addr_exact(&t->ipaddr,
+ &asoc->peer.primary_addr)) {
+ sctp_assoc_del_peer(asoc, &t->ipaddr);
+ }
+ }
+
+ return;
+}
+
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
if (cmd->obj.ptr)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(cmd->obj.ptr));
+
+ /* FIXME - Eventually come up with a cleaner way to
+ * enable COOKIE-ECHO + DATA bundling during
+ * multihoming stale cookie scenarios, the following
+ * command plays with asoc->peer.retran_path to
+ * avoid the problem of sending the COOKIE-ECHO and
+ * DATA in different paths, which could result
+ * in the association being ABORTed if the DATA chunk
+ * is processed first by the server. Checking the
+ * init error counter simply causes this command
+ * to be executed only during failed attempts of
+ * association establishment.
+ */
+ if ((asoc->peer.retran_path !=
+ asoc->peer.primary_path) &&
+ (asoc->counters[SCTP_COUNTER_INIT_ERROR] > 0)) {
+ sctp_add_cmd_sf(commands,
+ SCTP_CMD_FORCE_PRIM_RETRAN,
+ SCTP_NULL());
+ }
+
break;
case SCTP_CMD_GEN_SHUTDOWN:
case SCTP_CMD_CLEAR_INIT_TAG:
asoc->peer.i.init_tag = 0;
break;
+ case SCTP_CMD_DEL_NON_PRIMARY:
+ sctp_cmd_del_non_primary(asoc);
+ break;
+ case SCTP_CMD_T3_RTX_TIMERS_STOP:
+ sctp_cmd_t3_rtx_timers_stop(commands, asoc);
+ break;
+ case SCTP_CMD_FORCE_PRIM_RETRAN:
+ t = asoc->peer.retran_path;
+ asoc->peer.retran_path = asoc->peer.primary_path;
+ error = sctp_outq_uncork(&asoc->outqueue);
+ local_cork = 0;
+ asoc->peer.retran_path = t;
+ break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SctpShutdowns);
- SCTP_DEC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
+ SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SctpOutCtrlChunks);
+ SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
return SCTP_DISPOSITION_CONSUME;
} else {
return SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SctpAborteds);
+ SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
}
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
- SCTP_INC_STATS(SctpAborteds);
+ SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes if there is any.
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SctpOutCtrlChunks);
+ SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
- sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_RESET,
- SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
- SCTP_INC_STATS(SctpCurrEstab);
- SCTP_INC_STATS(SctpPassiveEstabs);
+ SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SCTP_MIB_PASSIVEESTABS);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (new_asoc->autoclose)
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ /* Reset init error count upon receipt of COOKIE-ACK,
+ * to avoid problems with the management of this
+ * counter in stale cookie situations when a transition back
+ * from the COOKIE-ECHOED state to the COOKIE-WAIT
+ * state is performed.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_RESET,
+ SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR));
+
/* RFC 2960 5.1 Normal Establishment of an Association
*
* E) Upon reception of the COOKIE ACK, endpoint "A" will move
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
- SCTP_INC_STATS(SctpCurrEstab);
- SCTP_INC_STATS(SctpActiveEstabs);
+ SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(SCTP_MIB_ACTIVEESTABS);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (asoc->autoclose)
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
- SCTP_INC_STATS(SctpAborteds);
- SCTP_DEC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_DELETE_TCB;
}
goto out;
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
- SCTP_INC_STATS(SctpOutCtrlChunks);
+ SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
/* Discard the rest of the inbound packet. */
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SctpOutCtrlChunks);
+ SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
retval = SCTP_DISPOSITION_CONSUME;
} else {
retval = SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
- SCTP_INC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
repl = sctp_make_cookie_ack(new_asoc, chunk);
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
- SCTP_INC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
SCTP_NULL());
time_t stale;
sctp_cookie_preserve_param_t bht;
sctp_errhdr_t *err;
- struct list_head *pos;
- struct sctp_transport *t;
struct sctp_chunk *reply;
struct sctp_bind_addr *bp;
int attempts;
/* Clear peer's init_tag cached in assoc as we are sending a new INIT */
sctp_add_cmd_sf(commands, SCTP_CMD_CLEAR_INIT_TAG, SCTP_NULL());
+ /* Stop pending T3-rtx and heartbeat timers */
+ sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
+
+ /* Delete non-primary peer ip addresses since we are transitioning
+ * back to the COOKIE-WAIT state
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL());
+
+ /* If we've sent any data bundled with COOKIE-ECHO we will need to
+ * resend
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN,
+ SCTP_TRANSPORT(asoc->peer.primary_path));
+
/* Cast away the const modifier, as we want to just
* rerun it through as a sideffect.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_INC,
SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR));
- /* If we've sent any data bundled with COOKIE-ECHO we need to
- * resend.
- */
- list_for_each(pos, &asoc->peer.transport_addr_list) {
- t = list_entry(pos, struct sctp_transport, transports);
- sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(t));
- }
-
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
/* ASSOC_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error));
- SCTP_INC_STATS(SctpAborteds);
- SCTP_DEC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SctpAborteds);
+ SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
- sctp_datahdr_t *data_hdr;
- struct sctp_chunk *err;
- size_t datalen;
- sctp_verb_t deliver;
- int tmp;
- __u32 tsn;
+ int error;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
- data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
- skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
-
- tsn = ntohl(data_hdr->tsn);
- SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn);
-
- /* ASSERT: Now skb->data is really the user data. */
-
- /* Process ECN based congestion.
- *
- * Since the chunk structure is reused for all chunks within
- * a packet, we use ecn_ce_done to track if we've already
- * done CE processing for this packet.
- *
- * We need to do ECN processing even if we plan to discard the
- * chunk later.
- */
-
- if (!chunk->ecn_ce_done) {
- struct sctp_af *af;
- chunk->ecn_ce_done = 1;
-
- af = sctp_get_af_specific(
- ipver2af(chunk->skb->nh.iph->version));
-
- if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
- /* Do real work as sideffect. */
- sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
- SCTP_U32(tsn));
- }
- }
-
- tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
- if (tmp < 0) {
- /* The TSN is too high--silently discard the chunk and
- * count on it getting retransmitted later.
- */
+ error = sctp_eat_data(asoc, chunk, commands );
+ switch (error) {
+ case SCTP_IERROR_NO_ERROR:
+ break;
+ case SCTP_IERROR_HIGH_TSN:
+ case SCTP_IERROR_BAD_STREAM:
goto discard_noforce;
- } else if (tmp > 0) {
- /* This is a duplicate. Record it. */
- sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
+ case SCTP_IERROR_DUP_TSN:
+ case SCTP_IERROR_IGNORE_TSN:
goto discard_force;
+ case SCTP_IERROR_NO_DATA:
+ goto consume;
+ default:
+ BUG();
}
- /* This is a new TSN. */
-
- /* Discard if there is no room in the receive window.
- * Actually, allow a little bit of overflow (up to a MTU).
- */
- datalen = ntohs(chunk->chunk_hdr->length);
- datalen -= sizeof(sctp_data_chunk_t);
-
- deliver = SCTP_CMD_CHUNK_ULP;
-
- /* Think about partial delivery. */
- if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
-
- /* Even if we don't accept this chunk there is
- * memory pressure.
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
- }
-
- /* Spill over rwnd a little bit. Note: While allowed, this spill over
- * seems a bit troublesome in that frag_point varies based on
- * PMTU. In cases, such as loopback, this might be a rather
- * large spill over.
- */
- if (!asoc->rwnd || asoc->rwnd_over ||
- (datalen > asoc->rwnd + asoc->frag_point)) {
-
- /* If this is the next TSN, consider reneging to make
- * room. Note: Playing nice with a confused sender. A
- * malicious sender can still eat up all our buffer
- * space and in the future we may want to detect and
- * do more drastic reneging.
- */
- if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
- (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
- SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
- deliver = SCTP_CMD_RENEGE;
- } else {
- SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, "
- "rwnd: %d\n", tsn, datalen,
- asoc->rwnd);
- goto discard_force;
- }
- }
-
- /*
- * Section 3.3.10.9 No User Data (9)
- *
- * Cause of error
- * ---------------
- * No User Data: This error cause is returned to the originator of a
- * DATA chunk if a received DATA chunk has no user data.
- */
- if (unlikely(0 == datalen)) {
- err = sctp_make_abort_no_data(asoc, chunk, tsn);
- if (err) {
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
- SCTP_CHUNK(err));
- }
- /* We are going to ABORT, so we might as well stop
- * processing the rest of the chunks in the packet.
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
- sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
- SCTP_U32(SCTP_ERROR_NO_DATA));
- SCTP_INC_STATS(SctpAborteds);
- SCTP_DEC_STATS(SctpCurrEstab);
- return SCTP_DISPOSITION_CONSUME;
- }
-
- /* If definately accepting the DATA chunk, record its TSN, otherwise
- * wait for renege processing.
- */
- if (SCTP_CMD_CHUNK_ULP == deliver)
- sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
-
- /* Note: Some chunks may get overcounted (if we drop) or overcounted
- * if we renege and the chunk arrives again.
- */
- if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
- SCTP_INC_STATS(SctpInUnorderChunks);
- else
- SCTP_INC_STATS(SctpInOrderChunks);
-
- /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
- *
- * If an endpoint receive a DATA chunk with an invalid stream
- * identifier, it shall acknowledge the reception of the DATA chunk
- * following the normal procedure, immediately send an ERROR chunk
- * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
- * and discard the DATA chunk.
- */
- if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) {
- err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
- &data_hdr->stream,
- sizeof(data_hdr->stream));
- if (err)
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
- SCTP_CHUNK(err));
- goto discard_noforce;
- }
-
- /* Send the data up to the user. Note: Schedule the
- * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
- * chunk needs the updated rwnd.
- */
- sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));
-
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
}
return SCTP_DISPOSITION_DISCARD;
+consume:
+ return SCTP_DISPOSITION_CONSUME;
+
}
/*
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
- sctp_datahdr_t *data_hdr;
- struct sctp_chunk *err;
- size_t datalen;
- int tmp;
- __u32 tsn;
+ int error;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
- data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *) chunk->skb->data;
- skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
-
- tsn = ntohl(data_hdr->tsn);
-
- SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn);
-
- /* ASSERT: Now skb->data is really the user data. */
-
- /* Process ECN based congestion.
- *
- * Since the chunk structure is reused for all chunks within
- * a packet, we use ecn_ce_done to track if we've already
- * done CE processing for this packet.
- *
- * We need to do ECN processing even if we plan to discard the
- * chunk later.
- */
- if (!chunk->ecn_ce_done) {
- struct sctp_af *af;
- chunk->ecn_ce_done = 1;
-
- af = sctp_get_af_specific(
- ipver2af(chunk->skb->nh.iph->version));
-
- if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
- /* Do real work as sideffect. */
- sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
- SCTP_U32(tsn));
- }
- }
-
- tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
- if (tmp < 0) {
- /* The TSN is too high--silently discard the chunk and
- * count on it getting retransmitted later.
- */
- goto gen_shutdown;
- } else if (tmp > 0) {
- /* This is a duplicate. Record it. */
- sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
- goto gen_shutdown;
- }
-
- /* This is a new TSN. */
-
- datalen = ntohs(chunk->chunk_hdr->length);
- datalen -= sizeof(sctp_data_chunk_t);
-
- /*
- * Section 3.3.10.9 No User Data (9)
- *
- * Cause of error
- * ---------------
- * No User Data: This error cause is returned to the originator of a
- * DATA chunk if a received DATA chunk has no user data.
- */
- if (unlikely(0 == datalen)) {
- err = sctp_make_abort_no_data(asoc, chunk, tsn);
- if (err) {
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
- SCTP_CHUNK(err));
- }
- /* We are going to ABORT, so we might as well stop
- * processing the rest of the chunks in the packet.
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
- sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
- SCTP_U32(SCTP_ERROR_NO_DATA));
- SCTP_INC_STATS(SctpAborteds);
- SCTP_DEC_STATS(SctpCurrEstab);
- return SCTP_DISPOSITION_CONSUME;
- }
-
- /* We are accepting this DATA chunk. */
-
- /* Record the fact that we have received this TSN. */
- sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
-
- if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
- SCTP_INC_STATS(SctpInUnorderChunks);
- else
- SCTP_INC_STATS(SctpInOrderChunks);
-
- /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
- *
- * If an endpoint receive a DATA chunk with an invalid stream
- * identifier, it shall acknowledge the reception of the DATA chunk
- * following the normal procedure, immediately send an ERROR chunk
- * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
- * and discard the DATA chunk.
- */
- if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) {
- err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
- &data_hdr->stream,
- sizeof(data_hdr->stream));
- if (err) {
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
- SCTP_CHUNK(err));
- }
+ error = sctp_eat_data(asoc, chunk, commands );
+ switch (error) {
+ case SCTP_IERROR_NO_ERROR:
+ case SCTP_IERROR_HIGH_TSN:
+ case SCTP_IERROR_DUP_TSN:
+ case SCTP_IERROR_IGNORE_TSN:
+ case SCTP_IERROR_BAD_STREAM:
+ break;
+ case SCTP_IERROR_NO_DATA:
+ goto consume;
+ default:
+ BUG();
}
/* Go a head and force a SACK, since we are shutting down. */
-gen_shutdown:
+
/* Implementor's Guide.
*
* While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
}
+
+consume:
return SCTP_DISPOSITION_CONSUME;
}
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SctpOutCtrlChunks);
+ SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
return SCTP_DISPOSITION_CONSUME;
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SctpShutdowns);
- SCTP_DEC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
+ SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
/* ...and remove all record of the association. */
__u8 *ch_end;
int ootb_shut_ack = 0;
- SCTP_INC_STATS(SctpOutOfBlues);
+ SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
do {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SctpOutCtrlChunks);
+ SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
return SCTP_DISPOSITION_CONSUME;
}
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_ASCONF_ACK));
- SCTP_INC_STATS(SctpAborteds);
- SCTP_DEC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_ASCONF_ACK));
- SCTP_INC_STATS(SctpAborteds);
- SCTP_DEC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_USER_ABORT));
- SCTP_INC_STATS(SctpAborteds);
- SCTP_DEC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return retval;
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SctpShutdowns);
+ SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SctpAborteds);
+ SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
/* Even if we can't send the ABORT due to low memory delete the
* TCB. This is a departure from our typical NOMEM handling.
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
- SCTP_INC_STATS(SctpAborteds);
- SCTP_DEC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_DELETE_TCB;
}
/* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
- SCTP_INC_STATS(SctpAborteds);
- SCTP_DEC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_DELETE_TCB;
}
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
- SCTP_INC_STATS(SctpAborteds);
- SCTP_INC_STATS(SctpCurrEstab);
+ SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+	SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
num_blocks = ntohs(sack->num_gap_ack_blocks);
num_dup_tsns = ntohs(sack->num_dup_tsns);
len = sizeof(struct sctp_sackhdr);
- len = (num_blocks + num_dup_tsns) * sizeof(__u32);
+ len += (num_blocks + num_dup_tsns) * sizeof(__u32);
if (len > chunk->skb->len)
return NULL;
sctp_packet_append_chunk(packet, err_chunk);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
- SCTP_INC_STATS(SctpOutCtrlChunks);
+ SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
} else
sctp_chunk_free (err_chunk);
}
}
+
+
+/* Process a data chunk */
+int sctp_eat_data(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ sctp_cmd_seq_t *commands)
+{
+ sctp_datahdr_t *data_hdr;
+ struct sctp_chunk *err;
+ size_t datalen;
+ sctp_verb_t deliver;
+ int tmp;
+ __u32 tsn;
+
+ data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
+ skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
+
+ tsn = ntohl(data_hdr->tsn);
+ SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn);
+
+ /* ASSERT: Now skb->data is really the user data. */
+
+ /* Process ECN based congestion.
+ *
+ * Since the chunk structure is reused for all chunks within
+ * a packet, we use ecn_ce_done to track if we've already
+ * done CE processing for this packet.
+ *
+ * We need to do ECN processing even if we plan to discard the
+ * chunk later.
+ */
+
+ if (!chunk->ecn_ce_done) {
+ struct sctp_af *af;
+ chunk->ecn_ce_done = 1;
+
+ af = sctp_get_af_specific(
+ ipver2af(chunk->skb->nh.iph->version));
+
+ if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
+ /* Do real work as sideffect. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
+ SCTP_U32(tsn));
+ }
+ }
+
+ tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
+ if (tmp < 0) {
+ /* The TSN is too high--silently discard the chunk and
+ * count on it getting retransmitted later.
+ */
+ return SCTP_IERROR_HIGH_TSN;
+ } else if (tmp > 0) {
+ /* This is a duplicate. Record it. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
+ return SCTP_IERROR_DUP_TSN;
+ }
+
+ /* This is a new TSN. */
+
+ /* Discard if there is no room in the receive window.
+ * Actually, allow a little bit of overflow (up to a MTU).
+ */
+ datalen = ntohs(chunk->chunk_hdr->length);
+ datalen -= sizeof(sctp_data_chunk_t);
+
+ deliver = SCTP_CMD_CHUNK_ULP;
+
+ /* Think about partial delivery. */
+ if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
+
+ /* Even if we don't accept this chunk there is
+ * memory pressure.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
+ }
+
+ /* Spill over rwnd a little bit. Note: While allowed, this spill over
+ * seems a bit troublesome in that frag_point varies based on
+ * PMTU. In cases, such as loopback, this might be a rather
+ * large spill over.
+ */
+ if (!asoc->rwnd || asoc->rwnd_over ||
+ (datalen > asoc->rwnd + asoc->frag_point)) {
+
+ /* If this is the next TSN, consider reneging to make
+ * room. Note: Playing nice with a confused sender. A
+ * malicious sender can still eat up all our buffer
+ * space and in the future we may want to detect and
+ * do more drastic reneging.
+ */
+ if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
+ (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
+ SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
+ deliver = SCTP_CMD_RENEGE;
+ } else {
+ SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, "
+ "rwnd: %d\n", tsn, datalen,
+ asoc->rwnd);
+ return SCTP_IERROR_IGNORE_TSN;
+ }
+ }
+
+ /*
+ * Section 3.3.10.9 No User Data (9)
+ *
+ * Cause of error
+ * ---------------
+ * No User Data: This error cause is returned to the originator of a
+ * DATA chunk if a received DATA chunk has no user data.
+ */
+ if (unlikely(0 == datalen)) {
+ err = sctp_make_abort_no_data(asoc, chunk, tsn);
+ if (err) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err));
+ }
+ /* We are going to ABORT, so we might as well stop
+ * processing the rest of the chunks in the packet.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_U32(SCTP_ERROR_NO_DATA));
+ SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ return SCTP_IERROR_NO_DATA;
+ }
+
+	/* If definitely accepting the DATA chunk, record its TSN, otherwise
+ * wait for renege processing.
+ */
+ if (SCTP_CMD_CHUNK_ULP == deliver)
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
+
+	/* Note: Some chunks may get undercounted (if we drop) or overcounted
+ * if we renege and the chunk arrives again.
+ */
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS);
+ else
+ SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS);
+
+ /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
+ *
+ * If an endpoint receive a DATA chunk with an invalid stream
+ * identifier, it shall acknowledge the reception of the DATA chunk
+ * following the normal procedure, immediately send an ERROR chunk
+ * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
+ * and discard the DATA chunk.
+ */
+ if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) {
+ err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
+ &data_hdr->stream,
+ sizeof(data_hdr->stream));
+ if (err)
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err));
+ return SCTP_IERROR_BAD_STREAM;
+ }
+
+ /* Send the data up to the user. Note: Schedule the
+ * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
+ * chunk needs the updated rwnd.
+ */
+ sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));
+
+ return SCTP_IERROR_NO_ERROR;
+}
if (copy_from_user(¶ms, optval, optlen))
return -EFAULT;
+ /*
+ * API 7. Socket Options (setting the default value for the endpoint)
+ * All options that support specific settings on an association by
+ * filling in either an association id variable or a sockaddr_storage
+ * SHOULD also support setting of the same value for the entire endpoint
+ * (i.e. future associations). To accomplish this the following logic is
+ * used when setting one of these options:
+
+ * c) If neither the sockaddr_storage or association identification is
+ * set i.e. the sockaddr_storage is set to all 0's (INADDR_ANY) and
+ * the association identification is 0, the settings are a default
+ * and to be applied to the endpoint (all future associations).
+ */
+
+ /* update default value for endpoint (all future associations) */
+ if (!params.spp_assoc_id &&
+ sctp_is_any(( union sctp_addr *)¶ms.spp_address)) {
+ if (params.spp_hbinterval)
+ sctp_sk(sk)->paddrparam.spp_hbinterval =
+ params.spp_hbinterval;
+		if (params.spp_pathmaxrxt)
+ sctp_sk(sk)->paddrparam.spp_pathmaxrxt =
+ params.spp_pathmaxrxt;
+ return 0;
+ }
+
trans = sctp_addr_id2transport(sk, ¶ms.spp_address,
params.spp_assoc_id);
if (!trans)
if (copy_from_user(¶ms, optval, len))
return -EFAULT;
+ /* If no association id is specified retrieve the default value
+ * for the endpoint that will be used for all future associations
+ */
+ if (!params.spp_assoc_id &&
+ sctp_is_any(( union sctp_addr *)¶ms.spp_address)) {
+ params.spp_hbinterval = sctp_sk(sk)->paddrparam.spp_hbinterval;
+ params.spp_pathmaxrxt = sctp_sk(sk)->paddrparam.spp_pathmaxrxt;
+
+ goto done;
+ }
+
trans = sctp_addr_id2transport(sk, ¶ms.spp_address,
params.spp_assoc_id);
if (!trans)
*/
params.spp_pathmaxrxt = trans->error_threshold;
+done:
if (copy_to_user(optval, ¶ms, len))
return -EFAULT;
};
event = sctp_skb2event(f_frag);
- SCTP_INC_STATS(SctpReasmUsrMsgs);
+ SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
return event;
}
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <net/tux.h>
#include <linux/wanrouter.h>
#include <linux/if_bridge.h>
#include <linux/init.h>
* in the operation structures but are done directly via the socketcall() multiplexor.
*/
-static struct file_operations socket_file_ops = {
+struct file_operations socket_file_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.aio_read = sock_aio_read,
* but we take care of internal coherence yet.
*/
-int sock_map_fd(struct socket *sock)
+struct file * sock_map_file(struct socket *sock)
{
- int fd;
+ struct file *file;
struct qstr this;
char name[32];
- /*
- * Find a file descriptor suitable for return to the user.
- */
+ file = get_empty_filp();
- fd = get_unused_fd();
- if (fd >= 0) {
- struct file *file = get_empty_filp();
+ if (!file)
+ return ERR_PTR(-ENFILE);
- if (!file) {
- put_unused_fd(fd);
- fd = -ENFILE;
- goto out;
- }
-
- sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino);
- this.name = name;
- this.len = strlen(name);
- this.hash = SOCK_INODE(sock)->i_ino;
+ sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino);
+ this.name = name;
+ this.len = strlen(name);
+ this.hash = SOCK_INODE(sock)->i_ino;
- file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this);
- if (!file->f_dentry) {
- put_filp(file);
- put_unused_fd(fd);
- fd = -ENOMEM;
- goto out;
- }
- file->f_dentry->d_op = &sockfs_dentry_operations;
- d_add(file->f_dentry, SOCK_INODE(sock));
- file->f_vfsmnt = mntget(sock_mnt);
- file->f_mapping = file->f_dentry->d_inode->i_mapping;
-
- sock->file = file;
- file->f_op = SOCK_INODE(sock)->i_fop = &socket_file_ops;
- file->f_mode = 3;
- file->f_flags = O_RDWR;
- file->f_pos = 0;
- fd_install(fd, file);
+ file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this);
+ if (!file->f_dentry) {
+ put_filp(file);
+ return ERR_PTR(-ENOMEM);
}
+ file->f_dentry->d_op = &sockfs_dentry_operations;
+ d_add(file->f_dentry, SOCK_INODE(sock));
+ file->f_vfsmnt = mntget(sock_mnt);
+	file->f_mapping = file->f_dentry->d_inode->i_mapping;
+
+ if (sock->file)
+ BUG();
+ sock->file = file;
+ file->f_op = SOCK_INODE(sock)->i_fop = &socket_file_ops;
+ file->f_mode = FMODE_READ | FMODE_WRITE;
+ file->f_flags = O_RDWR;
+ file->f_pos = 0;
+
+ return file;
+}
-out:
+int sock_map_fd(struct socket *sock)
+{
+ int fd;
+ struct file *file;
+
+ /*
+ * Find a file descriptor suitable for return to the user.
+ */
+
+ fd = get_unused_fd();
+ if (fd < 0)
+ return fd;
+
+ file = sock_map_file(sock);
+ if (IS_ERR(file)) {
+ put_unused_fd(fd);
+ return PTR_ERR(file);
+ }
+ fd_install(fd, file);
+
return fd;
}
return ret;
}
+int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
+ struct kvec *vec, size_t num, size_t size)
+{
+ mm_segment_t oldfs = get_fs();
+ int result;
+
+ set_fs(KERNEL_DS);
+ /*
+ * the following is safe, since for compiler definitions of kvec and
+ * iovec are identical, yielding the same in-core layout and alignment
+ */
+	msg->msg_iov = (struct iovec *)vec;
+ msg->msg_iovlen = num;
+ result = sock_sendmsg(sock, msg, size);
+ set_fs(oldfs);
+ return result;
+}
static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
return ret;
}
+int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
+ struct kvec *vec, size_t num,
+ size_t size, int flags)
+{
+ mm_segment_t oldfs = get_fs();
+ int result;
+
+ set_fs(KERNEL_DS);
+ /*
+ * the following is safe, since for compiler definitions of kvec and
+ * iovec are identical, yielding the same in-core layout and alignment
+ */
+	msg->msg_iov = (struct iovec *)vec;
+ msg->msg_iovlen = num;
+ result = sock_recvmsg(sock, msg, size, flags);
+ set_fs(oldfs);
+ return result;
+}
+
static void sock_aio_dtor(struct kiocb *iocb)
{
kfree(iocb->private);
struct socket *sock;
int flags;
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
sock = SOCKET_I(file->f_dentry->d_inode);
flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
}
out:
+ if (sock->sk != sk)
+ BUG();
release_sock(sock->sk);
return 0;
}
#endif
}
+int tux_Dprintk;
+int tux_TDprintk;
+
+#ifdef CONFIG_TUX_MODULE
+
+asmlinkage long (*sys_tux_ptr) (unsigned int action, user_req_t *u_info) = NULL;
+
+struct module *tux_module = NULL;
+spinlock_t tux_module_lock = SPIN_LOCK_UNLOCKED;
+
+asmlinkage long sys_tux (unsigned int action, user_req_t *u_info)
+{
+ int ret;
+
+ if (current->tux_info)
+ return sys_tux_ptr(action, u_info);
+
+ ret = -ENOSYS;
+ spin_lock(&tux_module_lock);
+ if (!tux_module)
+ goto out_unlock;
+ if (!try_module_get(tux_module))
+ goto out_unlock;
+ spin_unlock(&tux_module_lock);
+
+ if (!sys_tux_ptr)
+ TUX_BUG();
+ ret = sys_tux_ptr(action, u_info);
+
+ spin_lock(&tux_module_lock);
+ module_put(tux_module);
+out_unlock:
+ spin_unlock(&tux_module_lock);
+
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(tux_module);
+EXPORT_SYMBOL_GPL(tux_module_lock);
+EXPORT_SYMBOL_GPL(sys_tux_ptr);
+
+EXPORT_SYMBOL_GPL(tux_Dprintk);
+EXPORT_SYMBOL_GPL(tux_TDprintk);
+
+#endif
#ifdef CONFIG_PROC_FS
void socket_seq_show(struct seq_file *seq)
{
EXPORT_SYMBOL(sock_unregister);
EXPORT_SYMBOL(sock_wake_async);
EXPORT_SYMBOL(sockfd_lookup);
+EXPORT_SYMBOL(kernel_sendmsg);
+EXPORT_SYMBOL(kernel_recvmsg);
static ssize_t
gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
- char *dst, size_t buflen)
+ char __user *dst, size_t buflen)
{
char *data = (char *)msg->data + msg->copied;
ssize_t mlen = msg->len;
#define MSG_BUF_MAXSIZE 1024
static ssize_t
-gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
+gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
struct xdr_netobj obj = {
.len = mlen,
struct rpc_rqst *req = task->tk_rqstp;
u32 maj_stat = 0;
struct xdr_netobj mic;
- struct iovec iov;
+ struct kvec iov;
struct xdr_buf verf_buf;
u32 service;
gc_base);
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
u32 seq, qop_state;
- struct iovec iov;
+ struct kvec iov;
struct xdr_buf verf_buf;
struct xdr_netobj mic;
u32 flav,len;
u32 *integ_len = NULL;
struct xdr_netobj mic;
u32 offset, *q;
- struct iovec *iov;
+ struct kvec *iov;
u32 maj_stat = 0;
int status = -EIO;
}
static inline int
-svc_safe_getnetobj(struct iovec *argv, struct xdr_netobj *o)
+svc_safe_getnetobj(struct kvec *argv, struct xdr_netobj *o)
{
int l;
}
static inline int
-svc_safe_putnetobj(struct iovec *resv, struct xdr_netobj *o)
+svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)
{
u32 *p;
struct xdr_buf rpchdr;
struct xdr_netobj checksum;
u32 flavor = 0;
- struct iovec *argv = &rqstp->rq_arg.head[0];
- struct iovec iov;
+ struct kvec *argv = &rqstp->rq_arg.head[0];
+ struct kvec iov;
/* data to compute the checksum over: */
iov.iov_base = rpcstart;
struct xdr_buf verf_data;
struct xdr_netobj mic;
u32 *p;
- struct iovec iov;
+ struct kvec iov;
svc_putu32(rqstp->rq_res.head, htonl(RPC_AUTH_GSS));
xdr_seq = htonl(seq);
static int
svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
{
- struct iovec *argv = &rqstp->rq_arg.head[0];
- struct iovec *resv = &rqstp->rq_res.head[0];
+ struct kvec *argv = &rqstp->rq_arg.head[0];
+ struct kvec *resv = &rqstp->rq_res.head[0];
u32 crlen;
struct xdr_netobj tmpobj;
struct gss_svc_data *svcdata = rqstp->rq_auth_data;
struct xdr_buf *resbuf = &rqstp->rq_res;
struct xdr_buf integ_buf;
struct xdr_netobj mic;
- struct iovec *resv;
+ struct kvec *resv;
u32 *p;
int integ_offset, integ_len;
int stat = -EINVAL;
struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
int err;
- if (ppos != &filp->f_pos)
- return -ESPIPE;
-
if (count == 0)
return 0;
int err;
struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
- if (ppos != &filp->f_pos)
- return -ESPIPE;
-
if (count == 0)
return 0;
if (count >= sizeof(write_buf))
{
struct cache_reader *rp = NULL;
+ nonseekable_open(inode, filp);
if (filp->f_mode & FMODE_READ) {
struct cache_detail *cd = PDE(inode)->data;
}
static struct file_operations cache_flush_operations = {
+ .open = nonseekable_open,
.read = read_flush,
.write = write_flush,
};
struct svc_program *progp;
struct svc_version *versp = NULL; /* compiler food */
struct svc_procedure *procp = NULL;
- struct iovec * argv = &rqstp->rq_arg.head[0];
- struct iovec * resv = &rqstp->rq_res.head[0];
+ struct kvec * argv = &rqstp->rq_arg.head[0];
+ struct kvec * resv = &rqstp->rq_res.head[0];
kxdrproc_t xdr;
u32 *statp;
u32 dir, prog, vers, proc,
static int
svcauth_null_accept(struct svc_rqst *rqstp, u32 *authp)
{
- struct iovec *argv = &rqstp->rq_arg.head[0];
- struct iovec *resv = &rqstp->rq_res.head[0];
+ struct kvec *argv = &rqstp->rq_arg.head[0];
+ struct kvec *resv = &rqstp->rq_res.head[0];
int rv=0;
struct ip_map key, *ipm;
int
svcauth_unix_accept(struct svc_rqst *rqstp, u32 *authp)
{
- struct iovec *argv = &rqstp->rq_arg.head[0];
- struct iovec *resv = &rqstp->rq_res.head[0];
+ struct kvec *argv = &rqstp->rq_arg.head[0];
+ struct kvec *resv = &rqstp->rq_res.head[0];
struct svc_cred *cred = &rqstp->rq_cred;
u32 slen, i;
int len = argv->iov_len;
* Generic recvfrom routine.
*/
static int
-svc_recvfrom(struct svc_rqst *rqstp, struct iovec *iov, int nr, int buflen)
+svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
- mm_segment_t oldfs;
struct msghdr msg;
struct socket *sock;
int len, alen;
msg.msg_name = &rqstp->rq_addr;
msg.msg_namelen = sizeof(rqstp->rq_addr);
- msg.msg_iov = iov;
- msg.msg_iovlen = nr;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = MSG_DONTWAIT;
- oldfs = get_fs(); set_fs(KERNEL_DS);
- len = sock_recvmsg(sock, &msg, buflen, MSG_DONTWAIT);
- set_fs(oldfs);
+ len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT);
/* sock_recvmsg doesn't fill in the name/namelen, so we must..
* possibly we should cache this in the svc_sock structure
struct svc_sock *svsk = rqstp->rq_sock;
struct svc_serv *serv = svsk->sk_server;
int len;
- struct iovec vec[RPCSVC_MAXPAGES];
+ struct kvec vec[RPCSVC_MAXPAGES];
int pnum, vlen;
dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
*/
if (svsk->sk_tcplen < 4) {
unsigned long want = 4 - svsk->sk_tcplen;
- struct iovec iov;
+ struct kvec iov;
iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
iov.iov_len = want;
int sent;
u32 reclen;
- /* Set up the first element of the reply iovec.
- * Any other iovecs that may be in use have been taken
+ /* Set up the first element of the reply kvec.
+ * Any other kvecs that may be in use have been taken
* care of by the server implementation itself.
*/
reclen = htonl(0x80000000|((xbufp->len ) - 4));
static int
proc_dodebug(ctl_table *table, int write, struct file *file,
- void __user *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
char tmpbuf[20], c, *s;
char __user *p;
unsigned int value;
size_t left, len;
- if ((file->f_pos && !write) || !*lenp) {
+ if ((*ppos && !write) || !*lenp) {
*lenp = 0;
return 0;
}
done:
*lenp -= left;
- file->f_pos += *lenp;
+ *ppos += *lenp;
return 0;
}
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
unsigned int len)
{
- struct iovec *tail = xdr->tail;
+ struct kvec *tail = xdr->tail;
u32 *p;
xdr->pages = pages;
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
struct page **pages, unsigned int base, unsigned int len)
{
- struct iovec *head = xdr->head;
- struct iovec *tail = xdr->tail;
+ struct kvec *head = xdr->head;
+ struct kvec *tail = xdr->tail;
char *buf = (char *)head->iov_base;
unsigned int buflen = head->iov_len;
}
/*
- * Realign the iovec if the server missed out some reply elements
+ * Realign the kvec if the server missed out some reply elements
* (such as post-op attributes,...)
* Note: This is a simple implementation that assumes that
* len <= iov->iov_len !!!
* The RPC header (assumed to be the 1st element in the iov array)
* is not shifted.
*/
-void xdr_shift_iovec(struct iovec *iov, int nr, size_t len)
+void xdr_shift_iovec(struct kvec *iov, int nr, size_t len)
{
- struct iovec *pvec;
+ struct kvec *pvec;
for (pvec = iov + nr - 1; nr > 1; nr--, pvec--) {
- struct iovec *svec = pvec - 1;
+ struct kvec *svec = pvec - 1;
if (len > pvec->iov_len) {
printk(KERN_DEBUG "RPC: Urk! Large shift of short iovec.\n");
}
/*
- * Map a struct xdr_buf into an iovec array.
+ * Map a struct xdr_buf into an kvec array.
*/
-int xdr_kmap(struct iovec *iov_base, struct xdr_buf *xdr, size_t base)
+int xdr_kmap(struct kvec *iov_base, struct xdr_buf *xdr, size_t base)
{
- struct iovec *iov = iov_base;
+ struct kvec *iov = iov_base;
struct page **ppage = xdr->pages;
unsigned int len, pglen = xdr->page_len;
unsigned int len, pglen = xdr->page_len;
int err, ret = 0;
ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
- mm_segment_t oldfs;
len = xdr->head[0].iov_len;
if (base < len || (addr != NULL && base == 0)) {
- struct iovec iov = {
+ struct kvec iov = {
.iov_base = xdr->head[0].iov_base + base,
.iov_len = len - base,
};
.msg_namelen = addrlen,
.msg_flags = msgflags,
};
-
- if (iov.iov_len != 0) {
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- }
if (xdr->len > len)
msg.msg_flags |= MSG_MORE;
- oldfs = get_fs(); set_fs(get_ds());
- err = sock_sendmsg(sock, &msg, iov.iov_len);
- set_fs(oldfs);
+
+ if (iov.iov_len != 0)
+ err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
+ else
+ err = kernel_sendmsg(sock, &msg, NULL, 0, 0);
if (ret == 0)
ret = err;
else if (err > 0)
copy_tail:
len = xdr->tail[0].iov_len;
if (base < len) {
- struct iovec iov = {
+ struct kvec iov = {
.iov_base = xdr->tail[0].iov_base + base,
.iov_len = len - base,
};
struct msghdr msg = {
- .msg_iov = &iov,
- .msg_iovlen = 1,
.msg_flags = msgflags,
};
- oldfs = get_fs(); set_fs(get_ds());
- err = sock_sendmsg(sock, &msg, iov.iov_len);
- set_fs(oldfs);
+ err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
if (ret == 0)
ret = err;
else if (err > 0)
* @buf: xdr_buf
* @len: bytes to remove from buf->head[0]
*
- * Shrinks XDR buffer's header iovec buf->head[0] by
+ * Shrinks XDR buffer's header kvec buf->head[0] by
* 'len' bytes. The extra data is not lost, but is instead
* moved into the inlined pages and/or the tail.
*/
void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
- struct iovec *head, *tail;
+ struct kvec *head, *tail;
size_t copy, offs;
unsigned int pglen = buf->page_len;
void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
- struct iovec *tail;
+ struct kvec *tail;
size_t copy;
char *p;
unsigned int pglen = buf->page_len;
* @p: current pointer inside XDR buffer
*
* Note: at the moment the RPC client only passes the length of our
- * scratch buffer in the xdr_buf's header iovec. Previously this
+ * scratch buffer in the xdr_buf's header kvec. Previously this
* meant we needed to call xdr_adjust_iovec() after encoding the
* data. With the new scheme, the xdr_stream manages the details
- * of the buffer length, and takes care of adjusting the iovec
+ * of the buffer length, and takes care of adjusting the kvec
* length for us.
*/
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
- struct iovec *iov = buf->head;
+ struct kvec *iov = buf->head;
xdr->buf = buf;
xdr->iov = iov;
*
* Checks that we have enough buffer space to encode 'nbytes' more
* bytes of data. If so, update the total xdr_buf length, and
- * adjust the length of the current iovec.
+ * adjust the length of the current kvec.
*/
uint32_t * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
unsigned int len)
{
struct xdr_buf *buf = xdr->buf;
- struct iovec *iov = buf->tail;
+ struct kvec *iov = buf->tail;
buf->pages = pages;
buf->page_base = base;
buf->page_len = len;
*/
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
- struct iovec *iov = buf->head;
+ struct kvec *iov = buf->head;
unsigned int len = iov->iov_len;
if (len > buf->len)
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
struct xdr_buf *buf = xdr->buf;
- struct iovec *iov;
+ struct kvec *iov;
ssize_t shift;
unsigned int end;
int padding;
}
EXPORT_SYMBOL(xdr_read_pages);
-static struct iovec empty_iov = {.iov_base = NULL, .iov_len = 0};
+static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
void
-xdr_buf_from_iov(struct iovec *iov, struct xdr_buf *buf)
+xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
buf->head[0] = *iov;
buf->tail[0] = empty_iov;
* length of subiov to zero. Decrements len by length of subiov, sets base
* to zero (or decrements it by length of iov if subiov is empty). */
static void
-iov_subsegment(struct iovec *iov, struct iovec *subiov, int *base, int *len)
+iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
{
if (*base > iov->iov_len) {
subiov->iov_base = NULL;
/*
* Reserve an RPC call slot.
*/
-void
-xprt_reserve(struct rpc_task *task)
-{
- struct rpc_xprt *xprt = task->tk_xprt;
-
- task->tk_status = -EIO;
- if (!xprt->shutdown) {
- spin_lock(&xprt->xprt_lock);
- do_xprt_reserve(task);
- spin_unlock(&xprt->xprt_lock);
- if (task->tk_rqstp)
- del_timer_sync(&xprt->timer);
- }
-}
-
static inline void
do_xprt_reserve(struct rpc_task *task)
{
rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
}
+void
+xprt_reserve(struct rpc_task *task)
+{
+ struct rpc_xprt *xprt = task->tk_xprt;
+
+ task->tk_status = -EIO;
+ if (!xprt->shutdown) {
+ spin_lock(&xprt->xprt_lock);
+ do_xprt_reserve(task);
+ spin_unlock(&xprt->xprt_lock);
+ if (task->tk_rqstp)
+ del_timer_sync(&xprt->timer);
+ }
+}
+
/*
* Allocate a 'unique' XID
*/
Dprintk("sk->sk_wmem_queued: %d, sk->sk_sndbuf: %d.\n",
sk->sk_wmem_queued, sk->sk_sndbuf);
- if (tcp_wspace(sk) >= tcp_min_write_space(sk)) {
+ if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
if (!idle_event(req))
output_space_event(req);
Dprintk("sk->sk_wmem_queued: %d, sk->sk_sndbuf: %d.\n",
sk->sk_wmem_queued, sk->sk_sndbuf);
- if (tcp_wspace(sk) >= sk->sk_sndbuf/10*8) {
+ if (sk_stream_wspace(sk) >= sk->sk_sndbuf/10*8) {
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
if (!idle_event(req))
output_space_event(req);
sk = req->sock->sk;
printk("... sock %p, sk %p, sk->state: %d, sk->err: %d\n", req->sock, sk, sk->sk_state, sk->sk_err);
printk("... write_queue: %d, receive_queue: %d, error_queue: %d, keepalive: %d, status: %d\n", !skb_queue_empty(&sk->sk_write_queue), !skb_queue_empty(&sk->sk_receive_queue), !skb_queue_empty(&sk->sk_error_queue), req->keep_alive, req->status);
- printk("...tp->send_head: %p\n", tcp_sk(sk)->send_head);
+ printk("...tp->send_head: %p\n", sk->sk_send_head);
printk("...tp->snd_una: %08x\n", tcp_sk(sk)->snd_una);
printk("...tp->snd_nxt: %08x\n", tcp_sk(sk)->snd_nxt);
printk("...tp->packets_out: %08x\n", tcp_sk(sk)->packets_out);
static int sock_send_actor (read_descriptor_t * desc, struct page *page,
unsigned long offset, unsigned long orig_size)
{
- sock_send_desc_t *sock_desc = (sock_send_desc_t *)desc->buf;
+ sock_send_desc_t *sock_desc = (sock_send_desc_t *)desc->arg.buf;
struct socket *sock = sock_desc->sock;
tux_req_t *req = sock_desc->req;
unsigned int flags;
else
want = req->output_len;
req->desc.count = want;
- req->desc.buf = (char *) &sock_desc;
+ req->desc.arg.buf = (char *) &sock_desc;
req->desc.error = 0;
Dprintk("sendfile(), desc.count: %d.\n", req->desc.count);
do_generic_file_read(&req->in_file, &req->in_file.f_pos, &req->desc, sock_send_actor, nonblock);
req->desc.written = 0;
req->desc.count = req->output_len;
- req->desc.buf = NULL;
+ req->desc.arg.buf = NULL;
req->desc.error = 0;
do_generic_file_read(&req->in_file, &req->in_file.f_pos, &req->desc,
EXPORT_SYMBOL(xfrm4_rcv);
EXPORT_SYMBOL(xfrm4_tunnel_register);
EXPORT_SYMBOL(xfrm4_tunnel_deregister);
-EXPORT_SYMBOL(xfrm4_tunnel_check_size);
EXPORT_SYMBOL(xfrm_register_type);
EXPORT_SYMBOL(xfrm_unregister_type);
EXPORT_SYMBOL(xfrm_get_type);
return;
expired:
+ read_unlock(&xp->lock);
km_policy_expired(xp, dir, 1);
xfrm_policy_delete(xp, dir);
xfrm_pol_put(xp);
xfrm_put_type(x->type);
}
kfree(x);
- wake_up(&km_waitq);
}
static void xfrm_state_gc_task(void *data)
x = list_entry(entry, struct xfrm_state, bydst);
xfrm_state_gc_destroy(x);
}
+ wake_up(&km_waitq);
}
static inline unsigned long make_jiffies(long secs)
spin_lock_bh(&xfrm_state_lock);
x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
- if (!x1) {
- x1 = afinfo->find_acq(
- x->props.mode, x->props.reqid, x->id.proto,
- &x->id.daddr, &x->props.saddr, 0);
- if (x1 && x1->id.spi != x->id.spi && x1->id.spi) {
- xfrm_state_put(x1);
- x1 = NULL;
- }
- }
-
- if (x1 && x1->id.spi) {
+ if (x1) {
xfrm_state_put(x1);
x1 = NULL;
err = -EEXIST;
goto out;
}
+ x1 = afinfo->find_acq(
+ x->props.mode, x->props.reqid, x->id.proto,
+ &x->id.daddr, &x->props.saddr, 0);
+
__xfrm_state_insert(x);
err = 0;
for (h=0; h<maxspi-minspi+1; h++) {
spi = minspi + net_random()%(maxspi-minspi+1);
x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
- if (x0 == NULL)
+ if (x0 == NULL) {
+ x->id.spi = htonl(spi);
break;
+ }
xfrm_state_put(x0);
}
- x->id.spi = htonl(spi);
}
if (x->id.spi) {
spin_lock_bh(&xfrm_state_lock);
if (err)
return err;
+ xfrm_probe_algs();
+
x = xfrm_state_construct(p, (struct rtattr **) xfrma, &err);
if (!x)
return err;
# docproc: Preprocess .tmpl file in order to generate .sgml docs
# conmakehash: Create arrays for initializing the kernel console tables
-host-progs := conmakehash kallsyms modpost mk_elfconfig pnmtologo bin2c
-always := $(host-progs) empty.o
-
-modpost-objs := modpost.o file2alias.o sumversion.o
+host-progs := conmakehash kallsyms pnmtologo bin2c
+always := $(host-progs)
subdir-$(CONFIG_MODVERSIONS) += genksyms
+subdir-y += mod
# Let clean descend into subdirs
subdir- += basic lxdialog kconfig package
-
-# dependencies on generated files need to be listed explicitly
-
-$(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o: $(obj)/elfconfig.h
-
-quiet_cmd_elfconfig = MKELF $@
- cmd_elfconfig = $(obj)/mk_elfconfig $(ARCH) < $< > $@
-
-$(obj)/elfconfig.h: $(obj)/empty.o $(obj)/mk_elfconfig FORCE
- $(call if_changed,elfconfig)
-
-targets += elfconfig.h
# Step 2), invoke modpost
# Includes step 3,4
quiet_cmd_modpost = MODPOST
- cmd_modpost = scripts/modpost \
+ cmd_modpost = scripts/mod/modpost \
$(if $(KBUILD_EXTMOD),-i,-o) $(symverfile) \
$(filter-out FORCE,$^)
71, 94, 92, 82, 0, 0, 62, 0, 63, 0,
62, 63, 0, 64, 0, 65, 0, 5, 0, 16,
0, 20, 0, 11, 0, 13, 0, 66, 0, 70,
- 0, 27, 46, 65, 47, 0, 21, 36, 0, 23,
+ 0, 27, 46, 62, 47, 0, 21, 36, 0, 23,
36, 0, 10, 36, 0, 21, 36, 84, 0, 23,
36, 84, 0, 10, 36, 31, 0, 10, 31, 0,
21, 84, 0, 23, 84, 0, 7, 0, 18, 0,
};
static const short yypact[] = {-32768,
- 19,-32768, 175,-32768, 32,-32768,-32768,-32768,-32768,-32768,
+ 15,-32768, 197,-32768, 23,-32768,-32768,-32768,-32768,-32768,
-18,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,
--32768, -30,-32768, -26,-32768,-32768,-32768, -32, -10, -2,
--32768,-32768,-32768,-32768, 2, 428,-32768,-32768,-32768,-32768,
--32768,-32768,-32768,-32768,-32768,-32768,-32768, 34, 12, 79,
--32768, 428, 12,-32768, 455, 33,-32768,-32768, 15, 14,
- 35, 29,-32768, 2, -14, -21,-32768,-32768,-32768, 67,
- 31, 37, 127,-32768,-32768, 2,-32768, 54, 60, 66,
- 69,-32768, 14,-32768,-32768, 2,-32768,-32768,-32768, 84,
--32768, 219,-32768,-32768, 70,-32768, 20, 91, 72, 84,
- -20, 74, 81,-32768,-32768,-32768, 86,-32768, 102,-32768,
- 106,-32768,-32768,-32768,-32768,-32768, 109, 108, 348, 112,
- 126, 117,-32768,-32768, 118,-32768, 122,-32768,-32768,-32768,
--32768, 262,-32768, 31,-32768, 131,-32768,-32768,-32768,-32768,
--32768, 7, 120,-32768, -9,-32768,-32768, 392,-32768,-32768,
- 125, 130,-32768,-32768, 132,-32768, 159,-32768,-32768, 305,
--32768,-32768,-32768,-32768,-32768,-32768, 160, 161,-32768,-32768,
- 174,-32768
+-32768, -28,-32768, -25,-32768,-32768,-32768, -26, -22, -12,
+-32768,-32768,-32768,-32768, 49, 493,-32768,-32768,-32768,-32768,
+-32768,-32768,-32768,-32768,-32768,-32768,-32768, 27, -8, 101,
+-32768, 493, -8,-32768, 493, 10,-32768,-32768, 11, 9,
+ 18, 26,-32768, 49, -15, -13,-32768,-32768,-32768, 25,
+ 24, 48, 149,-32768,-32768, 49,-32768, 414, 39, 40,
+ 47,-32768, 9,-32768,-32768, 49,-32768,-32768,-32768, 66,
+-32768, 241,-32768,-32768, 50,-32768, 5, 65, 42, 66,
+ 17, 56, 55,-32768,-32768,-32768, 60,-32768, 75,-32768,
+ 80,-32768,-32768,-32768,-32768,-32768, 81, 82, 370, 85,
+ 98, 89,-32768,-32768, 88,-32768, 91,-32768,-32768,-32768,
+-32768, 284,-32768, 24,-32768, 103,-32768,-32768,-32768,-32768,
+-32768, 8, 43,-32768, 30,-32768,-32768, 457,-32768,-32768,
+ 92, 93,-32768,-32768, 95,-32768, 96,-32768,-32768, 327,
+-32768,-32768,-32768,-32768,-32768,-32768, 99, 104,-32768,-32768,
+ 148,-32768
};
static const short yypgoto[] = {-32768,
- 208,-32768,-32768,-32768, 158,-32768,-32768, 128, 0, -90,
- -36,-32768, 157,-32768, -70,-32768,-32768, -51, -31,-32768,
- -40,-32768, -125,-32768,-32768, 65, -97,-32768,-32768,-32768,
--32768, -19,-32768,-32768, 143,-32768,-32768, 83, 124, 141,
+ 152,-32768,-32768,-32768, 119,-32768,-32768, 94, 0, -55,
+ -35,-32768,-32768,-32768, -69,-32768,-32768, -56, -30,-32768,
+ -76,-32768, -122,-32768,-32768, 29, -62,-32768,-32768,-32768,
+-32768, -17,-32768,-32768, 105,-32768,-32768, 52, 86, 83,
-32768,-32768,-32768
};
-#define YYLAST 495
-
-
-static const short yytable[] = { 67,
- 99, 119, 35, 65, 54, 49, 152, 155, 84, 53,
- 91, 131, 47, 55, 88, 80, 89, 48, 171, 50,
- 125, 9, 159, 50, 92, 132, 99, 81, 99, 69,
- 18, 114, 87, 77, 168, 56, 160, 58, -89, 27,
- 57, 119, 140, 31, 157, 158, 156, 59, 143, 60,
- 58, 76, 142, -89, 60, 126, 127, 119, 129, 96,
- 59, 50, 60, 99, 68, 97, 95, 60, 79, 119,
- 96, 143, 143, 86, 45, 46, 97, 85, 60, 70,
- 106, 98, 67, 6, 7, 8, 9, 10, 11, 12,
- 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
- 110, 24, 25, 26, 27, 28, 111, 126, 31, 93,
- 94, 96, 112, 116, -19, 113, 133, 97, 32, 60,
- 98, -19, -103, 128, -19, 134, -19, 107, 93, -19,
- 88, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 15, 16, 17, 18, 19, 20, 21, 22, 135, 24,
- 25, 26, 27, 28, 139, 140, 31, 136, 146, 156,
- 147, 148, -19, 154, 149, 142, 32, 60, 150, -19,
- -104, 163, -19, 172, -19, 5, 164, -19, 165, 6,
- 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
- 27, 28, 29, 30, 31, 166, 169, 170, 4, 75,
- -19, 78, 162, 115, 32, 108, 153, -19, 124, 118,
- -19, 0, -19, 6, 7, 8, 9, 10, 11, 12,
+#define YYLAST 533
+
+
+static const short yytable[] = { 78,
+ 67, 99, 35, 84, 65, 125, 54, 49, 155, 152,
+ 53, 80, 47, 88, 171, 89, 9, 48, 91, 55,
+ 127, 50, 129, 56, 50, 18, 114, 99, 81, 99,
+ 57, 69, 92, 87, 27, 77, 119, 168, 31, -89,
+ 126, 50, 67, 140, 96, 79, 58, 156, 131, 143,
+ 97, 76, 60, 142, -89, 60, 59, 68, 60, 95,
+ 85, 159, 132, 96, 99, 45, 46, 93, 94, 97,
+ 86, 60, 143, 143, 98, 160, 119, 126, 140, 157,
+ 158, 96, 156, 67, 58, 111, 112, 97, 142, 60,
+ 60, 106, 119, 113, 59, 116, 60, 128, 133, 134,
+ 98, 70, 93, 88, 119, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 135, 24, 25, 26, 27, 28, 139, 136,
+ 31, 146, 147, 148, 149, 154, -19, 150, 163, 164,
+ 32, 165, 166, -19, -103, 169, -19, 172, -19, 107,
+ 170, -19, 4, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
- 130, 24, 25, 26, 27, 28, 0, 0, 31, 0,
- 0, 0, 0, -82, 0, 0, 0, 0, 32, 0,
- 0, 0, 151, 0, 0, -82, 6, 7, 8, 9,
- 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
- 20, 21, 22, 0, 24, 25, 26, 27, 28, 0,
- 0, 31, 0, 0, 0, 0, -82, 0, 0, 0,
- 0, 32, 0, 0, 0, 167, 0, 0, -82, 6,
- 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, 22, 0, 24, 25, 26,
- 27, 28, 0, 0, 31, 0, 0, 0, 0, -82,
- 0, 0, 0, 0, 32, 0, 0, 0, 0, 0,
- 0, -82, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 0,
- 24, 25, 26, 27, 28, 0, 0, 31, 0, 0,
- 0, 0, 0, 140, 0, 0, 0, 141, 0, 0,
- 0, 0, 0, 142, 0, 60, 6, 7, 8, 9,
- 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
- 20, 21, 22, 0, 24, 25, 26, 27, 28, 0,
- 0, 31, 0, 0, 0, 0, 161, 0, 0, 0,
- 0, 32, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 0,
- 24, 25, 26, 27, 28, 0, 0, 31, 0, 0,
- 7, 8, 9, 10, 11, 0, 13, 32, 15, 16,
- 0, 18, 19, 20, 0, 22, 0, 24, 25, 26,
- 27, 28, 0, 0, 31, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 32
+ 75, 24, 25, 26, 27, 28, 162, 108, 31, 115,
+ 124, 0, 130, 0, -19, 153, 0, 0, 32, 0,
+ 0, -19, -104, 0, -19, 0, -19, 5, 0, -19,
+ 0, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 0, 0, 0,
+ 0, 0, -19, 0, 0, 0, 32, 0, 0, -19,
+ 0, 118, -19, 0, -19, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 0, 24, 25, 26, 27, 28, 0, 0,
+ 31, 0, 0, 0, 0, -82, 0, 0, 0, 0,
+ 32, 0, 0, 0, 151, 0, 0, -82, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 0, 24, 25, 26, 27,
+ 28, 0, 0, 31, 0, 0, 0, 0, -82, 0,
+ 0, 0, 0, 32, 0, 0, 0, 167, 0, 0,
+ -82, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 0, 24,
+ 25, 26, 27, 28, 0, 0, 31, 0, 0, 0,
+ 0, -82, 0, 0, 0, 0, 32, 0, 0, 0,
+ 0, 0, 0, -82, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 0, 24, 25, 26, 27, 28, 0, 0, 31,
+ 0, 0, 0, 0, 0, 140, 0, 0, 0, 141,
+ 0, 0, 0, 0, 0, 142, 0, 60, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 0, 24, 25, 26, 27,
+ 28, 0, 0, 31, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 32, 0, 0, 0, 0, 0, 0,
+ 110, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 0, 24,
+ 25, 26, 27, 28, 0, 0, 31, 0, 0, 0,
+ 0, 161, 0, 0, 0, 0, 32, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 0, 24, 25, 26, 27, 28,
+ 0, 0, 31, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 32
};
-static const short yycheck[] = { 36,
- 71, 92, 3, 35, 24, 36, 132, 1, 60, 36,
- 32, 32, 31, 46, 29, 1, 31, 36, 0, 50,
- 1, 8, 32, 50, 46, 46, 97, 59, 99, 49,
- 17, 83, 64, 53, 160, 46, 46, 36, 32, 26,
- 43, 132, 36, 30, 142, 143, 40, 46, 119, 48,
- 36, 52, 46, 47, 48, 36, 97, 148, 99, 40,
- 46, 50, 48, 134, 31, 46, 36, 48, 36, 160,
- 40, 142, 143, 45, 43, 44, 46, 43, 48, 1,
- 44, 51, 119, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 47, 23, 24, 25, 26, 27, 47, 36, 30, 43,
- 44, 40, 47, 30, 36, 47, 43, 46, 40, 48,
- 51, 43, 44, 33, 46, 45, 48, 1, 43, 51,
- 29, 5, 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 43, 23,
- 24, 25, 26, 27, 47, 36, 30, 49, 47, 40,
- 35, 45, 36, 33, 47, 46, 40, 48, 47, 43,
- 44, 47, 46, 0, 48, 1, 47, 51, 47, 5,
- 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
- 26, 27, 28, 29, 30, 47, 47, 47, 1, 52,
- 36, 55, 148, 86, 40, 73, 134, 43, 95, 1,
- 46, -1, 48, 5, 6, 7, 8, 9, 10, 11,
+static const short yycheck[] = { 55,
+ 36, 71, 3, 60, 35, 1, 24, 36, 1, 132,
+ 36, 1, 31, 29, 0, 31, 8, 36, 32, 46,
+ 97, 50, 99, 46, 50, 17, 83, 97, 59, 99,
+ 43, 49, 46, 64, 26, 53, 92, 160, 30, 32,
+ 36, 50, 78, 36, 40, 36, 36, 40, 32, 119,
+ 46, 52, 48, 46, 47, 48, 46, 31, 48, 36,
+ 43, 32, 46, 40, 134, 43, 44, 43, 44, 46,
+ 45, 48, 142, 143, 51, 46, 132, 36, 36, 142,
+ 143, 40, 40, 119, 36, 47, 47, 46, 46, 48,
+ 48, 44, 148, 47, 46, 30, 48, 33, 43, 45,
+ 51, 1, 43, 29, 160, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 43, 23, 24, 25, 26, 27, 47, 49,
+ 30, 47, 35, 45, 47, 33, 36, 47, 47, 47,
+ 40, 47, 47, 43, 44, 47, 46, 0, 48, 1,
+ 47, 51, 1, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 100, 23, 24, 25, 26, 27, -1, -1, 30, -1,
- -1, -1, -1, 35, -1, -1, -1, -1, 40, -1,
- -1, -1, 1, -1, -1, 47, 5, 6, 7, 8,
- 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
- 19, 20, 21, -1, 23, 24, 25, 26, 27, -1,
- -1, 30, -1, -1, -1, -1, 35, -1, -1, -1,
- -1, 40, -1, -1, -1, 1, -1, -1, 47, 5,
- 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, -1, 23, 24, 25,
- 26, 27, -1, -1, 30, -1, -1, -1, -1, 35,
- -1, -1, -1, -1, 40, -1, -1, -1, -1, -1,
- -1, 47, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
- 23, 24, 25, 26, 27, -1, -1, 30, -1, -1,
- -1, -1, -1, 36, -1, -1, -1, 40, -1, -1,
- -1, -1, -1, 46, -1, 48, 5, 6, 7, 8,
- 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
- 19, 20, 21, -1, 23, 24, 25, 26, 27, -1,
- -1, 30, -1, -1, -1, -1, 35, -1, -1, -1,
- -1, 40, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
- 23, 24, 25, 26, 27, -1, -1, 30, -1, -1,
- 6, 7, 8, 9, 10, -1, 12, 40, 14, 15,
- -1, 17, 18, 19, -1, 21, -1, 23, 24, 25,
- 26, 27, -1, -1, 30, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 40
+ 52, 23, 24, 25, 26, 27, 148, 73, 30, 86,
+ 95, -1, 100, -1, 36, 134, -1, -1, 40, -1,
+ -1, 43, 44, -1, 46, -1, 48, 1, -1, 51,
+ -1, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, -1, -1, -1,
+ -1, -1, 36, -1, -1, -1, 40, -1, -1, 43,
+ -1, 1, 46, -1, 48, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, -1, 23, 24, 25, 26, 27, -1, -1,
+ 30, -1, -1, -1, -1, 35, -1, -1, -1, -1,
+ 40, -1, -1, -1, 1, -1, -1, 47, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, -1, 23, 24, 25, 26,
+ 27, -1, -1, 30, -1, -1, -1, -1, 35, -1,
+ -1, -1, -1, 40, -1, -1, -1, 1, -1, -1,
+ 47, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, -1, 23,
+ 24, 25, 26, 27, -1, -1, 30, -1, -1, -1,
+ -1, 35, -1, -1, -1, -1, 40, -1, -1, -1,
+ -1, -1, -1, 47, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, -1, 23, 24, 25, 26, 27, -1, -1, 30,
+ -1, -1, -1, -1, -1, 36, -1, -1, -1, 40,
+ -1, -1, -1, -1, -1, 46, -1, 48, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, -1, 23, 24, 25, 26,
+ 27, -1, -1, 30, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 40, -1, -1, -1, -1, -1, -1,
+ 47, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, -1, 23,
+ 24, 25, 26, 27, -1, -1, 30, -1, -1, -1,
+ -1, 35, -1, -1, -1, -1, 40, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, -1, 23, 24, 25, 26, 27,
+ -1, -1, 30, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 40
};
/* -*-C-*- Note some compilers choke on comments on `#line' lines. */
#line 3 "/usr/lib/bison.simple"
strncpy(menu_item, item, menu_width);
menu_item[menu_width] = 0;
- j = first_alpha(menu_item, "YyNnMm");
+ j = first_alpha(menu_item, "YyNnMmHh");
/* Clear 'residue' of last item */
wattrset (win, menubox_attr);
if (key < 256 && isalpha(key)) key = tolower(key);
- if (strchr("ynm", key))
+ if (strchr("ynmh", key))
i = max_choice;
else {
for (i = choice+1; i < max_choice; i++) {
- j = first_alpha(items[(scroll+i)*2+1], "YyNnMm");
+ j = first_alpha(items[(scroll+i)*2+1], "YyNnMmHh");
if (key == tolower(items[(scroll+i)*2+1][j]))
break;
}
if (i == max_choice)
for (i = 0; i < max_choice; i++) {
- j = first_alpha(items[(scroll+i)*2+1], "YyNnMm");
+ j = first_alpha(items[(scroll+i)*2+1], "YyNnMmHh");
if (key == tolower(items[(scroll+i)*2+1][j]))
break;
}
MKSPEC := $(srctree)/scripts/package/mkspec
PREV := set -e; cd ..;
+# rpm-pkg
.PHONY: rpm-pkg rpm
-$(objtree)/kernel.spec: $(MKSPEC)
+$(objtree)/kernel.spec: $(MKSPEC) $(srctree)/Makefile
$(CONFIG_SHELL) $(MKSPEC) > $@
rpm-pkg rpm: $(objtree)/kernel.spec
clean-rule += rm -f $(objtree)/kernel.spec
+# binrpm-pkg
+.PHONY: binrpm-pkg
+$(objtree)/binkernel.spec: $(MKSPEC) $(srctree)/Makefile
+ $(CONFIG_SHELL) $(MKSPEC) prebuilt > $@
+
+binrpm-pkg: $(objtree)/binkernel.spec
+ $(MAKE)
+ set -e; \
+ $(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
+ set -e; \
+ mv -f $(objtree)/.tmp_version $(objtree)/.version
+
+ $(RPM) --define "_builddir $(srctree)" --target $(UTS_MACHINE) -bb $<
+
+clean-rule += rm -f $(objtree)/binkernel.spec
+
# Deb target
# ---------------------------------------------------------------------------
#
# ---------------------------------------------------------------------------
help:
@echo ' rpm-pkg - Build the kernel as an RPM package'
+ @echo ' binrpm-pkg - Build an rpm package containing the compiled kernel & modules'
@echo ' deb-pkg - Build the kernel as an deb package'
# Patched for non-x86 by Opencon (L) 2002 <opencon@rio.skydome.net>
#
+# how we were called determines which rpms we build and how we build them
+if [ "$1" = "prebuilt" ]; then
+ PREBUILT=true
+else
+ PREBUILT=false
+fi
+
# starting to output the spec
if [ "`grep CONFIG_DRM=y .config | cut -f2 -d\=`" = "y" ]; then
PROVIDES=kernel-drm
echo "Group: System Environment/Kernel"
echo "Vendor: The Linux Community"
echo "URL: http://www.kernel.org"
+
+if ! $PREBUILT; then
echo -n "Source: kernel-$VERSION.$PATCHLEVEL.$SUBLEVEL"
echo "$EXTRAVERSION.tar.gz" | sed -e "s/-//g"
+fi
+
echo "BuildRoot: /var/tmp/%{name}-%{PACKAGE_VERSION}-root"
echo "Provides: $PROVIDES"
echo "%define __spec_install_post /usr/lib/rpm/brp-compress || :"
echo "%description"
echo "The Linux Kernel, the operating system core itself"
echo ""
+
+if ! $PREBUILT; then
echo "%prep"
echo "%setup -q"
echo ""
+fi
+
echo "%build"
+
+if ! $PREBUILT; then
echo "make clean && make"
echo ""
+fi
+
echo "%install"
echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib $RPM_BUILD_ROOT/lib/modules'
#include <net/ipv6.h>
#include <linux/hugetlb.h>
#include <linux/major.h>
+#include <linux/personality.h>
#include "avc.h"
#include "objsec.h"
if (rc)
return rc;
+ /* Clear any possibly unsafe personality bits on exec: */
+ current->personality &= ~PER_CLEAR_ON_SETID;
+
/* Set the security field to the new SID. */
bsec->sid = newsid;
}
endmenu
menu "Open Sound System"
- depends on SOUND!=n
+ depends on SOUND!=n && (BROKEN || !SPARC64)
config SOUND_PRIME
tristate "Open Sound System (DEPRECATED)"
struct snd_info_entry *entry;
snd_info_buffer_t *buf;
size_t size = 0;
+ loff_t pos;
data = snd_magic_cast(snd_info_private_data_t, file->private_data, return -ENXIO);
snd_assert(data != NULL, return -ENXIO);
+ pos = *offset;
+ if (pos < 0 || (long) pos != pos || (ssize_t) count < 0)
+ return -EIO;
+ if ((unsigned long) pos + (unsigned long) count < (unsigned long) pos)
+ return -EIO;
entry = data->entry;
switch (entry->content) {
case SNDRV_INFO_CONTENT_TEXT:
buf = data->rbuffer;
if (buf == NULL)
return -EIO;
- if (file->f_pos >= (long)buf->size)
+ if (pos >= buf->size)
return 0;
- size = buf->size - file->f_pos;
+ size = buf->size - pos;
size = min(count, size);
- if (copy_to_user(buffer, buf->buffer + file->f_pos, size))
+ if (copy_to_user(buffer, buf->buffer + pos, size))
return -EFAULT;
- file->f_pos += size;
break;
case SNDRV_INFO_CONTENT_DATA:
if (entry->c.ops->read)
- return entry->c.ops->read(entry,
+ size = entry->c.ops->read(entry,
data->file_private_data,
- file, buffer, count);
+ file, buffer, count, pos);
break;
}
+ if ((ssize_t) size > 0)
+ *offset = pos + size;
return size;
}
struct snd_info_entry *entry;
snd_info_buffer_t *buf;
size_t size = 0;
+ loff_t pos;
data = snd_magic_cast(snd_info_private_data_t, file->private_data, return -ENXIO);
snd_assert(data != NULL, return -ENXIO);
entry = data->entry;
+ pos = *offset;
+ if (pos < 0 || (long) pos != pos || (ssize_t) count < 0)
+ return -EIO;
+ if ((unsigned long) pos + (unsigned long) count < (unsigned long) pos)
+ return -EIO;
switch (entry->content) {
case SNDRV_INFO_CONTENT_TEXT:
buf = data->wbuffer;
if (buf == NULL)
return -EIO;
- if (file->f_pos < 0)
- return -EINVAL;
- if (file->f_pos >= (long)buf->len)
+ if (pos >= buf->len)
return -ENOMEM;
- size = buf->len - file->f_pos;
+ size = buf->len - pos;
size = min(count, size);
- if (copy_from_user(buf->buffer + file->f_pos, buffer, size))
+ if (copy_from_user(buf->buffer + pos, buffer, size))
return -EFAULT;
- if ((long)buf->size < file->f_pos + size)
- buf->size = file->f_pos + size;
- file->f_pos += size;
+ if ((long)buf->size < pos + size)
+ buf->size = pos + size;
break;
case SNDRV_INFO_CONTENT_DATA:
if (entry->c.ops->write)
- return entry->c.ops->write(entry,
+ size = entry->c.ops->write(entry,
data->file_private_data,
- file, buffer, count);
+ file, buffer, count, pos);
break;
}
+ if ((ssize_t) size > 0)
+ *offset = pos + size;
return size;
}
else
printk("pcm_oss: read: recovering from SUSPEND\n");
#endif
- ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, 0);
+ ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL);
if (ret < 0)
break;
} else if (runtime->status->state == SNDRV_PCM_STATE_SETUP) {
}
if (ret == -EPIPE) {
if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
- ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, 0);
+ ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
if (ret < 0)
break;
}
else
printk("pcm_oss: readv: recovering from SUSPEND\n");
#endif
- ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, 0);
+ ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL);
if (ret < 0)
break;
} else if (runtime->status->state == SNDRV_PCM_STATE_SETUP) {
snd_pcm_oss_setup_t *psetup = NULL, *csetup = NULL;
int nonblock;
wait_queue_t wait;
-
- printk("application %s uses obsolete OSS audio interface\n",current->comm);
+ static char printed_comm[16];
+
+ if (strncmp(printed_comm, current->comm, 16)) {
+ printk("application %s uses obsolete OSS audio interface\n",
+ current->comm);
+ memcpy(printed_comm, current->comm, 16);
+ }
snd_assert(cardnum >= 0 && cardnum < SNDRV_CARDS, return -ENXIO);
device = SNDRV_MINOR_OSS_DEVICE(minor) == SNDRV_MINOR_OSS_PCM1 ?
const char *snd_pcm_stream_name(snd_pcm_stream_t stream)
{
- snd_assert(stream <= SNDRV_PCM_STREAM_LAST, return 0);
+ snd_assert(stream <= SNDRV_PCM_STREAM_LAST, return NULL);
return snd_pcm_stream_names[stream];
}
const char *snd_pcm_access_name(snd_pcm_access_t access)
{
- snd_assert(access <= SNDRV_PCM_ACCESS_LAST, return 0);
+ snd_assert(access <= SNDRV_PCM_ACCESS_LAST, return NULL);
return snd_pcm_access_names[access];
}
const char *snd_pcm_format_name(snd_pcm_format_t format)
{
- snd_assert(format <= SNDRV_PCM_FORMAT_LAST, return 0);
+ snd_assert(format <= SNDRV_PCM_FORMAT_LAST, return NULL);
return snd_pcm_format_names[format];
}
const char *snd_pcm_subformat_name(snd_pcm_subformat_t subformat)
{
- snd_assert(subformat <= SNDRV_PCM_SUBFORMAT_LAST, return 0);
+ snd_assert(subformat <= SNDRV_PCM_SUBFORMAT_LAST, return NULL);
return snd_pcm_subformat_names[subformat];
}
const char *snd_pcm_tstamp_mode_name(snd_pcm_tstamp_t mode)
{
- snd_assert(mode <= SNDRV_PCM_TSTAMP_LAST, return 0);
+ snd_assert(mode <= SNDRV_PCM_TSTAMP_LAST, return NULL);
return snd_pcm_tstamp_mode_names[mode];
}
const char *snd_pcm_state_name(snd_pcm_state_t state)
{
- snd_assert(state <= SNDRV_PCM_STATE_LAST, return 0);
+ snd_assert(state <= SNDRV_PCM_STATE_LAST, return NULL);
return snd_pcm_state_names[state];
}
}
static long snd_opl4_mem_proc_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *_buf, long count)
+ struct file *file, char __user *_buf,
+ unsigned long count, unsigned long pos)
{
opl4_t *opl4 = snd_magic_cast(opl4_t, entry->private_data, return -ENXIO);
long size;
char* buf;
size = count;
- if (file->f_pos + size > entry->size)
- size = entry->size - file->f_pos;
+ if (pos + size > entry->size)
+ size = entry->size - pos;
if (size > 0) {
buf = vmalloc(size);
if (!buf)
return -ENOMEM;
- snd_opl4_read_memory(opl4, buf, file->f_pos, size);
+ snd_opl4_read_memory(opl4, buf, pos, size);
if (copy_to_user(_buf, buf, size)) {
vfree(buf);
return -EFAULT;
}
vfree(buf);
- file->f_pos += size;
return size;
}
return 0;
}
static long snd_opl4_mem_proc_write(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, const char __user *_buf, long count)
+ struct file *file, const char __user *_buf,
+ unsigned long count, unsigned long pos)
{
opl4_t *opl4 = snd_magic_cast(opl4_t, entry->private_data, return -ENXIO);
long size;
char *buf;
size = count;
- if (file->f_pos + size > entry->size)
- size = entry->size - file->f_pos;
+ if (pos + size > entry->size)
+ size = entry->size - pos;
if (size > 0) {
buf = vmalloc(size);
if (!buf)
vfree(buf);
return -EFAULT;
}
- snd_opl4_write_memory(opl4, buf, file->f_pos, size);
+ snd_opl4_write_memory(opl4, buf, pos, size);
vfree(buf);
- file->f_pos += size;
return size;
}
return 0;
break;
if (snd_rawmidi_transmit(substream, &midi_byte, 1) != 1)
break;
-#if SNDRV_SERIAL_MS124W_MB_NOCOMBO
+#ifdef SNDRV_SERIAL_MS124W_MB_NOCOMBO
/* select exactly one of the four ports */
addr_byte = (1 << (substream->number + 4)) | 0x08;
#else
struct vx_rmh rmh;
int data_mode;
- *pipep = 0;
+ *pipep = NULL;
vx_init_rmh(&rmh, CMD_RES_PIPE);
vx_set_pipe_cmd_params(&rmh, capture, audioid, num_audio);
#if 0 // NYI
{
snd_pcm_runtime_t *runtime = subs->runtime;
vx_core_t *chip = snd_pcm_substream_chip(subs);
- vx_pipe_t *pipe = 0;
+ vx_pipe_t *pipe = NULL;
unsigned int audio;
int err;
pipe = snd_magic_cast(vx_pipe_t, subs->runtime->private_data, return -EINVAL);
if (--pipe->references == 0) {
- chip->playback_pipes[pipe->number] = 0;
+ chip->playback_pipes[pipe->number] = NULL;
vx_free_pipe(chip, pipe);
}
if (! subs->runtime->private_data)
return -EINVAL;
pipe = snd_magic_cast(vx_pipe_t, subs->runtime->private_data, return -EINVAL);
- chip->capture_pipes[pipe->number] = 0;
+ chip->capture_pipes[pipe->number] = NULL;
pipe_out_monitoring = pipe->monitoring_pipe;
if (pipe_out_monitoring) {
if (--pipe_out_monitoring->references == 0) {
vx_free_pipe(chip, pipe_out_monitoring);
- chip->playback_pipes[pipe->number] = 0;
- pipe->monitoring_pipe = 0;
+ chip->playback_pipes[pipe->number] = NULL;
+ pipe->monitoring_pipe = NULL;
}
}
chip->pcm[pcm->device] = NULL;
if (chip->playback_pipes) {
kfree(chip->playback_pipes);
- chip->playback_pipes = 0;
+ chip->playback_pipes = NULL;
}
if (chip->capture_pipes) {
kfree(chip->capture_pipes);
- chip->capture_pipes = 0;
+ chip->capture_pipes = NULL;
}
}
* Linux Video interface
*/
-static int snd_tea575x_do_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, void *arg)
+static int snd_tea575x_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long data)
{
struct video_device *dev = video_devdata(file);
tea575x_t *tea = video_get_drvdata(dev);
+ void __user *arg = (void __user *)data;
switch(cmd) {
case VIDIOCGCAP:
}
}
-static int snd_tea575x_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- return video_usercopy(inode, file, cmd, arg, snd_tea575x_do_ioctl);
-}
-
/*
* initialize all the tea575x chips
*/
config SND_SB16_CSP
bool "Sound Blaster 16/AWE CSP support"
- depends on SND_SB16 || SND_SBAWE
+ depends on (SND_SB16 || SND_SBAWE) && (BROKEN || !PPC)
help
Say 'Y' to include support for CSP core. This special coprocessor
can do variable tasks like various compression and decompression
else
nblock->prev->next = nblock;
up(&alloc->memory_mutex);
- return 0;
+ return NULL;
}
pblock = pblock->next;
}
} gus_proc_private_t;
static long snd_gf1_mem_proc_dump(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf, long count)
+ struct file *file, char __user *buf,
+ unsigned long count, unsigned long pos)
{
long size;
gus_proc_private_t *priv = snd_magic_cast(gus_proc_private_t, entry->private_data, return -ENXIO);
int err;
size = count;
- if (file->f_pos + size > priv->size)
- size = (long)priv->size - file->f_pos;
+ if (pos + size > priv->size)
+ size = (long)priv->size - pos;
if (size > 0) {
- if ((err = snd_gus_dram_read(gus, buf, file->f_pos, size, priv->rom)) < 0)
+ if ((err = snd_gus_dram_read(gus, buf, pos, size, priv->rom)) < 0)
return err;
- file->f_pos += size;
return size;
}
return 0;
emu8k_pcm_t *rec = subs->runtime->private_data;
if (rec)
kfree(rec);
- subs->runtime->private_data = 0;
+ subs->runtime->private_data = NULL;
return 0;
}
runtime->hw.rate_max = 44100;
runtime->hw.channels_max = 2;
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- snd_sb8_hw_constraint_rate_channels, 0,
+ snd_sb8_hw_constraint_rate_channels, NULL,
SNDRV_PCM_HW_PARAM_CHANNELS,
SNDRV_PCM_HW_PARAM_RATE, -1);
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- snd_sb8_hw_constraint_channels_rate, 0,
+ snd_sb8_hw_constraint_channels_rate, NULL,
SNDRV_PCM_HW_PARAM_RATE, -1);
break;
case SB_HW_201:
#else
#define DPRINT(cond, args...) \
if ((dev->debug & (cond)) == (cond)) { \
- snd_printk (##args); \
+ snd_printk (args); \
}
#endif
#else
{ 0x0E, "Bad MIDI channel number" },
{ 0x10, "Download Record Error" },
{ 0x80, "Success" },
- { 0x0, 0x0 }
+ { 0x0 }
};
#define NEEDS_ACK 1
if (cmd == WFC_DOWNLOAD_MULTISAMPLE) {
wfcmd->write_cnt = (unsigned long) rbuf;
- rbuf = 0;
+ rbuf = NULL;
}
DPRINT (WF_DEBUG_CMD, "0x%x [%s] (%d,%d,%d)\n",
wbuf[0] = sample_num & 0x7f;
wbuf[1] = sample_num >> 7;
- if ((x = snd_wavefront_cmd (dev, WFC_DELETE_SAMPLE, 0, wbuf)) == 0) {
+ if ((x = snd_wavefront_cmd (dev, WFC_DELETE_SAMPLE, NULL, wbuf)) == 0) {
dev->sample_status[sample_num] = WF_ST_EMPTY;
}
bptr = munge_int32 (header->number, buf, 2);
munge_buf ((unsigned char *)&header->hdr.p, bptr, WF_PATCH_BYTES);
- if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PATCH, 0, buf)) {
+ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PATCH, NULL, buf)) {
snd_printk ("download patch failed\n");
return -(EIO);
}
buf[0] = header->number;
munge_buf ((unsigned char *)&header->hdr.pr, &buf[1], WF_PROGRAM_BYTES);
- if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PROGRAM, 0, buf)) {
+ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PROGRAM, NULL, buf)) {
snd_printk ("download patch failed\n");
return -(EIO);
}
{
char rbuf[8];
- if (snd_wavefront_cmd (dev, WFC_REPORT_FREE_MEMORY, rbuf, 0)) {
+ if (snd_wavefront_cmd (dev, WFC_REPORT_FREE_MEMORY, rbuf, NULL)) {
snd_printk ("can't get memory stats.\n");
return -1;
} else {
u16 sample_short;
u32 length;
- u16 __user *data_end = 0;
+ u16 __user *data_end = NULL;
unsigned int i;
const unsigned int max_blksize = 4096/2;
unsigned int written;
if (snd_wavefront_cmd (dev,
header->size ?
WFC_DOWNLOAD_SAMPLE : WFC_DOWNLOAD_SAMPLE_HEADER,
- 0, sample_hdr)) {
+ NULL, sample_hdr)) {
snd_printk ("sample %sdownload refused.\n",
header->size ? "" : "header ");
return -(EIO);
blocksize = ((length-written+7)&~0x7);
}
- if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_BLOCK, 0, 0)) {
+ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_BLOCK, NULL, NULL)) {
snd_printk ("download block "
"request refused.\n");
return -(EIO);
munge_int32 (header->hdr.a.FrequencyBias, &alias_hdr[20], 3);
munge_int32 (*(&header->hdr.a.FrequencyBias+1), &alias_hdr[23], 2);
- if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_SAMPLE_ALIAS, 0, alias_hdr)) {
+ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_SAMPLE_ALIAS, NULL, alias_hdr)) {
snd_printk ("download alias failed.\n");
return -(EIO);
}
munge_int32 (((unsigned char *)drum)[i], &drumbuf[1+(i*2)], 2);
}
- if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_EDRUM_PROGRAM, 0, drumbuf)) {
+ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_EDRUM_PROGRAM, NULL, drumbuf)) {
snd_printk ("download drum failed.\n");
return -(EIO);
}
voices[0] = 32;
- if (snd_wavefront_cmd (dev, WFC_SET_NVOICES, 0, voices)) {
+ if (snd_wavefront_cmd (dev, WFC_SET_NVOICES, NULL, voices)) {
snd_printk ("cannot set number of voices to 32.\n");
goto gone_bad;
}
MODULE_PARM_DESC(wss,"change between ACI/WSS-mixer; use 0 and 1 - untested"
" default: do nothing; for PCM1-pro only");
-#if DEBUG
+#ifdef DEBUG
static void print_bits(unsigned char c)
{
int j;
static inline int aci_rawwrite(unsigned char byte)
{
if (busy_wait() >= 0) {
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "aci_rawwrite(%d)\n", byte);
#endif
outb(byte, COMMAND_REGISTER);
if (busy_wait() >= 0) {
byte=inb(STATUS_REGISTER);
-#if DEBUG
+#ifdef DEBUG
printk(KERN_DEBUG "%d = aci_rawread()\n", byte);
#endif
return byte;
} isapnp_ad1816_list[] __initdata = {
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
ISAPNP_VENDOR('A','D','S'), ISAPNP_FUNCTION(0x7150),
- 0 },
+ NULL },
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
ISAPNP_VENDOR('A','D','S'), ISAPNP_FUNCTION(0x7180),
- 0 },
+ NULL },
{0}
};
ISAPNP_VENDOR('G','R','V'), ISAPNP_DEVICE(0x0001),
ISAPNP_VENDOR('G','R','V'), ISAPNP_FUNCTION(0x0000),
0, 0, 0, 1, 0},
- {0}
+ {NULL}
};
static struct isapnp_device_id id_table[] __devinitdata = {
{ "AC97_3D_CONTROL", 0x100 + AC97_3D_CONTROL, 16 },
{ "AC97_MODEM_RATE", 0x100 + AC97_MODEM_RATE, 16 },
{ "AC97_POWER_CONTROL", 0x100 + AC97_POWER_CONTROL, 16 },
- { 0 }
+ { NULL }
};
if (dev == NULL)
ssize_t ret = 0;
DECLARE_WAITQUEUE(wait, current);
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
down(&state->sem);
#if 0
if (dmabuf->mapped) {
ad1889_set_wav_rate(ad1889_dev, 44100);
ad1889_set_wav_fmt(ad1889_dev, AFMT_S16_LE);
AD1889_WRITEW(ad1889_dev, AD_DSWADA, 0x0404); /* attenuation */
- return 0;
+ return nonseekable_open(inode, file);
}
static int ad1889_release(struct inode *inode, struct file *file)
if ((err = ad1889_ac97_init(dev, 0)) != 0)
goto err_free_dsp;
- if (((proc_root = proc_mkdir("driver/ad1889", 0)) == NULL) ||
+ if (((proc_root = proc_mkdir("driver/ad1889", NULL)) == NULL) ||
create_proc_read_entry("ac97", S_IFREG|S_IRUGO, proc_root, ac97_read_proc, dev->ac97_codec) == NULL ||
create_proc_read_entry("info", S_IFREG|S_IRUGO, proc_root, ad1889_read_proc, dev) == NULL)
goto err_free_dsp;
err_free_mem:
ad1889_free_dev(dev);
- pci_set_drvdata(pcidev, 0);
+ pci_set_drvdata(pcidev, NULL);
return -ENODEV;
}
size_t count, loff_t * ppos)
{
struct ali_state *state = (struct ali_state *) file->private_data;
- struct ali_card *card = state ? state->card : 0;
+ struct ali_card *card = state ? state->card : NULL;
struct dmabuf *dmabuf = &state->dmabuf;
ssize_t ret;
unsigned long flags;
#ifdef DEBUG2
printk("ali_audio: ali_read called, count = %d\n", count);
#endif
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (dmabuf->enable & DAC_RUNNING)
const char __user *buffer, size_t count, loff_t * ppos)
{
struct ali_state *state = (struct ali_state *) file->private_data;
- struct ali_card *card = state ? state->card : 0;
+ struct ali_card *card = state ? state->card : NULL;
struct dmabuf *dmabuf = &state->dmabuf;
ssize_t ret;
unsigned long flags;
#ifdef DEBUG2
printk("ali_audio: ali_write called, count = %d\n", count);
#endif
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (dmabuf->enable & ADC_RUNNING)
state->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
outl(0x00000000, card->iobase + ALI_INTERRUPTCR);
outl(0x00000000, card->iobase + ALI_INTERRUPTSR);
- return 0;
+ return nonseekable_open(inode, file);
}
static int ali_release(struct inode *inode, struct file *file)
if (card->ac97_codec[i] != NULL
&& card->ac97_codec[i]->dev_mixer == minor) {
file->private_data = card->ac97_codec[i];
- return 0;
+ return nonseekable_open(inode, file);
}
}
return -ENODEV;
static int au1000_open_mixdev(struct inode *inode, struct file *file)
{
file->private_data = &au1000_state;
- return 0;
+ return nonseekable_open(inode, file);
}
static int au1000_release_mixdev(struct inode *inode, struct file *file)
unsigned long flags;
int cnt, usercnt, avail;
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (db->mapped)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, buffer, count))
dbg("write: count=%d", count);
#endif
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (db->mapped)
return -ENXIO;
if (!access_ok(VERIFY_READ, buffer, count))
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
init_MUTEX(&s->sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int au1000_release(struct inode *inode, struct file *file)
}
VALIDATE_STATE(s);
file->private_data = s;
- return 0;
+ return nonseekable_open(inode, file);
}
static int cm_release_mixdev(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
set_fmt(s, fmtm, fmts);
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int cm_release(struct inode *inode, struct file *file)
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
printk(KERN_INFO "cs4281: cs4281_open_mixdev()- 0\n"));
- return 0;
+ return nonseekable_open(inode, file);
}
printk(KERN_INFO "cs4281: cs4281_read()+ %Zu \n", count));
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
count));
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf_dac(s)))
}
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2,
printk(KERN_INFO "cs4281: cs4281_open()- 0\n"));
- return 0;
+ return nonseekable_open(inode, file);
}
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
ret = 0;
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
ret = 0;
f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ |
FMODE_MIDI_WRITE);
up(&s->open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
tmp &= 0xFFFF;
tmp |= card->pctl;
CS_DBGOUT(CS_PARMS, 6, printk(
- "cs46xx: start_dac() poke card=0x%.08x tmp=0x%.08x addr=0x%.08x \n",
- (unsigned)card, (unsigned)tmp,
- (unsigned)card->ba1.idx[(BA1_PCTL >> 16) & 3]+(BA1_PCTL&0xffff) ) );
+ "cs46xx: start_dac() poke card=%p tmp=0x%.08x addr=%p \n",
+ card, (unsigned)tmp,
+ card->ba1.idx[(BA1_PCTL >> 16) & 3]+(BA1_PCTL&0xffff) ) );
cs461x_poke(card, BA1_PCTL, tmp);
}
spin_unlock_irqrestore(&card->lock, flags);
memset(dmabuf->rawbuf,
(dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80,
(unsigned)hwptr);
- memset((void *)((unsigned)dmabuf->rawbuf +
- dmabuf->dmasize + hwptr - diff),
+ memset((char *)dmabuf->rawbuf +
+ dmabuf->dmasize + hwptr - diff,
(dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80,
diff - hwptr);
}
unsigned ptr;
int cnt;
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
ret = 0;
unsigned ptr;
int cnt;
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
ret = 0;
CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO "cs46xx: CopySamples()+ ") );
CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
- " dst=0x%x src=0x%x count=%d fmt=0x%x\n",
- (unsigned)dst,(unsigned)src,(unsigned)count,(unsigned)fmt) );
+ " dst=%p src=%p count=%d fmt=0x%x\n",
+ dst,src,count,fmt) );
/*
* See if the data should be output as 8-bit unsigned stereo.
return -ENODEV;
dmabuf = &state->dmabuf;
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, buffer, count))
dmabuf->dmasize,dmabuf->count,buffer,ret) );
if (cs_copy_to_user(state, buffer,
- (void *)((unsigned)dmabuf->rawbuf + swptr), cnt, &copied))
+ (char *)dmabuf->rawbuf + swptr, cnt, &copied))
{
if (!ret) ret = -EFAULT;
goto out;
return -EFAULT;
dmabuf = &state->dmabuf;
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
down(&state->sem);
if (dmabuf->mapped)
{
int ret = 0;
unsigned long size;
- CS_DBGOUT(CS_FUNCTION | CS_PARMS, 2, printk("cs46xx: cs_mmap()+ file=0x%x %s %s\n",
- (unsigned)file, vma->vm_flags & VM_WRITE ? "VM_WRITE" : "",
+ CS_DBGOUT(CS_FUNCTION | CS_PARMS, 2, printk("cs46xx: cs_mmap()+ file=%p %s %s\n",
+ file, vma->vm_flags & VM_WRITE ? "VM_WRITE" : "",
vma->vm_flags & VM_READ ? "VM_READ" : "") );
if (vma->vm_flags & VM_WRITE) {
* use the DAC only.
*/
state = card->states[1];
- if(!(unsigned)state)
- {
+ if (!state) {
ret = -EINVAL;
goto out;
}
{
struct cs_card *card = (struct cs_card *)file->private_data;
struct cs_state *state;
- struct dmabuf *dmabuf=0;
+ struct dmabuf *dmabuf=NULL;
unsigned long flags;
audio_buf_info abinfo;
count_info cinfo;
int ret=0;
unsigned int tmp;
- CS_DBGOUT(CS_OPEN | CS_FUNCTION, 2, printk("cs46xx: cs_open()+ file=0x%x %s %s\n",
- (unsigned)file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
+ CS_DBGOUT(CS_OPEN | CS_FUNCTION, 2, printk("cs46xx: cs_open()+ file=%p %s %s\n",
+ file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
file->f_mode & FMODE_READ ? "FMODE_READ" : "") );
list_for_each(entry, &cs46xx_devs)
return ret;
}
CS_DBGOUT(CS_OPEN | CS_FUNCTION, 2, printk("cs46xx: cs_open()- 0\n") );
- return 0;
+ return nonseekable_open(inode, file);
}
static int cs_release(struct inode *inode, struct file *file)
struct dmabuf *dmabuf;
struct cs_state *state;
unsigned int tmp;
- CS_DBGOUT(CS_RELEASE | CS_FUNCTION, 2, printk("cs46xx: cs_release()+ file=0x%x %s %s\n",
- (unsigned)file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
+ CS_DBGOUT(CS_RELEASE | CS_FUNCTION, 2, printk("cs46xx: cs_release()+ file=%p %s %s\n",
+ file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
file->f_mode & FMODE_READ ? "FMODE_READ" : "") );
if (!(file->f_mode & (FMODE_WRITE | FMODE_READ)))
{
unsigned int tmp;
CS_DBGOUT(CS_PM | CS_FUNCTION, 4,
- printk("cs46xx: cs46xx_suspend()+ flags=0x%x s=0x%x\n",
- (unsigned)card->pm.flags,(unsigned)card));
+ printk("cs46xx: cs46xx_suspend()+ flags=0x%x s=%p\n",
+ (unsigned)card->pm.flags,card));
/*
* check the current state, only suspend if IDLE
*/
CS_INC_USE_COUNT(&card->mixer_use_cnt);
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
printk(KERN_INFO "cs46xx: cs_open_mixdev()- 0\n"));
- return 0;
+ return nonseekable_open(inode, file);
}
static int cs_release_mixdev(struct inode *inode, struct file *file)
CS_DBGOUT(CS_FUNCTION | CS_INIT, 2, printk(KERN_INFO
"cs46xx: cs_ac97_init()- codec number %d not found\n",
num_ac97) );
- card->ac97_codec[num_ac97] = 0;
+ card->ac97_codec[num_ac97] = NULL;
break;
}
CS_DBGOUT(CS_FUNCTION | CS_INIT, 2, printk(KERN_INFO
card->ac97_codec[num_ac97] = codec;
CS_DBGOUT(CS_FUNCTION | CS_INIT, 2, printk(KERN_INFO
- "cs46xx: cs_ac97_init() ac97_codec[%d] set to 0x%x\n",
+ "cs46xx: cs_ac97_init() ac97_codec[%d] set to %p\n",
(unsigned int)num_ac97,
- (unsigned int)codec));
+ codec));
/* if there is no secondary codec at all, don't probe any more */
if (!ready_2nd)
{
card->ba1.name.reg = ioremap_nocache(card->ba1_addr + BA1_SP_REG, CS461X_BA1_REG_SIZE);
CS_DBGOUT(CS_INIT, 4, printk(KERN_INFO
- "cs46xx: card=0x%x card->ba0=0x%.08x\n",(unsigned)card,(unsigned)card->ba0) );
+ "cs46xx: card=%p card->ba0=%p\n",card,card->ba0) );
CS_DBGOUT(CS_INIT, 4, printk(KERN_INFO
- "cs46xx: card->ba1=0x%.08x 0x%.08x 0x%.08x 0x%.08x\n",
- (unsigned)card->ba1.name.data0,
- (unsigned)card->ba1.name.data1,
- (unsigned)card->ba1.name.pmem,
- (unsigned)card->ba1.name.reg) );
+ "cs46xx: card->ba1=%p %p %p %p\n",
+ card->ba1.name.data0,
+ card->ba1.name.data1,
+ card->ba1.name.pmem,
+ card->ba1.name.reg) );
if(card->ba0 == 0 || card->ba1.name.data0 == 0 ||
card->ba1.name.data1 == 0 || card->ba1.name.pmem == 0 ||
if (pmdev)
{
CS_DBGOUT(CS_INIT | CS_PM, 4, printk(KERN_INFO
- "cs46xx: probe() pm_register() succeeded (0x%x).\n",
- (unsigned)pmdev));
+ "cs46xx: probe() pm_register() succeeded (%p).\n",
+ pmdev));
pmdev->data = card;
}
else
{
CS_DBGOUT(CS_INIT | CS_PM | CS_ERROR, 2, printk(KERN_INFO
- "cs46xx: probe() pm_register() failed (0x%x).\n",
- (unsigned)pmdev));
+ "cs46xx: probe() pm_register() failed (%p).\n",
+ pmdev));
card->pm.flags |= CS46XX_PM_NOT_REGISTERED;
}
- CS_DBGOUT(CS_PM, 9, printk(KERN_INFO "cs46xx: pm.flags=0x%x card=0x%x\n",
- (unsigned)card->pm.flags,(unsigned)card));
+ CS_DBGOUT(CS_PM, 9, printk(KERN_INFO "cs46xx: pm.flags=0x%x card=%p\n",
+ (unsigned)card->pm.flags,card));
CS_DBGOUT(CS_INIT | CS_FUNCTION, 2, printk(KERN_INFO
"cs46xx: probe()- device allocated successfully\n"));
struct cs_card *card;
CS_DBGOUT(CS_PM, 2, printk(KERN_INFO
- "cs46xx: cs46xx_pm_callback dev=0x%x rqst=0x%x card=%d\n",
- (unsigned)dev,(unsigned)rqst,(unsigned)data));
+ "cs46xx: cs46xx_pm_callback dev=%p rqst=0x%x card=%p\n",
+ dev,(unsigned)rqst,data));
card = (struct cs_card *) dev->data;
if (card) {
switch(rqst) {
*/
static int cs46xx_suspend_tbl(struct pci_dev *pcidev, u32 state);
static int cs46xx_resume_tbl(struct pci_dev *pcidev);
-#define cs_pm_register(a, b, c) 0
+#define cs_pm_register(a, b, c) NULL
#define cs_pm_unregister_all(a)
#define CS46XX_SUSPEND_TBL cs46xx_suspend_tbl
#define CS46XX_RESUME_TBL cs46xx_resume_tbl
#ifndef _dmasound_h_
/*
- * linux/drivers/sound/dmasound/dmasound.h
+ * linux/sound/oss/dmasound/dmasound.h
*
*
* Minor numbers for the sound driver.
#define le2be16dbl(x) (((x)<<8 & 0xff00ff00) | ((x)>>8 & 0x00ff00ff))
#define IOCTL_IN(arg, ret) \
- do { int error = get_user(ret, (int *)(arg)); \
+ do { int error = get_user(ret, (int __user *)(arg)); \
if (error) return error; \
} while (0)
-#define IOCTL_OUT(arg, ret) ioctl_return((int *)(arg), ret)
+#define IOCTL_OUT(arg, ret) ioctl_return((int __user *)(arg), ret)
-static inline int ioctl_return(int *addr, int value)
+static inline int ioctl_return(int __user *addr, int value)
{
return value < 0 ? value : put_user(value, addr);
}
*/
typedef struct {
- ssize_t (*ct_ulaw)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_alaw)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_s8)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_u8)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_s16be)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_u16be)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_s16le)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
- ssize_t (*ct_u16le)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_ulaw)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_alaw)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_s8)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_u8)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_s16be)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_u16be)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_s16le)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_u16le)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
} TRANS;
struct sound_settings {
/*
- * linux/drivers/sound/dmasound/dmasound_atari.c
+ * linux/sound/oss/dmasound/dmasound_atari.c
*
* Atari TT and Falcon DMA Sound Driver
*
- * See linux/drivers/sound/dmasound/dmasound_core.c for copyright and credits
+ * See linux/sound/oss/dmasound/dmasound_core.c for copyright and credits
* prior to 28/01/2001
*
* 28/01/2001 [0.1] Iain Sandoe
/*
- * linux/drivers/sound/dmasound/dmasound_awacs.c
+ * linux/sound/oss/dmasound/dmasound_awacs.c
*
* PowerMac `AWACS' and `Burgundy' DMA Sound Driver
* with some limited support for DACA & Tumbler
*
- * See linux/drivers/sound/dmasound/dmasound_core.c for copyright and
+ * See linux/sound/oss/dmasound/dmasound_core.c for copyright and
* history prior to 2001/01/26.
*
* 26/01/2001 ed 0.1 Iain Sandoe
#undef IOCTL_OUT
#define IOCTL_IN(arg, ret) \
- rc = get_user(ret, (int *)(arg)); \
+ rc = get_user(ret, (int __user *)(arg)); \
if (rc) break;
#define IOCTL_OUT(arg, ret) \
- ioctl_return2((int *)(arg), ret)
+ ioctl_return2((int __user *)(arg), ret)
-static inline int ioctl_return2(int *addr, int value)
+static inline int ioctl_return2(int __user *addr, int value)
{
return value < 0 ? value : put_user(value, addr);
}
write_audio_gpio(gpio_audio_reset, !gpio_audio_reset_pol);
msleep(100);
if (gpio_headphone_irq) {
- if (request_irq(gpio_headphone_irq,headphone_intr,0,"Headphone detect",0) < 0) {
+ if (request_irq(gpio_headphone_irq,headphone_intr,0,"Headphone detect",NULL) < 0) {
printk(KERN_ERR "tumbler: Can't request headphone interrupt\n");
gpio_headphone_irq = 0;
} else {
val = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, gpio_headphone_detect, 0);
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, gpio_headphone_detect, val | 0x80);
/* Trigger it */
- headphone_intr(0,0,0);
+ headphone_intr(0,NULL,NULL);
}
}
if (!gpio_headphone_irq) {
tas_dmasound_cleanup(void)
{
if (gpio_headphone_irq)
- free_irq(gpio_headphone_irq, 0);
+ free_irq(gpio_headphone_irq, NULL);
return 0;
}
static int
tas_mixer_ioctl(u_int cmd, u_long arg)
{
+ int __user *argp = (int __user *)arg;
int data;
int rc;
if ((cmd & ~0xff) == MIXER_WRITE(0) &&
tas_supported_mixers() & (1<<(cmd & 0xff))) {
- rc = get_user(data, (int *)(arg));
+ rc = get_user(data, argp);
if (rc<0) return rc;
tas_set_mixer_level(cmd & 0xff, data);
tas_get_mixer_level(cmd & 0xff, &data);
- return ioctl_return2((int *)(arg), data);
+ return ioctl_return2(argp, data);
}
if ((cmd & ~0xff) == MIXER_READ(0) &&
tas_supported_mixers() & (1<<(cmd & 0xff))) {
tas_get_mixer_level(cmd & 0xff, &data);
- return ioctl_return2((int *)(arg), data);
+ return ioctl_return2(argp, data);
}
switch(cmd) {
static int __init PMacIrqInit(void)
{
if (awacs)
- if (request_irq(awacs_irq, pmac_awacs_intr, 0, "Built-in Sound misc", 0))
+ if (request_irq(awacs_irq, pmac_awacs_intr, 0, "Built-in Sound misc", NULL))
return 0;
- if (request_irq(awacs_tx_irq, pmac_awacs_tx_intr, 0, "Built-in Sound out", 0)
- || request_irq(awacs_rx_irq, pmac_awacs_rx_intr, 0, "Built-in Sound in", 0))
+ if (request_irq(awacs_tx_irq, pmac_awacs_tx_intr, 0, "Built-in Sound out", NULL)
+ || request_irq(awacs_rx_irq, pmac_awacs_rx_intr, 0, "Built-in Sound in", NULL))
return 0;
return 1;
}
msleep(200);
}
if (awacs)
- free_irq(awacs_irq, 0);
- free_irq(awacs_tx_irq, 0);
- free_irq(awacs_rx_irq, 0);
+ free_irq(awacs_irq, NULL);
+ free_irq(awacs_tx_irq, NULL);
+ free_irq(awacs_rx_irq, NULL);
if (awacs)
iounmap((void *)awacs);
write_audio_gpio(gpio_audio_reset, !gpio_audio_reset_pol);
msleep(150);
tas_leave_sleep(); /* Stub for now */
- headphone_intr(0,0,0);
+ headphone_intr(0,NULL,NULL);
break;
case AWACS_DACA:
msleep(10); /* Check this !!! */
sound_device_id = 0;
/* device ID appears post g3 b&w */
- prop = (unsigned int *)get_property(info, "device-id", 0);
+ prop = (unsigned int *)get_property(info, "device-id", NULL);
if (prop != 0)
sound_device_id = *prop;
} else if (is_pbook_g3) {
struct device_node* mio;
- macio_base = 0;
+ macio_base = NULL;
for (mio = io->parent; mio; mio = mio->parent) {
if (strcmp(mio->name, "mac-io") == 0
&& mio->n_addrs > 0) {
/*
- * linux/drivers/sound/dmasound/dmasound_core.c
+ * linux/sound/oss/dmasound/dmasound_core.c
*
*
* OSS/Free compatible Atari TT/Falcon and Amiga DMA sound driver for
return stereo;
}
-static ssize_t sound_copy_translate(TRANS *trans, const u_char *userPtr,
+static ssize_t sound_copy_translate(TRANS *trans, const u_char __user *userPtr,
size_t userCount, u_char frame[],
ssize_t *frameUsed, ssize_t frameLeft)
{
- ssize_t (*ct_func)(const u_char *, size_t, u_char *, ssize_t *, ssize_t);
+ ssize_t (*ct_func)(const u_char __user *, size_t, u_char *, ssize_t *, ssize_t);
switch (dmasound.soft.format) {
case AFMT_MU_LAW:
strlcpy(info.id, dmasound.mach.name2, sizeof(info.id));
strlcpy(info.name, dmasound.mach.name2, sizeof(info.name));
info.modify_counter = mixer.modify_counter;
- if (copy_to_user((int *)arg, &info, sizeof(info)))
+ if (copy_to_user((void __user *)arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
while (i--)
dmasound.mach.dma_free(sq->buffers[i], size);
kfree(sq->buffers);
- sq->buffers = 0;
+ sq->buffers = NULL;
return -ENOMEM;
}
}
static int sq_setup(struct sound_queue *sq)
{
- int (*setup_func)(void) = 0;
+ int (*setup_func)(void) = NULL;
int hard_frame ;
if (sq->locked) { /* are we already set? - and not changeable */
dmasound.mach.play();
}
-static ssize_t sq_write(struct file *file, const char *src, size_t uLeft,
+static ssize_t sq_write(struct file *file, const char __user *src, size_t uLeft,
loff_t *ppos)
{
ssize_t uWritten = 0;
* it and restart the DMA.
*/
-static ssize_t sq_read(struct file *file, char *dst, size_t uLeft,
+static ssize_t sq_read(struct file *file, char __user *dst, size_t uLeft,
loff_t *ppos)
{
info.fragstotal = write_sq.max_active;
info.fragsize = write_sq.user_frag_size;
info.bytes = info.fragments * info.fragsize;
- if (copy_to_user((void *)arg, &info, sizeof(info)))
+ if (copy_to_user((void __user *)arg, &info, sizeof(info)))
return -EFAULT;
return 0;
} else
return 0;
}
-static ssize_t state_read(struct file *file, char *buf, size_t count,
+static ssize_t state_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
int n = state.len - state.ptr;
/*
- * linux/drivers/sound/dmasound/dmasound_paula.c
+ * linux/sound/oss/dmasound/dmasound_paula.c
*
* Amiga `Paula' DMA Sound Driver
*
- * See linux/drivers/sound/dmasound/dmasound_core.c for copyright and credits
+ * See linux/sound/oss/dmasound/dmasound_core.c for copyright and credits
* prior to 28/01/2001
*
* 28/01/2001 [0.1] Iain Sandoe
/*
- * linux/drivers/sound/dmasound/dmasound_q40.c
+ * linux/sound/oss/dmasound/dmasound_q40.c
*
* Q40 DMA Sound Driver
*
- * See linux/drivers/sound/dmasound/dmasound_core.c for copyright and credits
+ * See linux/sound/oss/dmasound/dmasound_core.c for copyright and credits
* prior to 28/01/2001
*
* 28/01/2001 [0.1] Iain Sandoe
{
int rc;
struct tas_biquad_ctrl_t biquad;
+ void __user *argp = (void __user *)arg;
- if (copy_from_user((void *)&biquad, (const void *)arg, sizeof(struct tas_biquad_ctrl_t))) {
+ if (copy_from_user(&biquad, argp, sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
rc=tas3001c_read_biquad(self, biquad.channel, biquad.filter, &biquad.data);
if (rc != 0) return rc;
- if (copy_to_user((void *)arg, (const void *)&biquad, sizeof(struct tas_biquad_ctrl_t))) {
+ if (copy_to_user(argp, &biquad, sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
int i,j;
char sync_required[2][6];
struct tas_biquad_ctrl_t biquad;
+ struct tas_biquad_ctrl_list_t __user *argp = (void __user *)arg;
memset(sync_required,0,sizeof(sync_required));
- if (copy_from_user((void *)&filter_count,
- (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t,filter_count),
- sizeof(int))) {
+ if (copy_from_user(&filter_count, &argp->filter_count, sizeof(int)))
return -EFAULT;
- }
- if (copy_from_user((void *)&flags,
- (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t,flags),
- sizeof(int))) {
+ if (copy_from_user(&flags, &argp->flags, sizeof(int)))
return -EFAULT;
- }
if (cmd & SIOC_IN) {
}
for (i=0; i < filter_count; i++) {
- if (copy_from_user((void *)&biquad,
- (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t, biquads[i]),
+ if (copy_from_user(&biquad, &argp->biquads[i],
sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
rc=tas3001c_read_biquad(self, biquad.channel, biquad.filter, &biquad.data);
if (rc != 0) return rc;
- if (copy_to_user((void *)arg + offsetof(struct tas_biquad_ctrl_list_t, biquads[i]),
- (const void *)&biquad,
+ if (copy_to_user(&argp->biquads[i], &biquad,
sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
{
int rc;
struct tas_drce_ctrl_t drce_ctrl;
+ void __user *argp = (void __user *)arg;
- if (copy_from_user((void *)&drce_ctrl,
- (const void *)arg,
- sizeof(struct tas_drce_ctrl_t))) {
+ if (copy_from_user(&drce_ctrl, argp, sizeof(struct tas_drce_ctrl_t)))
return -EFAULT;
- }
#ifdef DEBUG_DRCE
printk("DRCE IOCTL: input [ FLAGS:%x ENABLE:%x THRESH:%x\n",
if (drce_ctrl.flags & TAS_DRCE_THRESHOLD)
drce_ctrl.data.threshold = self->drce_state.threshold;
- if (copy_to_user((void *)arg,
- (const void *)&drce_ctrl,
+ if (copy_to_user(argp, &drce_ctrl,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
u_int cmd,
u_long arg)
{
+ uint __user *argp = (void __user *)arg;
switch (cmd) {
case TAS_READ_EQ:
case TAS_WRITE_EQ:
return tas3001c_eq_list_rw(self, cmd, arg);
case TAS_READ_EQ_FILTER_COUNT:
- put_user(TAS3001C_BIQUAD_FILTER_COUNT, (uint *)(arg));
+ put_user(TAS3001C_BIQUAD_FILTER_COUNT, argp);
return 0;
case TAS_READ_EQ_CHANNEL_COUNT:
- put_user(TAS3001C_BIQUAD_CHANNEL_COUNT, (uint *)(arg));
+ put_user(TAS3001C_BIQUAD_CHANNEL_COUNT, argp);
return 0;
case TAS_READ_DRCE:
return tas3001c_drce_rw(self, cmd, arg);
case TAS_READ_DRCE_CAPS:
- put_user(TAS_DRCE_ENABLE | TAS_DRCE_THRESHOLD, (uint *)(arg));
+ put_user(TAS_DRCE_ENABLE | TAS_DRCE_THRESHOLD, argp);
return 0;
case TAS_READ_DRCE_MIN:
case TAS_READ_DRCE_MAX: {
struct tas_drce_ctrl_t drce_ctrl;
- if (copy_from_user((void *)&drce_ctrl,
- (const void *)arg,
+ if (copy_from_user(&drce_ctrl, argp,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
}
}
- if (copy_to_user((void *)arg,
- (const void *)&drce_ctrl,
+ if (copy_to_user(argp, &drce_ctrl,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
static struct tas_drce_t eqp_0e_2_1_drce = {
.enable = 1,
- .above { .val = 3.0 * (1<<8), .expand = 0 },
- .below { .val = 1.0 * (1<<8), .expand = 0 },
- .threshold -15.33 * (1<<8),
- .energy 2.4 * (1<<12),
- .attack 0.013 * (1<<12),
- .decay 0.212 * (1<<12),
+ .above = { .val = 3.0 * (1<<8), .expand = 0 },
+ .below = { .val = 1.0 * (1<<8), .expand = 0 },
+ .threshold = -15.33 * (1<<8),
+ .energy = 2.4 * (1<<12),
+ .attack = 0.013 * (1<<12),
+ .decay = 0.212 * (1<<12),
};
static struct tas_biquad_ctrl_t eqp_0e_2_1_biquads[]={
u_int cmd,
u_long arg)
{
+ void __user *argp = (void __user *)arg;
int rc;
struct tas_biquad_ctrl_t biquad;
- if (copy_from_user((void *)&biquad, (const void *)arg, sizeof(struct tas_biquad_ctrl_t))) {
+ if (copy_from_user((void *)&biquad, argp, sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
rc=tas3004_read_biquad(self, biquad.channel, biquad.filter, &biquad.data);
if (rc != 0) return rc;
- if (copy_to_user((void *)arg, (const void *)&biquad, sizeof(struct tas_biquad_ctrl_t))) {
+ if (copy_to_user(argp, &biquad, sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
int i,j;
char sync_required[TAS3004_BIQUAD_CHANNEL_COUNT][TAS3004_BIQUAD_FILTER_COUNT];
struct tas_biquad_ctrl_t biquad;
+ struct tas_biquad_ctrl_list_t __user *argp = (void __user *)arg;
memset(sync_required,0,sizeof(sync_required));
- if (copy_from_user((void *)&filter_count,
- (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t,filter_count),
- sizeof(int))) {
+ if (copy_from_user(&filter_count, &argp->filter_count, sizeof(int)))
return -EFAULT;
- }
- if (copy_from_user((void *)&flags,
- (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t,flags),
- sizeof(int))) {
+ if (copy_from_user(&flags, &argp->flags, sizeof(int)))
return -EFAULT;
- }
if (cmd & SIOC_IN) {
}
for (i=0; i < filter_count; i++) {
- if (copy_from_user((void *)&biquad,
- (const void *)arg + offsetof(struct tas_biquad_ctrl_list_t, biquads[i]),
+ if (copy_from_user(&biquad, &argp->biquads[i],
sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
rc=tas3004_read_biquad(self, biquad.channel, biquad.filter, &biquad.data);
if (rc != 0) return rc;
- if (copy_to_user((void *)arg + offsetof(struct tas_biquad_ctrl_list_t, biquads[i]),
- (const void *)&biquad,
+ if (copy_to_user(&argp->biquads[i], &biquad,
sizeof(struct tas_biquad_ctrl_t))) {
return -EFAULT;
}
{
int rc;
struct tas_drce_ctrl_t drce_ctrl;
+ void __user *argp = (void __user *)arg;
- if (copy_from_user((void *)&drce_ctrl,
- (const void *)arg,
- sizeof(struct tas_drce_ctrl_t))) {
+ if (copy_from_user(&drce_ctrl, argp, sizeof(struct tas_drce_ctrl_t)))
return -EFAULT;
- }
#ifdef DEBUG_DRCE
printk("DRCE: input [ FLAGS:%x ENABLE:%x ABOVE:%x/%x BELOW:%x/%x THRESH:%x ENERGY:%x ATTACK:%x DECAY:%x\n",
if (drce_ctrl.flags & TAS_DRCE_DECAY)
drce_ctrl.data.decay = self->drce_state.decay;
- if (copy_to_user((void *)arg,
- (const void *)&drce_ctrl,
+ if (copy_to_user(argp, &drce_ctrl,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
u_int cmd,
u_long arg)
{
+ uint __user *argp = (void __user *)arg;
switch (cmd) {
case TAS_READ_EQ:
case TAS_WRITE_EQ:
return tas3004_eq_list_rw(self, cmd, arg);
case TAS_READ_EQ_FILTER_COUNT:
- put_user(TAS3004_BIQUAD_FILTER_COUNT, (uint *)(arg));
+ put_user(TAS3004_BIQUAD_FILTER_COUNT, argp);
return 0;
case TAS_READ_EQ_CHANNEL_COUNT:
- put_user(TAS3004_BIQUAD_CHANNEL_COUNT, (uint *)(arg));
+ put_user(TAS3004_BIQUAD_CHANNEL_COUNT, argp);
return 0;
case TAS_READ_DRCE:
TAS_DRCE_ENERGY |
TAS_DRCE_ATTACK |
TAS_DRCE_DECAY,
- (uint *)(arg));
+ argp);
return 0;
case TAS_READ_DRCE_MIN:
struct tas_drce_ctrl_t drce_ctrl;
const struct tas_drce_t *drce_copy;
- if (copy_from_user((void *)&drce_ctrl,
- (const void *)arg,
+ if (copy_from_user(&drce_ctrl, argp,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
drce_ctrl.data.decay=drce_copy->decay;
}
- if (copy_to_user((void *)arg,
- (const void *)&drce_ctrl,
+ if (copy_to_user(argp, &drce_ctrl,
sizeof(struct tas_drce_ctrl_t))) {
return -EFAULT;
}
/*
- * linux/drivers/sound/dmasound/trans_16.c
+ * linux/sound/oss/dmasound/trans_16.c
*
* 16 bit translation routines. Only used by Power mac at present.
*
- * See linux/drivers/sound/dmasound/dmasound_core.c for copyright and
+ * See linux/sound/oss/dmasound/dmasound_core.c for copyright and
* history prior to 08/02/2001.
*
* 08/02/2001 Iain Sandoe
static short dmasound_alaw2dma16[] ;
static short dmasound_ulaw2dma16[] ;
-static ssize_t pmac_ct_law(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_s8(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_u8(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_s16(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_s16(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_u16(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_u16(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ctx_law(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ctx_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ctx_s8(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ctx_u8(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ctx_s16(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s16(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ctx_u16(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u16(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_s16_read(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_s16_read(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t pmac_ct_u16_read(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_u16_read(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static int expand_data; /* Data for expanding */
-static ssize_t pmac_ct_law(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
}
-static ssize_t pmac_ct_s8(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
}
-static ssize_t pmac_ct_u8(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
}
-static ssize_t pmac_ct_s16(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_s16(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
userCount >>= (stereo? 2: 1);
used = count = min_t(unsigned long, userCount, frameLeft);
if (!stereo) {
- short *up = (short *) userPtr;
+ short __user *up = (short __user *) userPtr;
while (count > 0) {
short data;
if (get_user(data, up++))
return stereo? used * 4: used * 2;
}
-static ssize_t pmac_ct_u16(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_u16(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
int mask = (dmasound.soft.format == AFMT_U16_LE? 0x0080: 0x8000);
int stereo = dmasound.soft.stereo;
short *fp = (short *) &frame[*frameUsed];
- short *up = (short *) userPtr;
+ short __user *up = (short __user *) userPtr;
frameLeft >>= 2;
userCount >>= (stereo? 2: 1);
}
-static ssize_t pmac_ctx_law(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ctx_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
return stereo? utotal * 2: utotal;
}
-static ssize_t pmac_ctx_s8(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
}
-static ssize_t pmac_ctx_u8(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
}
-static ssize_t pmac_ctx_s16(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s16(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
unsigned int *p = (unsigned int *) &frame[*frameUsed];
unsigned int data = expand_data;
- unsigned short *up = (unsigned short *) userPtr;
+ unsigned short __user *up = (unsigned short __user *) userPtr;
int bal = expand_bal;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int stereo = dmasound.soft.stereo;
}
-static ssize_t pmac_ctx_u16(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u16(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
int mask = (dmasound.soft.format == AFMT_U16_LE? 0x0080: 0x8000);
unsigned int *p = (unsigned int *) &frame[*frameUsed];
unsigned int data = expand_data;
- unsigned short *up = (unsigned short *) userPtr;
+ unsigned short __user *up = (unsigned short __user *) userPtr;
int bal = expand_bal;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int stereo = dmasound.soft.stereo;
/* data in routines... */
-static ssize_t pmac_ct_s8_read(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_s8_read(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
val = *p++;
val = (val * software_input_volume) >> 7;
data = val >> 8;
- if (put_user(data, (u_char *)userPtr++))
+ if (put_user(data, (u_char __user *)userPtr++))
return -EFAULT;
if (stereo) {
val = *p;
val = (val * software_input_volume) >> 7;
data = val >> 8;
- if (put_user(data, (u_char *)userPtr++))
+ if (put_user(data, (u_char __user *)userPtr++))
return -EFAULT;
}
p++;
}
-static ssize_t pmac_ct_u8_read(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_u8_read(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
val = *p++;
val = (val * software_input_volume) >> 7;
data = (val >> 8) ^ 0x80;
- if (put_user(data, (u_char *)userPtr++))
+ if (put_user(data, (u_char __user *)userPtr++))
return -EFAULT;
if (stereo) {
val = *p;
val = (val * software_input_volume) >> 7;
data = (val >> 8) ^ 0x80;
- if (put_user(data, (u_char *)userPtr++))
+ if (put_user(data, (u_char __user *)userPtr++))
return -EFAULT;
}
p++;
return stereo? used * 2: used;
}
-static ssize_t pmac_ct_s16_read(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_s16_read(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
ssize_t count, used;
int stereo = dmasound.soft.stereo;
short *fp = (short *) &frame[*frameUsed];
- short *up = (short *) userPtr;
+ short __user *up = (short __user *) userPtr;
frameLeft >>= 2;
userCount >>= (stereo? 2: 1);
return stereo? used * 4: used * 2;
}
-static ssize_t pmac_ct_u16_read(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ct_u16_read(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
int mask = (dmasound.soft.format == AFMT_U16_LE? 0x0080: 0x8000);
int stereo = dmasound.soft.stereo;
short *fp = (short *) &frame[*frameUsed];
- short *up = (short *) userPtr;
+ short __user *up = (short __user *) userPtr;
frameLeft >>= 2;
userCount >>= (stereo? 2: 1);
/* data in routines (reducing speed)... */
-static ssize_t pmac_ctx_s8_read(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s8_read(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
p++;
if (bal < 0) {
data = vall >> 8;
- if (put_user(data, (u_char *)userPtr++))
+ if (put_user(data, (u_char __user *)userPtr++))
return -EFAULT;
if (stereo) {
data = valr >> 8;
- if (put_user(data, (u_char *)userPtr++))
+ if (put_user(data, (u_char __user *)userPtr++))
return -EFAULT;
}
userCount--;
}
-static ssize_t pmac_ctx_u8_read(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u8_read(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
p++;
if (bal < 0) {
data = (vall >> 8) ^ 0x80;
- if (put_user(data, (u_char *)userPtr++))
+ if (put_user(data, (u_char __user *)userPtr++))
return -EFAULT;
if (stereo) {
data = (valr >> 8) ^ 0x80;
- if (put_user(data, (u_char *)userPtr++))
+ if (put_user(data, (u_char __user *)userPtr++))
return -EFAULT;
}
userCount--;
return stereo? utotal * 2: utotal;
}
-static ssize_t pmac_ctx_s16_read(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ctx_s16_read(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
int bal = expand_read_bal;
short *fp = (short *) &frame[*frameUsed];
- short *up = (short *) userPtr;
+ short __user *up = (short __user *) userPtr;
int stereo = dmasound.soft.stereo;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int utotal, ftotal;
return stereo? utotal * 4: utotal * 2;
}
-static ssize_t pmac_ctx_u16_read(const u_char *userPtr, size_t userCount,
+static ssize_t pmac_ctx_u16_read(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
int bal = expand_read_bal;
int mask = (dmasound.soft.format == AFMT_U16_LE? 0x0080: 0x8000);
short *fp = (short *) &frame[*frameUsed];
- short *up = (short *) userPtr;
+ short __user *up = (short __user *) userPtr;
int stereo = dmasound.soft.stereo;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int utotal, ftotal;
DPD(3, "emu10k1_audio_read(), buffer=%p, count=%d\n", buffer, (u32) count);
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
DPD(3, "emu10k1_audio_write(), buffer=%p, count=%d\n", buffer, (u32) count);
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
file->private_data = (void *) wave_dev;
- return 0;
+ return nonseekable_open(inode, file);
}
static int emu10k1_audio_release(struct inode *inode, struct file *file)
{
char s[48];
- if (!proc_mkdir ("driver/emu10k1", 0)) {
+ if (!proc_mkdir ("driver/emu10k1", NULL)) {
printk(KERN_ERR "emu10k1: unable to create proc directory driver/emu10k1\n");
goto err_out;
}
sprintf(s, "driver/emu10k1/%s", pci_name(card->pci_dev));
- if (!proc_mkdir (s, 0)) {
+ if (!proc_mkdir (s, NULL)) {
printk(KERN_ERR "emu10k1: unable to create proc directory %s\n", s);
goto err_emu10k1_proc;
}
sprintf(s, "driver/emu10k1/%s/info", pci_name(card->pci_dev));
- if (!create_proc_read_entry (s, 0, 0, emu10k1_info_proc, card)) {
+ if (!create_proc_read_entry (s, 0, NULL, emu10k1_info_proc, card)) {
printk(KERN_ERR "emu10k1: unable to create proc entry %s\n", s);
goto err_dev_proc;
}
if (!card->is_aps) {
sprintf(s, "driver/emu10k1/%s/ac97", pci_name(card->pci_dev));
- if (!create_proc_read_entry (s, 0, 0, ac97_read_proc, card->ac97)) {
+ if (!create_proc_read_entry (s, 0, NULL, ac97_read_proc, card->ac97)) {
printk(KERN_ERR "emu10k1: unable to create proc entry %s\n", s);
goto err_proc_ac97;
}
up(&card->open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int emu10k1_midi_release(struct inode *inode, struct file *file)
DPD(4, "emu10k1_midi_read(), count %#x\n", (u32) count);
- if (pos != &file->f_pos)
- return -ESPIPE;
-
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
DPD(4, "emu10k1_midi_write(), count=%#x\n", (u32) count);
- if (pos != &file->f_pos)
- return -ESPIPE;
-
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
if (card->seq_mididev) {
kfree(card->seq_mididev);
- card->seq_mididev = 0;
+ card->seq_mididev = NULL;
}
}
}
VALIDATE_STATE(s);
file->private_data = s;
- return 0;
+ return nonseekable_open(inode, file);
}
static int es1370_release_mixdev(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, buffer, count))
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_dac2.mapped)
return -ENXIO;
if (!access_ok(VERIFY_READ, buffer, count))
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
init_MUTEX(&s->sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int es1370_release(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_dac1.mapped)
return -ENXIO;
if (!s->dma_dac1.ready && (ret = prog_dmabuf_dac1(s)))
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= FMODE_DAC;
up(&s->open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int es1370_release_dac(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
if (count == 0)
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
if (count == 0)
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
up(&s->open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int es1370_midi_release(struct inode *inode, struct file *file)
}
VALIDATE_STATE(s);
file->private_data = s;
- return 0;
+ return nonseekable_open(inode, file);
}
static int es1371_release_mixdev(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, buffer, count))
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_dac2.mapped)
return -ENXIO;
if (!access_ok(VERIFY_READ, buffer, count))
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
init_MUTEX(&s->sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int es1371_release(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_dac1.mapped)
return -ENXIO;
if (!s->dma_dac1.ready && (ret = prog_dmabuf_dac1(s)))
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= FMODE_DAC;
up(&s->open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int es1371_release_dac(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
if (count == 0)
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
if (count == 0)
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
up(&s->open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int es1371_midi_release(struct inode *inode, struct file *file)
return -ENODEV;
VALIDATE_STATE(s);
file->private_data = s;
- return 0;
+ return nonseekable_open(inode, file);
}
static int solo1_release_mixdev(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf_dac(s)))
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
prog_codec(s);
- return 0;
+ return nonseekable_open(inode, file);
}
static /*const*/ struct file_operations solo1_audio_fops = {
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
if (count == 0)
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
if (count == 0)
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
up(&s->open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int solo1_midi_release(struct inode *inode, struct file *file)
outb(1, s->sbbase+3); /* enable OPL3 */
s->open_mode |= FMODE_DMFM;
up(&s->open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int solo1_dmfm_release(struct inode *inode, struct file *file)
if (file->f_mode & FMODE_READ)
forte_channel_init (forte, &forte->rec);
- return 0;
+ return nonseekable_open(inode, file);
}
unsigned int i = bytes, sz = 0;
unsigned long flags;
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (!access_ok (VERIFY_READ, buffer, bytes))
return -EFAULT;
unsigned int i = bytes, sz;
unsigned long flags;
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (!access_ok (VERIFY_WRITE, buffer, bytes))
return -EFAULT;
static int __init
forte_proc_init (void)
{
- if (!proc_mkdir ("driver/forte", 0))
+ if (!proc_mkdir ("driver/forte", NULL))
return -EIO;
- if (!create_proc_read_entry ("driver/forte/chip", 0, 0, forte_proc_read, forte)) {
+ if (!create_proc_read_entry ("driver/forte/chip", 0, NULL, forte_proc_read, forte)) {
remove_proc_entry ("driver/forte", NULL);
return -EIO;
}
- if (!create_proc_read_entry("driver/forte/ac97", 0, 0, ac97_read_proc, forte->ac97)) {
+ if (!create_proc_read_entry("driver/forte/ac97", 0, NULL, ac97_read_proc, forte->ac97)) {
remove_proc_entry ("driver/forte/chip", NULL);
remove_proc_entry ("driver/forte", NULL);
return -EIO;
{
gus_wave_init(hw_config);
- request_region(hw_config->io_base, 16, "GUS");
- request_region(hw_config->io_base + 0x100, 12, "GUS"); /* 0x10c-> is MAX */
-
if (sound_alloc_dma(hw_config->dma, "GUS"))
printk(KERN_ERR "gus_card.c: Can't allocate DMA channel %d\n", hw_config->dma);
if (hw_config->dma2 != -1 && hw_config->dma2 != hw_config->dma)
printk(KERN_ERR "GUS: Unsupported IRQ %d\n", irq);
return 0;
}
- if (check_region(hw_config->io_base, 16))
- printk(KERN_ERR "GUS: I/O range conflict (1)\n");
- else if (check_region(hw_config->io_base + 0x100, 16))
- printk(KERN_ERR "GUS: I/O range conflict (2)\n");
- else if (gus_wave_detect(hw_config->io_base))
+ if (gus_wave_detect(hw_config->io_base))
return 1;
#ifndef EXCLUDE_GUS_IODETECT
* Look at the possible base addresses (0x2X0, X=1, 2, 3, 4, 5, 6)
*/
- for (io_addr = 0x210; io_addr <= 0x260; io_addr += 0x10)
- if (io_addr != hw_config->io_base) /*
- * Already tested
- */
- if (!check_region(io_addr, 16))
- if (!check_region(io_addr + 0x100, 16))
- if (gus_wave_detect(io_addr))
- {
- hw_config->io_base = io_addr;
- return 1;
- }
+ for (io_addr = 0x210; io_addr <= 0x260; io_addr += 0x10) {
+ if (io_addr == hw_config->io_base) /* Already tested */
+ continue;
+ if (gus_wave_detect(io_addr)) {
+ hw_config->io_base = io_addr;
+ return 1;
+ }
+ }
#endif
printk("NO GUS card found !\n");
unsigned long loc;
unsigned char val;
+ if (!request_region(baseaddr, 16, "GUS"))
+ return 0;
+ if (!request_region(baseaddr + 0x100, 12, "GUS")) { /* 0x10c-> is MAX */
+ release_region(baseaddr, 16);
+ return 0;
+ }
+
gus_base = baseaddr;
gus_write8(0x4c, 0); /* Reset GF1 */
/* See if there is first block there.... */
gus_poke(0L, 0xaa);
- if (gus_peek(0L) != 0xaa)
- return (0);
+ if (gus_peek(0L) != 0xaa) {
+ release_region(baseaddr + 0x100, 12);
+ release_region(baseaddr, 16);
+ return 0;
+ }
/* Now zero it out so that I can check for mirroring .. */
gus_poke(0L, 0x00);
if (hal2) {
file->private_data = hal2;
- return 0;
+ return nonseekable_open(inode, file);
}
return -ENODEV;
}
if (!count)
return 0;
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (down_interruptible(&adc->sem))
return -EINTR;
if (file->f_flags & O_NONBLOCK) {
if (!count)
return 0;
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (down_interruptible(&dac->sem))
return -EINTR;
if (file->f_flags & O_NONBLOCK) {
dac->usecount++;
}
- return 0;
+ return nonseekable_open(inode, file);
}
static int hal2_release(struct inode *inode, struct file *file)
/* extract register offset from codec struct */
#define IO_REG_OFF(codec) (((struct i810_card *) codec->private_data)->ac97_id_map[codec->id])
-#define GET_CIV(port) MODULOP2(inb((port) + OFF_CIV), SG_LEN)
-#define GET_LVI(port) MODULOP2(inb((port) + OFF_LVI), SG_LEN)
+#define I810_IOREAD(size, type, card, off) \
+({ \
+ type val; \
+ if (card->use_mmio) \
+ val=read##size(card->iobase_mmio+off); \
+ else \
+ val=in##size(card->iobase+off); \
+ val; \
+})
+
+#define I810_IOREADL(card, off) I810_IOREAD(l, u32, card, off)
+#define I810_IOREADW(card, off) I810_IOREAD(w, u16, card, off)
+#define I810_IOREADB(card, off) I810_IOREAD(b, u8, card, off)
+
+#define I810_IOWRITE(size, val, card, off) \
+({ \
+ if (card->use_mmio) \
+ write##size(val, card->iobase_mmio+off); \
+ else \
+ out##size(val, card->iobase+off); \
+})
+
+#define I810_IOWRITEL(val, card, off) I810_IOWRITE(l, val, card, off)
+#define I810_IOWRITEW(val, card, off) I810_IOWRITE(w, val, card, off)
+#define I810_IOWRITEB(val, card, off) I810_IOWRITE(b, val, card, off)
+
+#define GET_CIV(card, port) MODULOP2(I810_IOREADB((card), (port) + OFF_CIV), SG_LEN)
+#define GET_LVI(card, port) MODULOP2(I810_IOREADB((card), (port) + OFF_LVI), SG_LEN)
/* set LVI from CIV */
-#define CIV_TO_LVI(port, off) \
- outb(MODULOP2(GET_CIV((port)) + (off), SG_LEN), (port) + OFF_LVI)
+#define CIV_TO_LVI(card, port, off) \
+ I810_IOWRITEB(MODULOP2(GET_CIV((card), (port)) + (off), SG_LEN), (card), (port) + OFF_LVI)
static struct i810_card *devs = NULL;
return 0;
if (rec)
- port = state->card->iobase + dmabuf->read_channel->port;
+ port = dmabuf->read_channel->port;
else
- port = state->card->iobase + dmabuf->write_channel->port;
+ port = dmabuf->write_channel->port;
if(state->card->pci_id == PCI_DEVICE_ID_SI_7012) {
port_picb = port + OFF_SR;
port_picb = port + OFF_PICB;
do {
- civ = GET_CIV(port);
- offset = inw(port_picb);
+ civ = GET_CIV(state->card, port);
+ offset = I810_IOREADW(state->card, port_picb);
/* Must have a delay here! */
if(offset == 0)
udelay(1);
* that we won't have to worry about the chip still being
* out of sync with reality ;-)
*/
- } while (civ != GET_CIV(port) || offset != inw(port_picb));
+ } while (civ != GET_CIV(state->card, port) || offset != I810_IOREADW(state->card, port_picb));
return (((civ + 1) * dmabuf->fragsize - (bytes * offset))
% dmabuf->dmasize);
struct i810_card *card = state->card;
dmabuf->enable &= ~ADC_RUNNING;
- outb(0, card->iobase + PI_CR);
+ I810_IOWRITEB(0, card, PI_CR);
// wait for the card to acknowledge shutdown
- while( inb(card->iobase + PI_CR) != 0 ) ;
+ while( I810_IOREADB(card, PI_CR) != 0 ) ;
// now clear any latent interrupt bits (like the halt bit)
if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- outb( inb(card->iobase + PI_PICB), card->iobase + PI_PICB );
+ I810_IOWRITEB( I810_IOREADB(card, PI_PICB), card, PI_PICB );
else
- outb( inb(card->iobase + PI_SR), card->iobase + PI_SR );
- outl( inl(card->iobase + GLOB_STA) & INT_PI, card->iobase + GLOB_STA);
+ I810_IOWRITEB( I810_IOREADB(card, PI_SR), card, PI_SR );
+ I810_IOWRITEL( I810_IOREADL(card, GLOB_STA) & INT_PI, card, GLOB_STA);
}
static void stop_adc(struct i810_state *state)
(dmabuf->trigger & PCM_ENABLE_INPUT)) {
dmabuf->enable |= ADC_RUNNING;
// Interrupt enable, LVI enable, DMA enable
- outb(0x10 | 0x04 | 0x01, state->card->iobase + PI_CR);
+ I810_IOWRITEB(0x10 | 0x04 | 0x01, state->card, PI_CR);
}
}
struct i810_card *card = state->card;
dmabuf->enable &= ~DAC_RUNNING;
- outb(0, card->iobase + PO_CR);
+ I810_IOWRITEB(0, card, PO_CR);
// wait for the card to acknowledge shutdown
- while( inb(card->iobase + PO_CR) != 0 ) ;
+ while( I810_IOREADB(card, PO_CR) != 0 ) ;
// now clear any latent interrupt bits (like the halt bit)
if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- outb( inb(card->iobase + PO_PICB), card->iobase + PO_PICB );
+ I810_IOWRITEB( I810_IOREADB(card, PO_PICB), card, PO_PICB );
else
- outb( inb(card->iobase + PO_SR), card->iobase + PO_SR );
- outl( inl(card->iobase + GLOB_STA) & INT_PO, card->iobase + GLOB_STA);
+ I810_IOWRITEB( I810_IOREADB(card, PO_SR), card, PO_SR );
+ I810_IOWRITEL( I810_IOREADL(card, GLOB_STA) & INT_PO, card, GLOB_STA);
}
static void stop_dac(struct i810_state *state)
(dmabuf->trigger & PCM_ENABLE_OUTPUT)) {
dmabuf->enable |= DAC_RUNNING;
// Interrupt enable, LVI enable, DMA enable
- outb(0x10 | 0x04 | 0x01, state->card->iobase + PO_CR);
+ I810_IOWRITEB(0x10 | 0x04 | 0x01, state->card, PO_CR);
}
}
static void start_dac(struct i810_state *state)
sg++;
}
spin_lock_irqsave(&state->card->lock, flags);
- outb(2, state->card->iobase+c->port+OFF_CR); /* reset DMA machine */
- while( inb(state->card->iobase+c->port+OFF_CR) & 0x02 ) ;
- outl((u32)state->card->chandma +
+ I810_IOWRITEB(2, state->card, c->port+OFF_CR); /* reset DMA machine */
+ while( I810_IOREADB(state->card, c->port+OFF_CR) & 0x02 ) ;
+ I810_IOWRITEL((u32)state->card->chandma +
c->num*sizeof(struct i810_channel),
- state->card->iobase+c->port+OFF_BDBAR);
- CIV_TO_LVI(state->card->iobase+c->port, 0);
+ state->card, c->port+OFF_BDBAR);
+ CIV_TO_LVI(state->card, c->port, 0);
spin_unlock_irqrestore(&state->card->lock, flags);
void (*start)(struct i810_state *);
count = dmabuf->count;
- port = state->card->iobase;
if (rec) {
- port += dmabuf->read_channel->port;
+ port = dmabuf->read_channel->port;
trigger = PCM_ENABLE_INPUT;
start = __start_adc;
count = dmabuf->dmasize - count;
} else {
- port += dmabuf->write_channel->port;
+ port = dmabuf->write_channel->port;
trigger = PCM_ENABLE_OUTPUT;
start = __start_dac;
}
return;
start(state);
- while (!(inb(port + OFF_CR) & ((1<<4) | (1<<2))))
+ while (!(I810_IOREADB(state->card, port + OFF_CR) & ((1<<4) | (1<<2))))
;
}
/* MASKP2(swptr, fragsize) - 1 is the tail of our transfer */
x = MODULOP2(MASKP2(dmabuf->swptr, fragsize) - 1, dmabuf->dmasize);
x >>= dmabuf->fragshift;
- outb(x, port + OFF_LVI);
+ I810_IOWRITEB(x, state->card, port + OFF_LVI);
}
static void i810_update_lvi(struct i810_state *state, int rec)
/* this is normal for the end of a read */
/* only give an error if we went past the */
/* last valid sg entry */
- if (GET_CIV(state->card->iobase + PI_BASE) !=
- GET_LVI(state->card->iobase + PI_BASE)) {
+ if (GET_CIV(state->card, PI_BASE) !=
+ GET_LVI(state->card, PI_BASE)) {
printk(KERN_WARNING "i810_audio: DMA overrun on read\n");
dmabuf->error++;
}
/* this is normal for the end of a write */
/* only give an error if we went past the */
/* last valid sg entry */
- if (GET_CIV(state->card->iobase + PO_BASE) !=
- GET_LVI(state->card->iobase + PO_BASE)) {
+ if (GET_CIV(state->card, PO_BASE) !=
+ GET_LVI(state->card, PO_BASE)) {
printk(KERN_WARNING "i810_audio: DMA overrun on write\n");
printk("i810_audio: CIV %d, LVI %d, hwptr %x, "
"count %d\n",
- GET_CIV(state->card->iobase + PO_BASE),
- GET_LVI(state->card->iobase + PO_BASE),
+ GET_CIV(state->card, PO_BASE),
+ GET_LVI(state->card, PO_BASE),
dmabuf->hwptr, dmabuf->count);
dmabuf->error++;
}
struct i810_state *state = card->states[i];
struct i810_channel *c;
struct dmabuf *dmabuf;
- unsigned long port = card->iobase;
+ unsigned long port;
u16 status;
if(!state)
} else /* This can occur going from R/W to close */
continue;
- port+=c->port;
+ port = c->port;
if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- status = inw(port + OFF_PICB);
+ status = I810_IOREADW(card, port + OFF_PICB);
else
- status = inw(port + OFF_SR);
+ status = I810_IOREADW(card, port + OFF_SR);
#ifdef DEBUG_INTERRUPTS
printk("NUM %d PORT %X IRQ ( ST%d ", c->num, c->port, status);
if(dmabuf->enable & ADC_RUNNING)
count = dmabuf->dmasize - count;
if (count >= (int)dmabuf->fragsize) {
- outb(inb(port+OFF_CR) | 1, port+OFF_CR);
+ I810_IOWRITEB(I810_IOREADB(card, port+OFF_CR) | 1, card, port+OFF_CR);
#ifdef DEBUG_INTERRUPTS
printk(" CONTINUE ");
#endif
}
}
if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- outw(status & DMA_INT_MASK, port + OFF_PICB);
+ I810_IOWRITEW(status & DMA_INT_MASK, card, port + OFF_PICB);
else
- outw(status & DMA_INT_MASK, port + OFF_SR);
+ I810_IOWRITEW(status & DMA_INT_MASK, card, port + OFF_SR);
}
#ifdef DEBUG_INTERRUPTS
printk(")\n");
spin_lock(&card->lock);
- status = inl(card->iobase + GLOB_STA);
+ status = I810_IOREADL(card, GLOB_STA);
if(!(status & INT_MASK))
{
i810_channel_interrupt(card);
/* clear 'em */
- outl(status & INT_MASK, card->iobase + GLOB_STA);
+ I810_IOWRITEL(status & INT_MASK, card, GLOB_STA);
spin_unlock(&card->lock);
return IRQ_HANDLED;
}
static ssize_t i810_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
struct i810_state *state = (struct i810_state *)file->private_data;
- struct i810_card *card=state ? state->card : 0;
+ struct i810_card *card=state ? state->card : NULL;
struct dmabuf *dmabuf = &state->dmabuf;
ssize_t ret;
unsigned long flags;
printk("i810_audio: i810_read called, count = %d\n", count);
#endif
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (dmabuf->enable & DAC_RUNNING)
static ssize_t i810_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
struct i810_state *state = (struct i810_state *)file->private_data;
- struct i810_card *card=state ? state->card : 0;
+ struct i810_card *card=state ? state->card : NULL;
struct dmabuf *dmabuf = &state->dmabuf;
ssize_t ret;
unsigned long flags;
printk("i810_audio: i810_write called, count = %d\n", count);
#endif
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (dmabuf->enable & ADC_RUNNING)
__stop_adc(state);
}
if (c != NULL) {
- outb(2, state->card->iobase+c->port+OFF_CR); /* reset DMA machine */
- while ( inb(state->card->iobase+c->port+OFF_CR) & 2 )
+ I810_IOWRITEB(2, state->card, c->port+OFF_CR); /* reset DMA machine */
+ while ( I810_IOREADB(state->card, c->port+OFF_CR) & 2 )
cpu_relax();
- outl((u32)state->card->chandma +
+ I810_IOWRITEL((u32)state->card->chandma +
c->num*sizeof(struct i810_channel),
- state->card->iobase+c->port+OFF_BDBAR);
- CIV_TO_LVI(state->card->iobase+c->port, 0);
+ state->card, c->port+OFF_BDBAR);
+ CIV_TO_LVI(state->card, c->port, 0);
}
spin_unlock_irqrestore(&state->card->lock, flags);
/* Global Status and Global Control register are now */
/* used to indicate this. */
- i_glob_cnt = inl(state->card->iobase + GLOB_CNT);
+ i_glob_cnt = I810_IOREADL(state->card, GLOB_CNT);
/* Current # of channels enabled */
if ( i_glob_cnt & 0x0100000 )
switch ( val ) {
case 2: /* 2 channels is always supported */
- outl(i_glob_cnt & 0xffcfffff,
- state->card->iobase + GLOB_CNT);
+ I810_IOWRITEL(i_glob_cnt & 0xffcfffff,
+ state->card, GLOB_CNT);
/* Do we need to change mixer settings???? */
break;
case 4: /* Supported on some chipsets, better check first */
if ( state->card->channels >= 4 ) {
- outl((i_glob_cnt & 0xffcfffff) | 0x100000,
- state->card->iobase + GLOB_CNT);
+ I810_IOWRITEL((i_glob_cnt & 0xffcfffff) | 0x100000,
+ state->card, GLOB_CNT);
/* Do we need to change mixer settings??? */
} else {
val = ret;
break;
case 6: /* Supported on some chipsets, better check first */
if ( state->card->channels >= 6 ) {
- outl((i_glob_cnt & 0xffcfffff) | 0x200000,
- state->card->iobase + GLOB_CNT);
+ I810_IOWRITEL((i_glob_cnt & 0xffcfffff) | 0x200000,
+ state->card, GLOB_CNT);
/* Do we need to change mixer settings??? */
} else {
val = ret;
} else {
i810_set_dac_rate(state, 8000);
/* Put the ACLink in 2 channel mode by default */
- i = inl(card->iobase + GLOB_CNT);
- outl(i & 0xffcfffff, card->iobase + GLOB_CNT);
+ i = I810_IOREADL(card, GLOB_CNT);
+ I810_IOWRITEL(i & 0xffcfffff, card, GLOB_CNT);
}
}
state->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
- return 0;
+ return nonseekable_open(inode, file);
}
static int i810_release(struct inode *inode, struct file *file)
int count = 100;
u16 reg_set = IO_REG_OFF(dev) | (reg&0x7f);
- while(count-- && (inb(card->iobase + CAS) & 1))
+ while(count-- && (I810_IOREADB(card, CAS) & 1))
udelay(1);
return inw(card->ac97base + reg_set);
int count = 100;
u16 reg_set = IO_REG_OFF(dev) | (reg&0x7f);
- while(count-- && (inb(card->iobase + CAS) & 1))
+ while(count-- && (I810_IOREADB(card, CAS) & 1))
udelay(1);
outw(data, card->ac97base + reg_set);
if (card->ac97_codec[i] != NULL &&
card->ac97_codec[i]->dev_mixer == minor) {
file->private_data = card->ac97_codec[i];
- return 0;
+ return nonseekable_open(inode, file);
}
}
return -ENODEV;
static inline int i810_ac97_exists(struct i810_card *card, int ac97_number)
{
- u32 reg = inl(card->iobase + GLOB_STA);
+ u32 reg = I810_IOREADL(card, GLOB_STA);
switch (ac97_number) {
case 0:
return reg & (1<<8);
static int i810_ac97_power_up_bus(struct i810_card *card)
{
- u32 reg = inl(card->iobase + GLOB_CNT);
+ u32 reg = I810_IOREADL(card, GLOB_CNT);
int i;
int primary_codec_id = 0;
reg&=~8; /* ACLink on */
/* At this point we deassert AC_RESET # */
- outl(reg , card->iobase + GLOB_CNT);
+ I810_IOWRITEL(reg , card, GLOB_CNT);
/* We must now allow time for the Codec initialisation.
600mS is the specified time */
for(i=0;i<10;i++)
{
- if((inl(card->iobase+GLOB_CNT)&4)==0)
+ if((I810_IOREADL(card, GLOB_CNT)&4)==0)
break;
set_current_state(TASK_UNINTERRUPTIBLE);
* See if the primary codec comes ready. This must happen
* before we start doing DMA stuff
*/
- /* see i810_ac97_init for the next 7 lines (jsaw) */
- inw(card->ac97base);
+ /* see i810_ac97_init for the next 10 lines (jsaw) */
+ if (card->use_mmio)
+ readw(card->ac97base_mmio);
+ else
+ inw(card->ac97base);
if (ich_use_mmio(card)) {
primary_codec_id = (int) readl(card->iobase_mmio + SDM) & 0x3;
printk(KERN_INFO "i810_audio: Primary codec has ID %d\n",
else
printk("no response.\n");
}
- inw(card->ac97base);
+ if (card->use_mmio)
+ readw(card->ac97base_mmio);
+ else
+ inw(card->ac97base);
return 1;
}
/* to check.... */
card->channels = 2;
- reg = inl(card->iobase + GLOB_STA);
+ reg = I810_IOREADL(card, GLOB_STA);
if ( reg & 0x0200000 )
card->channels = 6;
else if ( reg & 0x0100000 )
card->channels = 4;
printk(KERN_INFO "i810_audio: Audio Controller supports %d channels.\n", card->channels);
printk(KERN_INFO "i810_audio: Defaulting to base 2 channel mode.\n");
- reg = inl(card->iobase + GLOB_CNT);
- outl(reg & 0xffcfffff, card->iobase + GLOB_CNT);
+ reg = I810_IOREADL(card, GLOB_CNT);
+ I810_IOWRITEL(reg & 0xffcfffff, card, GLOB_CNT);
for (num_ac97 = 0; num_ac97 < NR_AC97; num_ac97++)
card->ac97_codec[num_ac97] = NULL;
for (num_ac97 = 0; num_ac97 < nr_ac97_max; num_ac97++) {
/* codec reset */
printk(KERN_INFO "i810_audio: Resetting connection %d\n", num_ac97);
- if (card->use_mmio) readw(card->ac97base_mmio + 0x80*num_ac97);
- else inw(card->ac97base + 0x80*num_ac97);
+ if (card->use_mmio)
+ readw(card->ac97base_mmio + 0x80*num_ac97);
+ else
+ inw(card->ac97base + 0x80*num_ac97);
/* If we have the SDATA_IN Map Register, as on ICH4, we
do not loop thru all possible codec IDs but thru all
goto config_out;
}
dmabuf->count = dmabuf->dmasize;
- CIV_TO_LVI(card->iobase+dmabuf->write_channel->port, -1);
+ CIV_TO_LVI(card, dmabuf->write_channel->port, -1);
local_irq_save(flags);
start_dac(state);
offset = i810_get_dma_addr(state, 0);
return -ENODEV;
}
- if( pci_resource_start(pci_dev, 1) == 0)
- {
- /* MMIO only ICH5 .. here be dragons .. */
- printk(KERN_ERR "i810_audio: Pure MMIO interfaces not yet supported.\n");
- return -ENODEV;
- }
-
if ((card = kmalloc(sizeof(struct i810_card), GFP_KERNEL)) == NULL) {
printk(KERN_ERR "i810_audio: out of memory\n");
return -ENOMEM;
card->ac97base = pci_resource_start (pci_dev, 0);
card->iobase = pci_resource_start (pci_dev, 1);
+ if (!(card->ac97base) || !(card->iobase)) {
+ card->ac97base = 0;
+ card->iobase = 0;
+ }
+
/* if chipset could have mmio capability, check it */
if (card_cap[pci_id->driver_data].flags & CAP_MMIO) {
card->ac97base_mmio_phys = pci_resource_start (pci_dev, 2);
}
}
+ if (!(card->use_mmio) && (!(card->iobase) || !(card->ac97base))) {
+ printk(KERN_ERR "i810_audio: No I/O resources available.\n");
+ goto out_mem;
+ }
+
card->irq = pci_dev->irq;
card->next = devs;
card->magic = I810_CARD_MAGIC;
break;
}
file->private_data = s;
- return 0;
+ return nonseekable_open(inode, file);
}
static int it8172_release_mixdev(struct inode *inode, struct file *file)
unsigned long flags;
int cnt, remainder, avail;
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (db->mapped)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, buffer, count))
unsigned long flags;
int cnt, remainder, avail;
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (db->mapped)
return -ENXIO;
if (!access_ok(VERIFY_READ, buffer, count))
s->open_mode |= (file->f_mode & (FMODE_READ | FMODE_WRITE));
up(&s->open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int it8172_release(struct inode *inode, struct file *file)
if (!card)
return -ENODEV;
file->private_data = card;
- return 0;
+ return nonseekable_open(inode, file);
}
static int ess_release_mixdev(struct inode *inode, struct file *file)
unsigned char *combbuf = NULL;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
up(&s->open_sem);
spin_unlock_irqrestore(&c->lock, flags);
- return 0;
+ return nonseekable_open(inode, file);
}
static int m3_release(struct inode *inode, struct file *file)
file->private_data = card->ac97;
- return 0;
+ return nonseekable_open(inode, file);
}
static int m3_release_mixdev(struct inode *inode, struct file *file)
char *pinfiji = "Pinnacle/Fiji";
#endif
- if (check_region(dev.io, dev.numio)) {
+ if (!request_region(dev.io, dev.numio, "probing")) {
printk(KERN_ERR LOGNAME ": I/O port conflict\n");
return -ENODEV;
}
- request_region(dev.io, dev.numio, "probing");
if (reset_dsp() < 0) {
release_region(dev.io, dev.numio);
/* Joystick */
pinnacle_devs[3].io0 = joystick_io;
- if (check_region(cfg, 2)) {
+ if (!request_region(cfg, 2, "Pinnacle/Fiji Config")) {
printk(KERN_ERR LOGNAME ": Config port 0x%x conflict\n", cfg);
return -EIO;
}
- request_region(cfg, 2, "Pinnacle/Fiji Config");
if (msnd_pinnacle_cfg_devices(cfg, reset, pinnacle_devs)) {
printk(KERN_ERR LOGNAME ": Device configuration error\n");
release_region(cfg, 2);
break;
}
file->private_data = s;
- return 0;
+ return nonseekable_open(inode, file);
}
static int vrc5477_ac97_release_mixdev(struct inode *inode, struct file *file)
int copyCount;
size_t avail;
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
unsigned long flags;
int copyCount, avail;
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
ret = 0;
struct list_head *list;
struct vrc5477_ac97_state *s;
int ret=0;
-
+
+ nonseekable_open(inode, file);
for (list = devs.next; ; list = list->next) {
if (list == &devs)
return -ENODEV;
DBG(printk("device num %d open\n",devnum));
+ nonseekable_open(in, f);
for (list = devs.next; ; list = list->next) {
if (list == &devs)
return -ENODEV;
if(dma == NULL || (dma->s) == NULL)
return -ENXIO;
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (dma->mmapped || !dma->opened)
return -ENXIO;
if(dma == NULL || (dma->s) == NULL)
return -ENXIO;
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (dma->mmapped || !dma->opened)
return -ENXIO;
COMM ("mixer open");
+ nonseekable_open(inode, file);
for (list = devs.next; ; list = list->next) {
if (list == &devs)
return -ENODEV;
if (last_devc == NULL)
return 0;
- last_devc = 0;
+ last_devc = NULL;
if (hw_config->io_base <= 0)
{
}
VALIDATE_STATE(s);
file->private_data = s;
- return 0;
+ return nonseekable_open(inode, file);
}
static int sv_release_mixdev(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
set_fmt(s, fmtm, fmts);
s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&s->open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int sv_release(struct inode *inode, struct file *file)
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
if (count == 0)
int cnt;
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
if (count == 0)
spin_unlock_irqrestore(&s->lock, flags);
s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
up(&s->open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int sv_midi_release(struct inode *inode, struct file *file)
outb(1, s->iosynth+3); /* enable OPL3 */
s->open_mode |= FMODE_DMFM;
up(&s->open_sem);
- return 0;
+ return nonseekable_open(inode, file);
}
static int sv_dmfm_release(struct inode *inode, struct file *file)
}
-// ---------------------------------------------------------------------
-
-static loff_t cs4297a_llseek(struct file *file, loff_t offset, int origin)
-{
- return -ESPIPE;
-}
-
-
// ---------------------------------------------------------------------
static int cs4297a_open_mixdev(struct inode *inode, struct file *file)
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
printk(KERN_INFO "cs4297a: cs4297a_open_mixdev()- 0\n"));
- return 0;
+ return nonseekable_open(inode, file);
}
// ******************************************************************************************
static /*const */ struct file_operations cs4297a_mixer_fops = {
.owner = THIS_MODULE,
- .llseek = cs4297a_llseek,
+ .llseek = no_llseek,
.ioctl = cs4297a_ioctl_mixdev,
.open = cs4297a_open_mixdev,
.release = cs4297a_release_mixdev,
printk(KERN_INFO "cs4297a: cs4297a_read()+ %d \n", count));
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_adc.mapped)
return -ENXIO;
if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
count));
VALIDATE_STATE(s);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (s->dma_dac.mapped)
return -ENXIO;
if (!s->dma_dac.ready && (ret = prog_dmabuf_dac(s)))
}
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2,
printk(KERN_INFO "cs4297a: cs4297a_open()- 0\n"));
- return 0;
+ return nonseekable_open(inode, file);
}
// ******************************************************************************************
static /*const */ struct file_operations cs4297a_audio_fops = {
.owner = THIS_MODULE,
- .llseek = cs4297a_llseek,
+ .llseek = no_llseek,
.read = cs4297a_read,
.write = cs4297a_write,
.poll = cs4297a_poll,
pr_debug("trident: trident_read called, count = %d\n", count);
VALIDATE_STATE(state);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
pr_debug("trident: trident_write called, count = %d\n", count);
VALIDATE_STATE(state);
- if (ppos != &file->f_pos)
- return -ESPIPE;
/*
* Guard against an mmap or ioctl while writing
pr_debug("trident: open virtual channel %d, hard channel %d\n",
state->virt, dmabuf->channel->num);
- return 0;
+ return nonseekable_open(inode, file);
}
static int
match:
file->private_data = card->ac97_codec[i];
- return 0;
+ return nonseekable_open(inode, file);
}
static int
* NO WARRANTY
*
* For a list of known bugs (errata) and documentation,
- * see via-audio.pdf in linux/Documentation/DocBook.
+ * see via-audio.pdf in Documentation/DocBook.
* If this documentation does not exist, run "make pdfdocs".
*/
file->private_data = card->ac97;
DPRINTK ("EXIT, returning 0\n");
- return 0;
+ return nonseekable_open(inode, file);
}
static int via_mixer_ioctl (struct inode *inode, struct file *file, unsigned int cmd,
card = file->private_data;
assert (card != NULL);
- if (ppos != &file->f_pos) {
- DPRINTK ("EXIT, returning -ESPIPE\n");
- return -ESPIPE;
- }
-
rc = via_syscall_down (card, nonblock);
if (rc) goto out;
card = file->private_data;
assert (card != NULL);
- if (ppos != &file->f_pos) {
- DPRINTK ("EXIT, returning -ESPIPE\n");
- return -ESPIPE;
- }
-
rc = via_syscall_down (card, nonblock);
if (rc) goto out;
}
DPRINTK ("EXIT, returning 0\n");
- return 0;
+ return nonseekable_open(inode, file);
}
/*
* Sound driver for Silicon Graphics 320 and 540 Visual Workstations'
- * onboard audio. See notes in ../../Documentation/sound/oss/vwsnd .
+ * onboard audio. See notes in Documentation/sound/oss/vwsnd .
*
* Copyright 1999 Silicon Graphics, Inc. All rights reserved.
*
{ 0x0E, "Bad MIDI channel number" },
{ 0x10, "Download Record Error" },
{ 0x80, "Success" },
- { 0x0, 0x0 }
+ { 0 }
};
#define NEEDS_ACK 1
if (cmd == WFC_DOWNLOAD_MULTISAMPLE) {
wfcmd->write_cnt = (unsigned int) rbuf;
- rbuf = 0;
+ rbuf = NULL;
}
DPRINT (WF_DEBUG_CMD, "0x%x [%s] (%d,%d,%d)\n",
wbuf[0] = sample_num & 0x7f;
wbuf[1] = sample_num >> 7;
- if ((x = wavefront_cmd (WFC_DELETE_SAMPLE, 0, wbuf)) == 0) {
+ if ((x = wavefront_cmd (WFC_DELETE_SAMPLE, NULL, wbuf)) == 0) {
dev.sample_status[sample_num] = WF_ST_EMPTY;
}
bptr = munge_int32 (header->number, buf, 2);
munge_buf ((unsigned char *)&header->hdr.p, bptr, WF_PATCH_BYTES);
- if (wavefront_cmd (WFC_DOWNLOAD_PATCH, 0, buf)) {
+ if (wavefront_cmd (WFC_DOWNLOAD_PATCH, NULL, buf)) {
printk (KERN_ERR LOGNAME "download patch failed\n");
return -(EIO);
}
buf[0] = header->number;
munge_buf ((unsigned char *)&header->hdr.pr, &buf[1], WF_PROGRAM_BYTES);
- if (wavefront_cmd (WFC_DOWNLOAD_PROGRAM, 0, buf)) {
+ if (wavefront_cmd (WFC_DOWNLOAD_PROGRAM, NULL, buf)) {
printk (KERN_WARNING LOGNAME "download patch failed\n");
return -(EIO);
}
{
char rbuf[8];
- if (wavefront_cmd (WFC_REPORT_FREE_MEMORY, rbuf, 0)) {
+ if (wavefront_cmd (WFC_REPORT_FREE_MEMORY, rbuf, NULL)) {
printk (KERN_WARNING LOGNAME "can't get memory stats.\n");
return -1;
} else {
UINT16 sample_short;
UINT32 length;
- UINT16 __user *data_end = 0;
+ UINT16 __user *data_end = NULL;
unsigned int i;
const int max_blksize = 4096/2;
unsigned int written;
if (wavefront_cmd (header->size ?
WFC_DOWNLOAD_SAMPLE : WFC_DOWNLOAD_SAMPLE_HEADER,
- 0, sample_hdr)) {
+ NULL, sample_hdr)) {
printk (KERN_WARNING LOGNAME "sample %sdownload refused.\n",
header->size ? "" : "header ");
return -(EIO);
blocksize = ((length-written+7)&~0x7);
}
- if (wavefront_cmd (WFC_DOWNLOAD_BLOCK, 0, 0)) {
+ if (wavefront_cmd (WFC_DOWNLOAD_BLOCK, NULL, NULL)) {
printk (KERN_WARNING LOGNAME "download block "
"request refused.\n");
return -(EIO);
munge_int32 (header->hdr.a.FrequencyBias, &alias_hdr[20], 3);
munge_int32 (*(&header->hdr.a.FrequencyBias+1), &alias_hdr[23], 2);
- if (wavefront_cmd (WFC_DOWNLOAD_SAMPLE_ALIAS, 0, alias_hdr)) {
+ if (wavefront_cmd (WFC_DOWNLOAD_SAMPLE_ALIAS, NULL, alias_hdr)) {
printk (KERN_ERR LOGNAME "download alias failed.\n");
return -(EIO);
}
munge_int32 (((unsigned char *)drum)[i], &drumbuf[1+(i*2)], 2);
}
- if (wavefront_cmd (WFC_DOWNLOAD_EDRUM_PROGRAM, 0, drumbuf)) {
+ if (wavefront_cmd (WFC_DOWNLOAD_EDRUM_PROGRAM, NULL, drumbuf)) {
printk (KERN_ERR LOGNAME "download drum failed.\n");
return -(EIO);
}
voices[0] = 32;
- if (wavefront_cmd (WFC_SET_NVOICES, 0, voices)) {
+ if (wavefront_cmd (WFC_SET_NVOICES, NULL, voices)) {
printk (KERN_WARNING LOGNAME
"cannot set number of voices to 32.\n");
goto gone_bad;
unsigned int swptr;
int cnt; /* This many to go in this revolution */
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (!dmabuf->ready && (ret = prog_dmabuf(state, 1)))
YMFDBGW("ymf_write: count %d\n", count);
- if (ppos != &file->f_pos)
- return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
if (!dmabuf->ready && (ret = prog_dmabuf(state, 0)))
#endif
up(&unit->open_sem);
- return 0;
+ return nonseekable_open(inode, file);
out_nodma:
/*
match:
file->private_data = unit->ac97_codec[i];
- return 0;
+ return nonseekable_open(inode, file);
}
static int ymf_ioctl_mixdev(struct inode *inode, struct file *file,
codec->dma_area_ba = pba;
codec->dma_area_size = size + 0xff;
- if ((off = ((uint) ptr) & 0xff) != 0) {
+ off = (unsigned long)ptr & 0xff;
+ if (off) {
ptr += 0x100 - off;
pba += 0x100 - off;
}
#endif
/* Global resources */
- char mixcapt[2];
- char mixplayb[4];
+ s8 mixcapt[2];
+ s8 mixplayb[4];
#ifndef CHIP_AU8820
- char mixspdif[2];
- char mixa3d[2]; /* mixers which collect all a3d streams. */
- char mixxtlk[2]; /* crosstalk canceler mixer inputs. */
+ s8 mixspdif[2];
+ s8 mixa3d[2]; /* mixers which collect all a3d streams. */
+ s8 mixxtlk[2]; /* crosstalk canceler mixer inputs. */
#endif
u32 fixed_res[5];
static int
snd_vortex_a3d_get(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t * ucontrol)
{
- //a3dsrc_t *a = (a3dsrc_t*)(kcontrol->private_value);
+ //a3dsrc_t *a = kcontrol->private_data;
/* No read yet. Would this be really useable/needed ? */
return 0;
snd_vortex_a3d_hrtf_put(snd_kcontrol_t *
kcontrol, snd_ctl_elem_value_t * ucontrol)
{
- a3dsrc_t *a = (a3dsrc_t *) (kcontrol->private_value);
+ a3dsrc_t *a = kcontrol->private_data;
int changed = 1, i;
int coord[6];
for (i = 0; i < 6; i++)
snd_vortex_a3d_itd_put(snd_kcontrol_t *
kcontrol, snd_ctl_elem_value_t * ucontrol)
{
- a3dsrc_t *a = (a3dsrc_t *) (kcontrol->private_value);
+ a3dsrc_t *a = kcontrol->private_data;
int coord[6];
int i, changed = 1;
for (i = 0; i < 6; i++)
snd_vortex_a3d_ild_put(snd_kcontrol_t *
kcontrol, snd_ctl_elem_value_t * ucontrol)
{
- a3dsrc_t *a = (a3dsrc_t *) (kcontrol->private_value);
+ a3dsrc_t *a = kcontrol->private_data;
int changed = 1;
int l, r;
/* There may be some scale tranlation needed here. */
snd_vortex_a3d_filter_put(snd_kcontrol_t
* kcontrol, snd_ctl_elem_value_t * ucontrol)
{
- a3dsrc_t *a = (a3dsrc_t *) (kcontrol->private_value);
+ a3dsrc_t *a = kcontrol->private_data;
int i, changed = 1;
int params[6];
for (i = 0; i < 6; i++)
}
static snd_kcontrol_new_t vortex_a3d_kcontrol __devinitdata = {
- .iface = SNDRV_CTL_ELEM_IFACE_PCM,.name =
- "Playback PCM advanced processing",.index =
- 0,.access =
- SNDRV_CTL_ELEM_ACCESS_READWRITE,.private_value =
- 0,.info = snd_vortex_a3d_hrtf_info,.get =
- snd_vortex_a3d_get,.put = snd_vortex_a3d_hrtf_put
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = "Playback PCM advanced processing",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = snd_vortex_a3d_hrtf_info,
+ .get = snd_vortex_a3d_get,
+ .put = snd_vortex_a3d_hrtf_put,
};
/* Control (un)registration. */
if ((kcontrol =
snd_ctl_new1(&vortex_a3d_kcontrol, vortex)) == NULL)
return -ENOMEM;
- kcontrol->private_value = (int)&(vortex->a3d[i]);
+ kcontrol->private_data = &vortex->a3d[i];
kcontrol->id.numid = CTRLID_HRTF;
kcontrol->info = snd_vortex_a3d_hrtf_info;
kcontrol->put = snd_vortex_a3d_hrtf_put;
if ((kcontrol =
snd_ctl_new1(&vortex_a3d_kcontrol, vortex)) == NULL)
return -ENOMEM;
- kcontrol->private_value = (int)&(vortex->a3d[i]);
+ kcontrol->private_data = &vortex->a3d[i];
kcontrol->id.numid = CTRLID_ITD;
kcontrol->info = snd_vortex_a3d_itd_info;
kcontrol->put = snd_vortex_a3d_itd_put;
if ((kcontrol =
snd_ctl_new1(&vortex_a3d_kcontrol, vortex)) == NULL)
return -ENOMEM;
- kcontrol->private_value = (int)&(vortex->a3d[i]);
+ kcontrol->private_data = &vortex->a3d[i];
kcontrol->id.numid = CTRLID_GAINS;
kcontrol->info = snd_vortex_a3d_ild_info;
kcontrol->put = snd_vortex_a3d_ild_put;
if ((kcontrol =
snd_ctl_new1(&vortex_a3d_kcontrol, vortex)) == NULL)
return -ENOMEM;
- kcontrol->private_value = (int)&(vortex->a3d[i]);
+ kcontrol->private_data = &vortex->a3d[i];
kcontrol->id.numid = CTRLID_FILTER;
kcontrol->info = snd_vortex_a3d_filter_info;
kcontrol->put = snd_vortex_a3d_filter_put;
snd_azf3328_setdmaa(chip, runtime->dma_addr, snd_pcm_lib_period_bytes(substream), snd_pcm_lib_buffer_bytes(substream), 0);
spin_lock_irqsave(&chip->reg_lock, flags);
-#if WIN9X
+#ifdef WIN9X
/* FIXME: enable playback/recording??? */
status1 |= DMA_PLAY_SOMETHING1 | DMA_PLAY_SOMETHING2;
outw(status1, chip->codec_port+IDX_IO_PLAY_FLAGS);
snd_azf3328_setdmaa(chip, runtime->dma_addr, snd_pcm_lib_period_bytes(substream), snd_pcm_lib_buffer_bytes(substream), 1);
spin_lock_irqsave(&chip->reg_lock, flags);
-#if WIN9X
+#ifdef WIN9X
/* FIXME: enable playback/recording??? */
status1 |= DMA_PLAY_SOMETHING1 | DMA_PLAY_SOMETHING2;
outw(status1, chip->codec_port+IDX_IO_REC_FLAGS);
unsigned long flags;
spin_lock_irqsave(&chip->reg_lock, flags);
-#if QUERY_HARDWARE
+#ifdef QUERY_HARDWARE
bufptr = inl(chip->codec_port+IDX_IO_PLAY_DMA_START_1);
#else
bufptr = substream->runtime->dma_addr;
unsigned long flags;
spin_lock_irqsave(&chip->reg_lock, flags);
-#if QUERY_HARDWARE
+#ifdef QUERY_HARDWARE
bufptr = inl(chip->codec_port+IDX_IO_REC_DMA_START_1);
#else
bufptr = substream->runtime->dma_addr;
}
static long snd_cs4281_BA0_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf, long count)
+ struct file *file, char __user *buf,
+ unsigned long count, unsigned long pos)
{
long size;
cs4281_t *chip = snd_magic_cast(cs4281_t, entry->private_data, return -ENXIO);
size = count;
- if (file->f_pos + size > CS4281_BA0_SIZE)
- size = (long)CS4281_BA0_SIZE - file->f_pos;
+ if (pos + size > CS4281_BA0_SIZE)
+ size = (long)CS4281_BA0_SIZE - pos;
if (size > 0) {
- if (copy_to_user_fromio(buf, chip->ba0 + file->f_pos, size))
+ if (copy_to_user_fromio(buf, chip->ba0 + pos, size))
return -EFAULT;
- file->f_pos += size;
}
return size;
}
static long snd_cs4281_BA1_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf, long count)
+ struct file *file, char __user *buf,
+ unsigned long count, unsigned long pos)
{
long size;
cs4281_t *chip = snd_magic_cast(cs4281_t, entry->private_data, return -ENXIO);
size = count;
- if (file->f_pos + size > CS4281_BA1_SIZE)
- size = (long)CS4281_BA1_SIZE - file->f_pos;
+ if (pos + size > CS4281_BA1_SIZE)
+ size = (long)CS4281_BA1_SIZE - pos;
if (size > 0) {
- if (copy_to_user_fromio(buf, chip->ba1 + file->f_pos, size))
+ if (copy_to_user_fromio(buf, chip->ba1 + pos, size))
return -EFAULT;
- file->f_pos += size;
}
return size;
}
*/
static long snd_cs46xx_io_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf, long count)
+ struct file *file, char __user *buf,
+ unsigned long count, unsigned long pos)
{
long size;
snd_cs46xx_region_t *region = (snd_cs46xx_region_t *)entry->private_data;
size = count;
- if (file->f_pos + (size_t)size > region->size)
- size = region->size - file->f_pos;
+ if (pos + (size_t)size > region->size)
+ size = region->size - pos;
if (size > 0) {
- if (copy_to_user_fromio(buf, region->remap_addr + file->f_pos, size))
+ if (copy_to_user_fromio(buf, region->remap_addr + pos, size))
return -EFAULT;
- file->f_pos += size;
}
return size;
}
#define TOTAL_SIZE_CODE (0x200*8)
static long snd_emu10k1_fx8010_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf, long count)
+ struct file *file, char __user *buf,
+ unsigned long count, unsigned long pos)
{
long size;
emu10k1_t *emu = snd_magic_cast(emu10k1_t, entry->private_data, return -ENXIO);
offset = emu->audigy ? A_FXGPREGBASE : FXGPREGBASE;
}
size = count;
- if (file->f_pos + size > entry->size)
- size = (long)entry->size - file->f_pos;
+ if (pos + size > entry->size)
+ size = (long)entry->size - pos;
if (size > 0) {
unsigned int *tmp;
long res;
unsigned int idx;
if ((tmp = kmalloc(size + 8, GFP_KERNEL)) == NULL)
return -ENOMEM;
- for (idx = 0; idx < ((file->f_pos & 3) + size + 3) >> 2; idx++)
- tmp[idx] = snd_emu10k1_ptr_read(emu, offset + idx + (file->f_pos >> 2), 0);
- if (copy_to_user(buf, ((char *)tmp) + (file->f_pos & 3), size))
+ for (idx = 0; idx < ((pos & 3) + size + 3) >> 2; idx++)
+ tmp[idx] = snd_emu10k1_ptr_read(emu, offset + idx + (pos >> 2), 0);
+ if (copy_to_user(buf, ((char *)tmp) + (pos & 3), size))
res = -EFAULT;
else {
res = size;
- file->f_pos += size;
}
kfree(tmp);
return res;
if ((val & 0xff00) < 0x1f00)
val += 0x0100;
}
+ if (val == 0x1f1f)
+ val |= 0x8000;
snd_ac97_write_cache(chip->ac97, AC97_MASTER, val);
snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
&chip->master_volume->id);
snd_ice1712_hoontech_cards,
snd_ice1712_delta_cards,
snd_ice1712_ews_cards,
- 0,
+ NULL,
};
static unsigned char __devinit snd_ice1712_read_i2c(ice1712_t *ice,
return -EBUSY; /* FIXME: should handle blocking mode properly */
}
up(&ice->open_mutex);
- runtime->private_data = (void*)(1 << (substream->number + 4));
+ runtime->private_data = (void*)(1UL << (substream->number + 4));
ice->playback_con_substream_ds[substream->number] = substream;
runtime->hw = snd_vt1724_2ch_stereo;
snd_pcm_set_sync(substream);
snd_vt1724_revo_cards,
snd_vt1724_amp_cards,
snd_vt1724_aureon_cards,
- 0,
+ NULL,
};
{ 0x5455, "ALi M5455" },
{ 0x746d, "AMD AMD8111" },
#endif
- { 0, 0 },
+ { 0 },
};
static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
.amp_gpio = 0x03,
},
/* END */
- { 0 }
+ { NULL }
};
mixart_BA0 proc interface for BAR 0 - read callback
*/
static long snd_mixart_BA0_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf, long count)
+ struct file *file, char __user *buf,
+ unsigned long count, unsigned long pos)
{
mixart_mgr_t *mgr = snd_magic_cast(mixart_mgr_t, entry->private_data, return -ENXIO);
count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
if(count <= 0)
return 0;
- if(file->f_pos + count > MIXART_BA0_SIZE)
- count = (long)(MIXART_BA0_SIZE - file->f_pos);
- if(copy_to_user_fromio(buf, MIXART_MEM( mgr, file->f_pos ), count))
+ if(pos + count > MIXART_BA0_SIZE)
+ count = (long)(MIXART_BA0_SIZE - pos);
+ if(copy_to_user_fromio(buf, MIXART_MEM( mgr, pos ), count))
return -EFAULT;
- file->f_pos += count;
return count;
}
mixart_BA1 proc interface for BAR 1 - read callback
*/
static long snd_mixart_BA1_read(snd_info_entry_t *entry, void *file_private_data,
- struct file *file, char __user *buf, long count)
+ struct file *file, char __user *buf,
+ unsigned long count, unsigned long pos)
{
mixart_mgr_t *mgr = snd_magic_cast(mixart_mgr_t, entry->private_data, return -ENXIO);
count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
if(count <= 0)
return 0;
- if(file->f_pos + count > MIXART_BA1_SIZE)
- count = (long)(MIXART_BA1_SIZE - file->f_pos);
- if(copy_to_user_fromio(buf, MIXART_REG( mgr, file->f_pos ), count))
+ if(pos + count > MIXART_BA1_SIZE)
+ count = (long)(MIXART_BA1_SIZE - pos);
+ if(copy_to_user_fromio(buf, MIXART_REG( mgr, pos ), count))
return -EFAULT;
- file->f_pos += count;
return count;
}
snd_nm256_capture_copy(snd_pcm_substream_t *substream,
int channel, /* not used (interleaved data) */
snd_pcm_uframes_t pos,
- void *dst,
+ void __user *dst,
snd_pcm_uframes_t count)
{
snd_pcm_runtime_t *runtime = substream->runtime;
hdsp->irq = -1;
hdsp->state = 0;
- hdsp->midi[0].rmidi = 0;
- hdsp->midi[1].rmidi = 0;
- hdsp->midi[0].input = 0;
- hdsp->midi[1].input = 0;
- hdsp->midi[0].output = 0;
- hdsp->midi[1].output = 0;
+ hdsp->midi[0].rmidi = NULL;
+ hdsp->midi[1].rmidi = NULL;
+ hdsp->midi[0].input = NULL;
+ hdsp->midi[1].input = NULL;
+ hdsp->midi[0].output = NULL;
+ hdsp->midi[1].output = NULL;
spin_lock_init(&hdsp->midi[0].lock);
spin_lock_init(&hdsp->midi[1].lock);
hdsp->iobase = 0;
- hdsp->res_port = 0;
+ hdsp->res_port = NULL;
hdsp->control_register = 0;
hdsp->control2_register = 0;
hdsp->io_type = Undefined;
sonic->mode |= SV_MODE_PLAY;
sonic->playback_substream = substream;
runtime->hw = snd_sonicvibes_playback;
- snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_sonicvibes_hw_constraint_dac_rate, 0, SNDRV_PCM_HW_PARAM_RATE, -1);
+ snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_sonicvibes_hw_constraint_dac_rate, NULL, SNDRV_PCM_HW_PARAM_RATE, -1);
return 0;
}
menu "ALSA PowerMac devices"
depends on SND!=n && PPC
+comment "ALSA PowerMac requires I2C"
+ depends on SND && I2C=n
+
config SND_POWERMAC
tristate "PowerMac (AWACS, DACA, Burgundy, Tumbler, Keywest)"
- depends on SND
+ depends on SND && I2C
select SND_PCM
endmenu
sound = sound->next;
if (! sound)
return -ENODEV;
- prop = (unsigned int *) get_property(sound, "sub-frame", 0);
+ prop = (unsigned int *) get_property(sound, "sub-frame", NULL);
if (prop && *prop < 16)
chip->subframe = *prop;
/* This should be verified on older screamers */
// chip->can_byte_swap = 0; /* FIXME: check this */
chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */
}
- prop = (unsigned int *)get_property(sound, "device-id", 0);
+ prop = (unsigned int *)get_property(sound, "device-id", NULL);
if (prop)
chip->device_id = *prop;
chip->has_iic = (find_devices("perch") != NULL);
{
if (u->urb) {
usb_free_urb(u->urb);
- u->urb = 0;
+ u->urb = NULL;
}
if (u->buf) {
kfree(u->buf);
- u->buf = 0;
+ u->buf = NULL;
}
}
release_urb_ctx(&subs->syncurb[i]);
if (subs->tmpbuf) {
kfree(subs->tmpbuf);
- subs->tmpbuf = 0;
+ subs->tmpbuf = NULL;
}
subs->nurbs = 0;
}
{
if (kctl->private_data) {
snd_magic_kfree((void *)kctl->private_data);
- kctl->private_data = 0;
+ kctl->private_data = NULL;
}
}
usb_mixer_elem_info_t *cval = snd_magic_cast(usb_mixer_elem_info_t, kctl->private_data,);
num_ins = cval->max;
snd_magic_kfree(cval);
- kctl->private_data = 0;
+ kctl->private_data = NULL;
}
if (kctl->private_value) {
char **itemlist = (char **)kctl->private_value;